diff --git a/README.md b/README.md index ab12d621..089ec440 100644 --- a/README.md +++ b/README.md @@ -37,7 +37,7 @@ You can use OpenCommit by simply running it via the CLI like this `oco`. 2 secon 3. Set the key to OpenCommit config: ```sh - oco config set OCO_OPENAI_API_KEY= + oco config set OCO_API_KEY= ``` Your API key is stored locally in the `~/.opencommit` config file. @@ -91,7 +91,7 @@ oco --fgm Create a `.env` file and add OpenCommit config variables there like this: ```env -OCO_OPENAI_API_KEY= +OCO_API_KEY= OCO_TOKENS_MAX_INPUT= OCO_TOKENS_MAX_OUTPUT= OCO_OPENAI_BASE_PATH= @@ -343,7 +343,7 @@ jobs: # set openAI api key in repo actions secrets, # for openAI keys go to: https://platform.openai.com/account/api-keys # for repo secret go to: /settings/secrets/actions - OCO_OPENAI_API_KEY: ${{ secrets.OCO_OPENAI_API_KEY }} + OCO_API_KEY: ${{ secrets.OCO_API_KEY }} # customization OCO_TOKENS_MAX_INPUT: 4096 diff --git a/out/cli.cjs b/out/cli.cjs index f33017d8..8b444fbd 100755 --- a/out/cli.cjs +++ b/out/cli.cjs @@ -16384,7 +16384,7 @@ function G3(t, e2) { // package.json var package_default = { name: "opencommit", - version: "3.0.11", + version: "3.0.12", description: "Auto-generate impressive commits in 1 second. Killing lame commits with AI \u{1F92F}\u{1F52B}", keywords: [ "git", @@ -16453,6 +16453,7 @@ var package_default = { "@actions/github": "^5.1.1", "@clack/prompts": "^0.6.1", "@dqbd/tiktoken": "^1.0.2", + "@google/generative-ai": "^0.10.0", "@octokit/webhooks-schemas": "^6.11.0", "@octokit/webhooks-types": "^6.11.0", ai: "^2.2.14", @@ -18658,25 +18659,29 @@ var validateConfig = (key, condition, validationMessage) => { process.exit(1); } }; -var configValidators = { - ["OCO_OPENAI_API_KEY" /* OCO_OPENAI_API_KEY */](value, config8 = {}) { - validateConfig( - "API_KEY", - value || config8.OCO_AI_PROVIDER == "ollama", - "You need to provide an API key" - ); +var apiKeyValidator = (value, config8 = {}) => { + validateConfig( + "API_KEY", + value || process.env.OCO_OPENAI_API_KEY || process.env.OCO_API_KEY || config8.OCO_AI_PROVIDER == "ollama" || config8.OCO_AI_PROVIDER == "gemini", + "You need to provide an API key" + ); + if (config8.OCO_AI_PROVIDER != "gemini") { validateConfig( - "OCO_OPENAI_API_KEY" /* OCO_OPENAI_API_KEY */, + "OCO_API_KEY" /* OCO_API_KEY */, value.startsWith("sk-"), 'Must start with "sk-"' ); validateConfig( - "OCO_OPENAI_API_KEY" /* OCO_OPENAI_API_KEY */, + "OCO_API_KEY" /* OCO_API_KEY */, config8["OCO_OPENAI_BASE_PATH" /* OCO_OPENAI_BASE_PATH */] || value.length === 51, "Must be 51 characters long" ); - return value; - }, + } + return value; +}; +var configValidators = { + ["OCO_API_KEY" /* OCO_API_KEY */]: apiKeyValidator, + ["OCO_OPENAI_API_KEY" /* OCO_OPENAI_API_KEY */]: apiKeyValidator, ["OCO_DESCRIPTION" /* OCO_DESCRIPTION */](value) { validateConfig( "OCO_DESCRIPTION" /* OCO_DESCRIPTION */, @@ -18741,19 +18746,21 @@ var configValidators = { ); return value; }, - ["OCO_MODEL" /* OCO_MODEL */](value) { - validateConfig( - "OCO_MODEL" /* OCO_MODEL */, - [ - "gpt-3.5-turbo", - "gpt-3.5-turbo-0125", - "gpt-4", - "gpt-4-1106-preview", - "gpt-4-turbo-preview", - "gpt-4-0125-preview" - ].includes(value), - `${value} is not supported yet, use 'gpt-4', 'gpt-3.5-turbo' (default), 'gpt-3.5-turbo-0125', 'gpt-4-1106-preview', 'gpt-4-turbo-preview' or 'gpt-4-0125-preview'` - ); + ["OCO_MODEL" /* OCO_MODEL */](value, config8) { + if (config8.OCO_AI_PROVIDER != "gemini") { + validateConfig( + "OCO_MODEL" /* OCO_MODEL */, + [ + "gpt-3.5-turbo", + "gpt-3.5-turbo-0125", 
+ "gpt-4", + "gpt-4-1106-preview", + "gpt-4-turbo-preview", + "gpt-4-0125-preview" + ].includes(value), + `${value} is not supported yet, use 'gpt-4', 'gpt-3.5-turbo' (default), 'gpt-3.5-turbo-0125', 'gpt-4-1106-preview', 'gpt-4-turbo-preview' or 'gpt-4-0125-preview'` + ); + } return value; }, ["OCO_MESSAGE_TEMPLATE_PLACEHOLDER" /* OCO_MESSAGE_TEMPLATE_PLACEHOLDER */](value) { @@ -18778,7 +18785,8 @@ var configValidators = { [ "", "openai", - "ollama" + "ollama", + "gemini" ].includes(value), `${value} is not supported yet, use 'ollama' or 'openai' (default)` ); @@ -18796,7 +18804,7 @@ var configValidators = { var configPath = (0, import_path.join)((0, import_os.homedir)(), ".opencommit"); var getConfig = () => { const configFromEnv = { - OCO_OPENAI_API_KEY: process.env.OCO_OPENAI_API_KEY, + OCO_API_KEY: process.env.OCO_API_KEY || process.env.OCO_OPENAI_API_KEY, OCO_TOKENS_MAX_INPUT: process.env.OCO_TOKENS_MAX_INPUT ? Number(process.env.OCO_TOKENS_MAX_INPUT) : void 0, OCO_TOKENS_MAX_OUTPUT: process.env.OCO_TOKENS_MAX_OUTPUT ? Number(process.env.OCO_TOKENS_MAX_OUTPUT) : void 0, OCO_OPENAI_BASE_PATH: process.env.OCO_OPENAI_BASE_PATH, @@ -21943,13 +21951,13 @@ var config3 = getConfig(); var MAX_TOKENS_OUTPUT = config3?.OCO_TOKENS_MAX_OUTPUT || 500 /* DEFAULT_MAX_TOKENS_OUTPUT */; var MAX_TOKENS_INPUT = config3?.OCO_TOKENS_MAX_INPUT || 4096 /* DEFAULT_MAX_TOKENS_INPUT */; var basePath = config3?.OCO_OPENAI_BASE_PATH; -var apiKey = config3?.OCO_OPENAI_API_KEY; +var apiKey = config3?.OCO_API_KEY || config3?.OCO_OPENAI_API_KEY; var [command, mode] = process.argv.slice(2); var isLocalModel = config3?.OCO_AI_PROVIDER == "ollama"; if (!apiKey && command !== "config" && mode !== "set" /* set */ && !isLocalModel) { ae("opencommit"); ce( - "OCO_OPENAI_API_KEY is not set, please run `oco config set OCO_OPENAI_API_KEY= . If you are using GPT, make sure you add payment details, so API works.`" + "OCO_OPENAI_API_KEY is not set, please run `oco config set OCO_API_KEY= . 
If you are using GPT, make sure you add payment details, so API works.`" ); ce( "For help look into README https://github.com/di-sukharev/opencommit#setup" @@ -21958,49 +21966,947 @@ if (!apiKey && command !== "config" && mode !== "set" /* set */ && !isLocalModel } var MODEL = config3?.OCO_MODEL || "gpt-3.5-turbo"; var OpenAi = class { - openAiApiConfiguration = new import_openai2.Configuration({ - apiKey - }); - openAI; constructor() { + this.openAiApiConfiguration = new import_openai2.Configuration({ + apiKey + }); + this.generateCommitMessage = async (messages) => { + const params = { + model: MODEL, + messages, + temperature: 0, + top_p: 0.1, + max_tokens: MAX_TOKENS_OUTPUT + }; + try { + const REQUEST_TOKENS = messages.map((msg) => tokenCount(msg.content) + 4).reduce((a2, b6) => a2 + b6, 0); + if (REQUEST_TOKENS > MAX_TOKENS_INPUT - MAX_TOKENS_OUTPUT) { + throw new Error("TOO_MUCH_TOKENS" /* tooMuchTokens */); + } + const { data } = await this.openAI.createChatCompletion(params); + const message = data.choices[0].message; + return message?.content; + } catch (error) { + ce(`${source_default.red("\u2716")} ${JSON.stringify(params)}`); + const err = error; + ce(`${source_default.red("\u2716")} ${err?.message || err}`); + if (axios_default.isAxiosError(error) && error.response?.status === 401) { + const openAiError = error.response.data.error; + if (openAiError?.message) + ce(openAiError.message); + ce( + "For help look into README https://github.com/di-sukharev/opencommit#setup" + ); + } + throw err; + } + }; if (basePath) { this.openAiApiConfiguration.basePath = basePath; } this.openAI = new import_openai2.OpenAIApi(this.openAiApiConfiguration); } - generateCommitMessage = async (messages) => { - const params = { - model: MODEL, - messages, - temperature: 0, - top_p: 0.1, - max_tokens: MAX_TOKENS_OUTPUT - }; +}; + +// node_modules/@google/generative-ai/dist/index.mjs +var POSSIBLE_ROLES = ["user", "model", "function", "system"]; +var HarmCategory; +(function(HarmCategory2) { + HarmCategory2["HARM_CATEGORY_UNSPECIFIED"] = "HARM_CATEGORY_UNSPECIFIED"; + HarmCategory2["HARM_CATEGORY_HATE_SPEECH"] = "HARM_CATEGORY_HATE_SPEECH"; + HarmCategory2["HARM_CATEGORY_SEXUALLY_EXPLICIT"] = "HARM_CATEGORY_SEXUALLY_EXPLICIT"; + HarmCategory2["HARM_CATEGORY_HARASSMENT"] = "HARM_CATEGORY_HARASSMENT"; + HarmCategory2["HARM_CATEGORY_DANGEROUS_CONTENT"] = "HARM_CATEGORY_DANGEROUS_CONTENT"; +})(HarmCategory || (HarmCategory = {})); +var HarmBlockThreshold; +(function(HarmBlockThreshold2) { + HarmBlockThreshold2["HARM_BLOCK_THRESHOLD_UNSPECIFIED"] = "HARM_BLOCK_THRESHOLD_UNSPECIFIED"; + HarmBlockThreshold2["BLOCK_LOW_AND_ABOVE"] = "BLOCK_LOW_AND_ABOVE"; + HarmBlockThreshold2["BLOCK_MEDIUM_AND_ABOVE"] = "BLOCK_MEDIUM_AND_ABOVE"; + HarmBlockThreshold2["BLOCK_ONLY_HIGH"] = "BLOCK_ONLY_HIGH"; + HarmBlockThreshold2["BLOCK_NONE"] = "BLOCK_NONE"; +})(HarmBlockThreshold || (HarmBlockThreshold = {})); +var HarmProbability; +(function(HarmProbability2) { + HarmProbability2["HARM_PROBABILITY_UNSPECIFIED"] = "HARM_PROBABILITY_UNSPECIFIED"; + HarmProbability2["NEGLIGIBLE"] = "NEGLIGIBLE"; + HarmProbability2["LOW"] = "LOW"; + HarmProbability2["MEDIUM"] = "MEDIUM"; + HarmProbability2["HIGH"] = "HIGH"; +})(HarmProbability || (HarmProbability = {})); +var BlockReason; +(function(BlockReason2) { + BlockReason2["BLOCKED_REASON_UNSPECIFIED"] = "BLOCKED_REASON_UNSPECIFIED"; + BlockReason2["SAFETY"] = "SAFETY"; + BlockReason2["OTHER"] = "OTHER"; +})(BlockReason || (BlockReason = {})); +var FinishReason; +(function(FinishReason2) 
{ + FinishReason2["FINISH_REASON_UNSPECIFIED"] = "FINISH_REASON_UNSPECIFIED"; + FinishReason2["STOP"] = "STOP"; + FinishReason2["MAX_TOKENS"] = "MAX_TOKENS"; + FinishReason2["SAFETY"] = "SAFETY"; + FinishReason2["RECITATION"] = "RECITATION"; + FinishReason2["OTHER"] = "OTHER"; +})(FinishReason || (FinishReason = {})); +var TaskType; +(function(TaskType2) { + TaskType2["TASK_TYPE_UNSPECIFIED"] = "TASK_TYPE_UNSPECIFIED"; + TaskType2["RETRIEVAL_QUERY"] = "RETRIEVAL_QUERY"; + TaskType2["RETRIEVAL_DOCUMENT"] = "RETRIEVAL_DOCUMENT"; + TaskType2["SEMANTIC_SIMILARITY"] = "SEMANTIC_SIMILARITY"; + TaskType2["CLASSIFICATION"] = "CLASSIFICATION"; + TaskType2["CLUSTERING"] = "CLUSTERING"; +})(TaskType || (TaskType = {})); +var FunctionCallingMode; +(function(FunctionCallingMode2) { + FunctionCallingMode2["MODE_UNSPECIFIED"] = "MODE_UNSPECIFIED"; + FunctionCallingMode2["AUTO"] = "AUTO"; + FunctionCallingMode2["ANY"] = "ANY"; + FunctionCallingMode2["NONE"] = "NONE"; +})(FunctionCallingMode || (FunctionCallingMode = {})); +var FunctionDeclarationSchemaType; +(function(FunctionDeclarationSchemaType2) { + FunctionDeclarationSchemaType2["STRING"] = "STRING"; + FunctionDeclarationSchemaType2["NUMBER"] = "NUMBER"; + FunctionDeclarationSchemaType2["INTEGER"] = "INTEGER"; + FunctionDeclarationSchemaType2["BOOLEAN"] = "BOOLEAN"; + FunctionDeclarationSchemaType2["ARRAY"] = "ARRAY"; + FunctionDeclarationSchemaType2["OBJECT"] = "OBJECT"; +})(FunctionDeclarationSchemaType || (FunctionDeclarationSchemaType = {})); +var GoogleGenerativeAIError = class extends Error { + constructor(message) { + super(`[GoogleGenerativeAI Error]: ${message}`); + } +}; +var GoogleGenerativeAIResponseError = class extends GoogleGenerativeAIError { + constructor(message, response) { + super(message); + this.response = response; + } +}; +var GoogleGenerativeAIFetchError = class extends GoogleGenerativeAIError { + constructor(message, status, statusText, errorDetails) { + super(message); + this.status = status; + this.statusText = statusText; + this.errorDetails = errorDetails; + } +}; +var GoogleGenerativeAIRequestInputError = class extends GoogleGenerativeAIError { +}; +var DEFAULT_BASE_URL = "https://generativelanguage.googleapis.com"; +var DEFAULT_API_VERSION = "v1beta"; +var PACKAGE_VERSION = "0.10.0"; +var PACKAGE_LOG_HEADER = "genai-js"; +var Task; +(function(Task2) { + Task2["GENERATE_CONTENT"] = "generateContent"; + Task2["STREAM_GENERATE_CONTENT"] = "streamGenerateContent"; + Task2["COUNT_TOKENS"] = "countTokens"; + Task2["EMBED_CONTENT"] = "embedContent"; + Task2["BATCH_EMBED_CONTENTS"] = "batchEmbedContents"; +})(Task || (Task = {})); +var RequestUrl = class { + constructor(model, task, apiKey2, stream4, requestOptions) { + this.model = model; + this.task = task; + this.apiKey = apiKey2; + this.stream = stream4; + this.requestOptions = requestOptions; + } + toString() { + var _a, _b; + const apiVersion = ((_a = this.requestOptions) === null || _a === void 0 ? void 0 : _a.apiVersion) || DEFAULT_API_VERSION; + const baseUrl = ((_b = this.requestOptions) === null || _b === void 0 ? void 0 : _b.baseUrl) || DEFAULT_BASE_URL; + let url3 = `${baseUrl}/${apiVersion}/${this.model}:${this.task}`; + if (this.stream) { + url3 += "?alt=sse"; + } + return url3; + } +}; +function getClientHeaders(requestOptions) { + const clientHeaders = []; + if (requestOptions === null || requestOptions === void 0 ? 
void 0 : requestOptions.apiClient) { + clientHeaders.push(requestOptions.apiClient); + } + clientHeaders.push(`${PACKAGE_LOG_HEADER}/${PACKAGE_VERSION}`); + return clientHeaders.join(" "); +} +async function getHeaders(url3) { + const headers = new Headers(); + headers.append("Content-Type", "application/json"); + headers.append("x-goog-api-client", getClientHeaders(url3.requestOptions)); + headers.append("x-goog-api-key", url3.apiKey); + let customHeaders = url3.requestOptions.customHeaders; + if (customHeaders) { + if (!(customHeaders instanceof Headers)) { + try { + customHeaders = new Headers(customHeaders); + } catch (e2) { + throw new GoogleGenerativeAIRequestInputError(`unable to convert customHeaders value ${JSON.stringify(customHeaders)} to Headers: ${e2.message}`); + } + } + for (const [headerName, headerValue] of customHeaders.entries()) { + if (headerName === "x-goog-api-key") { + throw new GoogleGenerativeAIRequestInputError(`Cannot set reserved header name ${headerName}`); + } else if (headerName === "x-goog-api-client") { + throw new GoogleGenerativeAIRequestInputError(`Header name ${headerName} can only be set using the apiClient field`); + } + headers.append(headerName, headerValue); + } + } + return headers; +} +async function constructRequest(model, task, apiKey2, stream4, body, requestOptions) { + const url3 = new RequestUrl(model, task, apiKey2, stream4, requestOptions); + return { + url: url3.toString(), + fetchOptions: Object.assign(Object.assign({}, buildFetchOptions(requestOptions)), { method: "POST", headers: await getHeaders(url3), body }) + }; +} +async function makeRequest(model, task, apiKey2, stream4, body, requestOptions) { + return _makeRequestInternal(model, task, apiKey2, stream4, body, requestOptions, fetch); +} +async function _makeRequestInternal(model, task, apiKey2, stream4, body, requestOptions, fetchFn = fetch) { + const url3 = new RequestUrl(model, task, apiKey2, stream4, requestOptions); + let response; + try { + const request = await constructRequest(model, task, apiKey2, stream4, body, requestOptions); + response = await fetchFn(request.url, request.fetchOptions); + if (!response.ok) { + let message = ""; + let errorDetails; + try { + const json = await response.json(); + message = json.error.message; + if (json.error.details) { + message += ` ${JSON.stringify(json.error.details)}`; + errorDetails = json.error.details; + } + } catch (e2) { + } + throw new GoogleGenerativeAIFetchError(`Error fetching from ${url3.toString()}: [${response.status} ${response.statusText}] ${message}`, response.status, response.statusText, errorDetails); + } + } catch (e2) { + let err = e2; + if (!(e2 instanceof GoogleGenerativeAIFetchError || e2 instanceof GoogleGenerativeAIRequestInputError)) { + err = new GoogleGenerativeAIError(`Error fetching from ${url3.toString()}: ${e2.message}`); + err.stack = e2.stack; + } + throw err; + } + return response; +} +function buildFetchOptions(requestOptions) { + const fetchOptions = {}; + if ((requestOptions === null || requestOptions === void 0 ? 
void 0 : requestOptions.timeout) >= 0) { + const abortController = new AbortController(); + const signal = abortController.signal; + setTimeout(() => abortController.abort(), requestOptions.timeout); + fetchOptions.signal = signal; + } + return fetchOptions; +} +function addHelpers(response) { + response.text = () => { + if (response.candidates && response.candidates.length > 0) { + if (response.candidates.length > 1) { + console.warn(`This response had ${response.candidates.length} candidates. Returning text from the first candidate only. Access response.candidates directly to use the other candidates.`); + } + if (hadBadFinishReason(response.candidates[0])) { + throw new GoogleGenerativeAIResponseError(`${formatBlockErrorMessage(response)}`, response); + } + return getText(response); + } else if (response.promptFeedback) { + throw new GoogleGenerativeAIResponseError(`Text not available. ${formatBlockErrorMessage(response)}`, response); + } + return ""; + }; + response.functionCall = () => { + if (response.candidates && response.candidates.length > 0) { + if (response.candidates.length > 1) { + console.warn(`This response had ${response.candidates.length} candidates. Returning function calls from the first candidate only. Access response.candidates directly to use the other candidates.`); + } + if (hadBadFinishReason(response.candidates[0])) { + throw new GoogleGenerativeAIResponseError(`${formatBlockErrorMessage(response)}`, response); + } + console.warn(`response.functionCall() is deprecated. Use response.functionCalls() instead.`); + return getFunctionCalls(response)[0]; + } else if (response.promptFeedback) { + throw new GoogleGenerativeAIResponseError(`Function call not available. ${formatBlockErrorMessage(response)}`, response); + } + return void 0; + }; + response.functionCalls = () => { + if (response.candidates && response.candidates.length > 0) { + if (response.candidates.length > 1) { + console.warn(`This response had ${response.candidates.length} candidates. Returning function calls from the first candidate only. Access response.candidates directly to use the other candidates.`); + } + if (hadBadFinishReason(response.candidates[0])) { + throw new GoogleGenerativeAIResponseError(`${formatBlockErrorMessage(response)}`, response); + } + return getFunctionCalls(response); + } else if (response.promptFeedback) { + throw new GoogleGenerativeAIResponseError(`Function call not available. ${formatBlockErrorMessage(response)}`, response); + } + return void 0; + }; + return response; +} +function getText(response) { + var _a, _b, _c, _d; + if ((_d = (_c = (_b = (_a = response.candidates) === null || _a === void 0 ? void 0 : _a[0].content) === null || _b === void 0 ? void 0 : _b.parts) === null || _c === void 0 ? void 0 : _c[0]) === null || _d === void 0 ? void 0 : _d.text) { + return response.candidates[0].content.parts.map(({ text }) => text).join(""); + } else { + return ""; + } +} +function getFunctionCalls(response) { + var _a, _b, _c, _d; + const functionCalls = []; + if ((_b = (_a = response.candidates) === null || _a === void 0 ? void 0 : _a[0].content) === null || _b === void 0 ? void 0 : _b.parts) { + for (const part of (_d = (_c = response.candidates) === null || _c === void 0 ? void 0 : _c[0].content) === null || _d === void 0 ? 
void 0 : _d.parts) { + if (part.functionCall) { + functionCalls.push(part.functionCall); + } + } + } + if (functionCalls.length > 0) { + return functionCalls; + } else { + return void 0; + } +} +var badFinishReasons = [FinishReason.RECITATION, FinishReason.SAFETY]; +function hadBadFinishReason(candidate) { + return !!candidate.finishReason && badFinishReasons.includes(candidate.finishReason); +} +function formatBlockErrorMessage(response) { + var _a, _b, _c; + let message = ""; + if ((!response.candidates || response.candidates.length === 0) && response.promptFeedback) { + message += "Response was blocked"; + if ((_a = response.promptFeedback) === null || _a === void 0 ? void 0 : _a.blockReason) { + message += ` due to ${response.promptFeedback.blockReason}`; + } + if ((_b = response.promptFeedback) === null || _b === void 0 ? void 0 : _b.blockReasonMessage) { + message += `: ${response.promptFeedback.blockReasonMessage}`; + } + } else if ((_c = response.candidates) === null || _c === void 0 ? void 0 : _c[0]) { + const firstCandidate = response.candidates[0]; + if (hadBadFinishReason(firstCandidate)) { + message += `Candidate was blocked due to ${firstCandidate.finishReason}`; + if (firstCandidate.finishMessage) { + message += `: ${firstCandidate.finishMessage}`; + } + } + } + return message; +} +function __await(v4) { + return this instanceof __await ? (this.v = v4, this) : new __await(v4); +} +function __asyncGenerator(thisArg, _arguments, generator) { + if (!Symbol.asyncIterator) + throw new TypeError("Symbol.asyncIterator is not defined."); + var g3 = generator.apply(thisArg, _arguments || []), i2, q5 = []; + return i2 = {}, verb("next"), verb("throw"), verb("return"), i2[Symbol.asyncIterator] = function() { + return this; + }, i2; + function verb(n) { + if (g3[n]) + i2[n] = function(v4) { + return new Promise(function(a2, b6) { + q5.push([n, v4, a2, b6]) > 1 || resume(n, v4); + }); + }; + } + function resume(n, v4) { try { - const REQUEST_TOKENS = messages.map((msg) => tokenCount(msg.content) + 4).reduce((a2, b6) => a2 + b6, 0); - if (REQUEST_TOKENS > MAX_TOKENS_INPUT - MAX_TOKENS_OUTPUT) { - throw new Error("TOO_MUCH_TOKENS" /* tooMuchTokens */); + step(g3[n](v4)); + } catch (e2) { + settle2(q5[0][3], e2); + } + } + function step(r2) { + r2.value instanceof __await ? 
Promise.resolve(r2.value.v).then(fulfill, reject) : settle2(q5[0][2], r2); + } + function fulfill(value) { + resume("next", value); + } + function reject(value) { + resume("throw", value); + } + function settle2(f3, v4) { + if (f3(v4), q5.shift(), q5.length) + resume(q5[0][0], q5[0][1]); + } +} +var responseLineRE = /^data\: (.*)(?:\n\n|\r\r|\r\n\r\n)/; +function processStream(response) { + const inputStream = response.body.pipeThrough(new TextDecoderStream("utf8", { fatal: true })); + const responseStream = getResponseStream(inputStream); + const [stream1, stream22] = responseStream.tee(); + return { + stream: generateResponseSequence(stream1), + response: getResponsePromise(stream22) + }; +} +async function getResponsePromise(stream4) { + const allResponses = []; + const reader = stream4.getReader(); + while (true) { + const { done, value } = await reader.read(); + if (done) { + return addHelpers(aggregateResponses(allResponses)); + } + allResponses.push(value); + } +} +function generateResponseSequence(stream4) { + return __asyncGenerator(this, arguments, function* generateResponseSequence_1() { + const reader = stream4.getReader(); + while (true) { + const { value, done } = yield __await(reader.read()); + if (done) { + break; } - const { data } = await this.openAI.createChatCompletion(params); - const message = data.choices[0].message; - return message?.content; - } catch (error) { - ce(`${source_default.red("\u2716")} ${JSON.stringify(params)}`); - const err = error; - ce(`${source_default.red("\u2716")} ${err?.message || err}`); - if (axios_default.isAxiosError(error) && error.response?.status === 401) { - const openAiError = error.response.data.error; - if (openAiError?.message) - ce(openAiError.message); - ce( - "For help look into README https://github.com/di-sukharev/opencommit#setup" - ); + yield yield __await(addHelpers(value)); + } + }); +} +function getResponseStream(inputStream) { + const reader = inputStream.getReader(); + const stream4 = new ReadableStream({ + start(controller) { + let currentText = ""; + return pump(); + function pump() { + return reader.read().then(({ value, done }) => { + if (done) { + if (currentText.trim()) { + controller.error(new GoogleGenerativeAIError("Failed to parse stream")); + return; + } + controller.close(); + return; + } + currentText += value; + let match = currentText.match(responseLineRE); + let parsedResponse; + while (match) { + try { + parsedResponse = JSON.parse(match[1]); + } catch (e2) { + controller.error(new GoogleGenerativeAIError(`Error parsing JSON response: "${match[1]}"`)); + return; + } + controller.enqueue(parsedResponse); + currentText = currentText.substring(match[0].length); + match = currentText.match(responseLineRE); + } + return pump(); + }); } - throw err; } + }); + return stream4; +} +function aggregateResponses(responses) { + const lastResponse = responses[responses.length - 1]; + const aggregatedResponse = { + promptFeedback: lastResponse === null || lastResponse === void 0 ? 
void 0 : lastResponse.promptFeedback }; + for (const response of responses) { + if (response.candidates) { + for (const candidate of response.candidates) { + const i2 = candidate.index; + if (!aggregatedResponse.candidates) { + aggregatedResponse.candidates = []; + } + if (!aggregatedResponse.candidates[i2]) { + aggregatedResponse.candidates[i2] = { + index: candidate.index + }; + } + aggregatedResponse.candidates[i2].citationMetadata = candidate.citationMetadata; + aggregatedResponse.candidates[i2].finishReason = candidate.finishReason; + aggregatedResponse.candidates[i2].finishMessage = candidate.finishMessage; + aggregatedResponse.candidates[i2].safetyRatings = candidate.safetyRatings; + if (candidate.content && candidate.content.parts) { + if (!aggregatedResponse.candidates[i2].content) { + aggregatedResponse.candidates[i2].content = { + role: candidate.content.role || "user", + parts: [] + }; + } + const newPart = {}; + for (const part of candidate.content.parts) { + if (part.text) { + newPart.text = part.text; + } + if (part.functionCall) { + newPart.functionCall = part.functionCall; + } + if (Object.keys(newPart).length === 0) { + newPart.text = ""; + } + aggregatedResponse.candidates[i2].content.parts.push(newPart); + } + } + } + } + } + return aggregatedResponse; +} +async function generateContentStream(apiKey2, model, params, requestOptions) { + const response = await makeRequest( + model, + Task.STREAM_GENERATE_CONTENT, + apiKey2, + true, + JSON.stringify(params), + requestOptions + ); + return processStream(response); +} +async function generateContent(apiKey2, model, params, requestOptions) { + const response = await makeRequest( + model, + Task.GENERATE_CONTENT, + apiKey2, + false, + JSON.stringify(params), + requestOptions + ); + const responseJson = await response.json(); + const enhancedResponse = addHelpers(responseJson); + return { + response: enhancedResponse + }; +} +function formatSystemInstruction(input) { + if (input == null) { + return void 0; + } else if (typeof input === "string") { + return { role: "system", parts: [{ text: input }] }; + } else if (input.text) { + return { role: "system", parts: [input] }; + } else if (input.parts) { + if (!input.role) { + return { role: "system", parts: input.parts }; + } else { + return input; + } + } +} +function formatNewContent(request) { + let newParts = []; + if (typeof request === "string") { + newParts = [{ text: request }]; + } else { + for (const partOrString of request) { + if (typeof partOrString === "string") { + newParts.push({ text: partOrString }); + } else { + newParts.push(partOrString); + } + } + } + return assignRoleToPartsAndValidateSendMessageRequest(newParts); +} +function assignRoleToPartsAndValidateSendMessageRequest(parts) { + const userContent = { role: "user", parts: [] }; + const functionContent = { role: "function", parts: [] }; + let hasUserContent = false; + let hasFunctionContent = false; + for (const part of parts) { + if ("functionResponse" in part) { + functionContent.parts.push(part); + hasFunctionContent = true; + } else { + userContent.parts.push(part); + hasUserContent = true; + } + } + if (hasUserContent && hasFunctionContent) { + throw new GoogleGenerativeAIError("Within a single message, FunctionResponse cannot be mixed with other type of part in the request for sending chat message."); + } + if (!hasUserContent && !hasFunctionContent) { + throw new GoogleGenerativeAIError("No content is provided for sending chat message."); + } + if (hasUserContent) { + return userContent; + } + 
return functionContent; +} +function formatGenerateContentInput(params) { + let formattedRequest; + if (params.contents) { + formattedRequest = params; + } else { + const content = formatNewContent(params); + formattedRequest = { contents: [content] }; + } + if (params.systemInstruction) { + formattedRequest.systemInstruction = formatSystemInstruction(params.systemInstruction); + } + return formattedRequest; +} +function formatEmbedContentInput(params) { + if (typeof params === "string" || Array.isArray(params)) { + const content = formatNewContent(params); + return { content }; + } + return params; +} +var VALID_PART_FIELDS = [ + "text", + "inlineData", + "functionCall", + "functionResponse" +]; +var VALID_PARTS_PER_ROLE = { + user: ["text", "inlineData"], + function: ["functionResponse"], + model: ["text", "functionCall"], + system: ["text"] +}; +var VALID_PREVIOUS_CONTENT_ROLES = { + user: ["model"], + function: ["model"], + model: ["user", "function"], + system: [] +}; +function validateChatHistory(history) { + let prevContent; + for (const currContent of history) { + const { role, parts } = currContent; + if (!prevContent && role !== "user") { + throw new GoogleGenerativeAIError(`First content should be with role 'user', got ${role}`); + } + if (!POSSIBLE_ROLES.includes(role)) { + throw new GoogleGenerativeAIError(`Each item should include role field. Got ${role} but valid roles are: ${JSON.stringify(POSSIBLE_ROLES)}`); + } + if (!Array.isArray(parts)) { + throw new GoogleGenerativeAIError("Content should have 'parts' property with an array of Parts"); + } + if (parts.length === 0) { + throw new GoogleGenerativeAIError("Each Content should have at least one part"); + } + const countFields = { + text: 0, + inlineData: 0, + functionCall: 0, + functionResponse: 0, + fileData: 0 + }; + for (const part of parts) { + for (const key of VALID_PART_FIELDS) { + if (key in part) { + countFields[key] += 1; + } + } + } + const validParts = VALID_PARTS_PER_ROLE[role]; + for (const key of VALID_PART_FIELDS) { + if (!validParts.includes(key) && countFields[key] > 0) { + throw new GoogleGenerativeAIError(`Content with role '${role}' can't contain '${key}' part`); + } + } + if (prevContent) { + const validPreviousContentRoles = VALID_PREVIOUS_CONTENT_ROLES[role]; + if (!validPreviousContentRoles.includes(prevContent.role)) { + throw new GoogleGenerativeAIError(`Content with role '${role}' can't follow '${prevContent.role}'. Valid previous roles: ${JSON.stringify(VALID_PREVIOUS_CONTENT_ROLES)}`); + } + } + prevContent = currContent; + } +} +var SILENT_ERROR = "SILENT_ERROR"; +var ChatSession = class { + constructor(apiKey2, model, params, requestOptions) { + this.model = model; + this.params = params; + this.requestOptions = requestOptions; + this._history = []; + this._sendPromise = Promise.resolve(); + this._apiKey = apiKey2; + if (params === null || params === void 0 ? void 0 : params.history) { + validateChatHistory(params.history); + this._history = params.history; + } + } + async getHistory() { + await this._sendPromise; + return this._history; + } + async sendMessage(request) { + var _a, _b, _c, _d, _e; + await this._sendPromise; + const newContent = formatNewContent(request); + const generateContentRequest = { + safetySettings: (_a = this.params) === null || _a === void 0 ? void 0 : _a.safetySettings, + generationConfig: (_b = this.params) === null || _b === void 0 ? void 0 : _b.generationConfig, + tools: (_c = this.params) === null || _c === void 0 ? 
void 0 : _c.tools, + toolConfig: (_d = this.params) === null || _d === void 0 ? void 0 : _d.toolConfig, + systemInstruction: (_e = this.params) === null || _e === void 0 ? void 0 : _e.systemInstruction, + contents: [...this._history, newContent] + }; + let finalResult; + this._sendPromise = this._sendPromise.then(() => generateContent(this._apiKey, this.model, generateContentRequest, this.requestOptions)).then((result) => { + var _a2; + if (result.response.candidates && result.response.candidates.length > 0) { + this._history.push(newContent); + const responseContent = Object.assign({ + parts: [], + role: "model" + }, (_a2 = result.response.candidates) === null || _a2 === void 0 ? void 0 : _a2[0].content); + this._history.push(responseContent); + } else { + const blockErrorMessage = formatBlockErrorMessage(result.response); + if (blockErrorMessage) { + console.warn(`sendMessage() was unsuccessful. ${blockErrorMessage}. Inspect response object for details.`); + } + } + finalResult = result; + }); + await this._sendPromise; + return finalResult; + } + async sendMessageStream(request) { + var _a, _b, _c, _d, _e; + await this._sendPromise; + const newContent = formatNewContent(request); + const generateContentRequest = { + safetySettings: (_a = this.params) === null || _a === void 0 ? void 0 : _a.safetySettings, + generationConfig: (_b = this.params) === null || _b === void 0 ? void 0 : _b.generationConfig, + tools: (_c = this.params) === null || _c === void 0 ? void 0 : _c.tools, + toolConfig: (_d = this.params) === null || _d === void 0 ? void 0 : _d.toolConfig, + systemInstruction: (_e = this.params) === null || _e === void 0 ? void 0 : _e.systemInstruction, + contents: [...this._history, newContent] + }; + const streamPromise = generateContentStream(this._apiKey, this.model, generateContentRequest, this.requestOptions); + this._sendPromise = this._sendPromise.then(() => streamPromise).catch((_ignored) => { + throw new Error(SILENT_ERROR); + }).then((streamResult) => streamResult.response).then((response) => { + if (response.candidates && response.candidates.length > 0) { + this._history.push(newContent); + const responseContent = Object.assign({}, response.candidates[0].content); + if (!responseContent.role) { + responseContent.role = "model"; + } + this._history.push(responseContent); + } else { + const blockErrorMessage = formatBlockErrorMessage(response); + if (blockErrorMessage) { + console.warn(`sendMessageStream() was unsuccessful. ${blockErrorMessage}. 
Inspect response object for details.`); + } + } + }).catch((e2) => { + if (e2.message !== SILENT_ERROR) { + console.error(e2); + } + }); + return streamPromise; + } +}; +async function countTokens(apiKey2, model, params, requestOptions) { + const response = await makeRequest(model, Task.COUNT_TOKENS, apiKey2, false, JSON.stringify(Object.assign(Object.assign({}, params), { model })), requestOptions); + return response.json(); +} +async function embedContent(apiKey2, model, params, requestOptions) { + const response = await makeRequest(model, Task.EMBED_CONTENT, apiKey2, false, JSON.stringify(params), requestOptions); + return response.json(); +} +async function batchEmbedContents(apiKey2, model, params, requestOptions) { + const requestsWithModel = params.requests.map((request) => { + return Object.assign(Object.assign({}, request), { model }); + }); + const response = await makeRequest(model, Task.BATCH_EMBED_CONTENTS, apiKey2, false, JSON.stringify({ requests: requestsWithModel }), requestOptions); + return response.json(); +} +var GenerativeModel = class { + constructor(apiKey2, modelParams, requestOptions) { + this.apiKey = apiKey2; + if (modelParams.model.includes("/")) { + this.model = modelParams.model; + } else { + this.model = `models/${modelParams.model}`; + } + this.generationConfig = modelParams.generationConfig || {}; + this.safetySettings = modelParams.safetySettings || []; + this.tools = modelParams.tools; + this.toolConfig = modelParams.toolConfig; + this.systemInstruction = formatSystemInstruction(modelParams.systemInstruction); + this.requestOptions = requestOptions || {}; + } + async generateContent(request) { + const formattedParams = formatGenerateContentInput(request); + return generateContent(this.apiKey, this.model, Object.assign({ generationConfig: this.generationConfig, safetySettings: this.safetySettings, tools: this.tools, toolConfig: this.toolConfig, systemInstruction: this.systemInstruction }, formattedParams), this.requestOptions); + } + async generateContentStream(request) { + const formattedParams = formatGenerateContentInput(request); + return generateContentStream(this.apiKey, this.model, Object.assign({ generationConfig: this.generationConfig, safetySettings: this.safetySettings, tools: this.tools, toolConfig: this.toolConfig, systemInstruction: this.systemInstruction }, formattedParams), this.requestOptions); + } + startChat(startChatParams) { + return new ChatSession(this.apiKey, this.model, Object.assign({ generationConfig: this.generationConfig, safetySettings: this.safetySettings, tools: this.tools, toolConfig: this.toolConfig, systemInstruction: this.systemInstruction }, startChatParams), this.requestOptions); + } + async countTokens(request) { + const formattedParams = formatGenerateContentInput(request); + return countTokens(this.apiKey, this.model, formattedParams, this.requestOptions); + } + async embedContent(request) { + const formattedParams = formatEmbedContentInput(request); + return embedContent(this.apiKey, this.model, formattedParams, this.requestOptions); + } + async batchEmbedContents(batchEmbedContentRequest) { + return batchEmbedContents(this.apiKey, this.model, batchEmbedContentRequest, this.requestOptions); + } +}; +var GoogleGenerativeAI = class { + constructor(apiKey2) { + this.apiKey = apiKey2; + } + getGenerativeModel(modelParams, requestOptions) { + if (!modelParams.model) { + throw new GoogleGenerativeAIError(`Must provide a model name. 
Example: genai.getGenerativeModel({ model: 'my-model-name' })`); + } + return new GenerativeModel(this.apiKey, modelParams, requestOptions); + } +}; + +// src/engine/gemini.ts +var GeminiAi = class { + constructor() { + this.config = getConfig(); + const config8 = getConfig(); + const apiKey2 = config8?.OCO_API_KEY || config8?.OCO_OPENAI_API_KEY; + this.genAi = new GoogleGenerativeAI(apiKey2); + const isGeminiModel = (config8?.OCO_AI_PROVIDER).trim().toLowerCase() === "gemini"; + const [command2, mode2] = process.argv.slice(2); + if (!apiKey2 && command2 !== "config" && mode2 !== "set" /* set */ && !isGeminiModel) { + ae("opencommit"); + ce( + "OCO_API_KEY is not set, please run `oco config set OCO_API_KEY= . If you are using GPT, make sure you add payment details, so API works.`" + ); + ce( + "For help look into README https://github.com/di-sukharev/opencommit#setup" + ); + process.exit(1); + } + const VALID_MODELS = ["gemini-1.0-pro-latest", "gemini-pro", "gemini-1.0-pro-001", "gemini-1.5-pro-latest"]; + const DEFAULT_MODEL = "gemini-1.5-pro-latest"; + const MODEL2 = (config8?.OCO_MODEL || DEFAULT_MODEL).trim().toLowerCase(); + if (!VALID_MODELS.includes(MODEL2)) { + ae("opencommit"); + ce( + `OCO_MODEL is not set to a valid model. Please run 'oco config set OCO_MODEL=${DEFAULT_MODEL}'` + ); + ce("Valid models are: " + VALID_MODELS.join(", ")); + process.exit(1); + } + this.model = this.genAi.getGenerativeModel({ + model: MODEL2, + safetySettings: [ + { + category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT, + threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE + }, + { + category: HarmCategory.HARM_CATEGORY_HATE_SPEECH, + threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE + }, + { + category: HarmCategory.HARM_CATEGORY_HARASSMENT, + threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE + }, + { + category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, + threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE + } + ] + }); + } + get MAX_TOKENS_OUTPUT() { + return this.config?.OCO_TOKENS_MAX_OUTPUT || 30720; + } + get MAX_TOKENS_INPUT() { + return this.config?.OCO_TOKENS_MAX_INPUT || 4096 /* DEFAULT_MAX_TOKENS_INPUT */; + } + async generateCommitMessage(messages) { + const systemMessages = []; + const history = { + user: [], + model: [] + }; + systemMessages.push(...messages.map((m4) => { + if (m4.role === "system") { + return { text: `${m4.content} + + + The commit message should start with a single subject line that is a brief overview description that summarizes all of the changes. It should not + exceed 50 characters and should be capitalized and written in the imperative mood. The subject line should be followed by a blank line and then the body of the commit message. + + + The body of the commit should provide more details about the changes made. Each commit message should be a single logical change. + + + Here's an example of a well-formed commit message: + + + Adds support for the Gemini language model engine, allowing users to utilize Gemini for generating commit messages. 
+ + + \u2728 (utils/engine.ts): add support for Gemini engine + + \u267B\uFE0F (openAi.ts & utils/engine.ts): add support for OCO_API_KEY env variable to configure apiKey + + ` }; + } + if (m4.role === "user") { + return { text: `This is an example of a git diff --staged command output, it should not be included in the commit message: + +${m4.content}` }; + } + return { text: m4.content }; + })); + let prompt = [ + { + role: "user", + parts: [...history.user] + }, + { + role: "model", + parts: [...history.model] + } + ]; + const requestTokens = await this.model.countTokens(prompt.map((p4) => p4.parts.join("\n"))); + const tokenLimit = Math.abs(this.MAX_TOKENS_INPUT - this.MAX_TOKENS_OUTPUT); + if (requestTokens.totalTokens > tokenLimit) { + throw new Error("TOO_MUCH_TOKENS" /* tooMuchTokens */); + } + try { + const chat = await this.model.startChat({ + systemInstruction: { role: "system", parts: systemMessages } + }); + const result = await chat.sendMessage([ + { text: "You MUST NEVER include any of the output from the `git diff --staged` command in your commit message, and you can ignore changes inside of the `out` directory." }, + { text: `You should include a brief summary of changes to each file in the 'git diff --staged' output as part of the commit message.` }, + { text: "Lastly, please do not include contextual information explaining new libraries or tools that were added to the project. This information is not necessary for the commit message. The commit message should concisely focus on the changes made to the codebase." } + ]); + const response = await result.response; + const answer = response.text(); + return answer; + } catch (err) { + const error = err; + ce(`${source_default.red("\u2716")} ${err?.message || err}`); + throw error; + } + } }; -var api = new OpenAi(); // src/engine/ollama.ts var OllamaAi = class { @@ -22027,15 +22933,16 @@ var OllamaAi = class { } } }; -var ollamaAi = new OllamaAi(); // src/utils/engine.ts function getEngine() { const config8 = getConfig(); if (config8?.OCO_AI_PROVIDER == "ollama") { - return ollamaAi; + return new OllamaAi(); + } else if (config8?.OCO_AI_PROVIDER == "gemini") { + return new GeminiAi(); } - return api; + return new OpenAi(); } // src/modules/commitlint/config.ts @@ -22090,6 +22997,7 @@ var IDENTITY = "You are to act as the author of a commit message in git."; var INIT_MAIN_PROMPT2 = (language, fullGitMojiSpec) => ({ role: import_openai3.ChatCompletionRequestMessageRoleEnum.System, content: `${IDENTITY} Your mission is to create clean and comprehensive commit messages as per the ${fullGitMojiSpec ? "GitMoji specification" : "conventional commit convention"} and explain WHAT were the changes and mainly WHY the changes were done. I'll send you an output of 'git diff --staged' command, and you are to convert it into a commit message. + You should only include the changes that are relevant to the commit. Do not include any changes that are not relevant to the commit. ${config5?.OCO_EMOJI ? `Use GitMoji convention to preface the commit. Here are some help to choose the right emoji (emoji, description): \u{1F41B}, Fix a bug; \u2728, Introduce new features; \u{1F4DD}, Add or update documentation; \u{1F680}, Deploy stuff; \u2705, Add, update, or pass tests; \u267B\uFE0F, Refactor code; \u2B06\uFE0F, Upgrade dependencies; \u{1F527}, Add or update configuration files; \u{1F310}, Internationalization and localization; \u{1F4A1}, Add or update comments in source code; ${fullGitMojiSpec ? 
"\u{1F3A8}, Improve structure / format of the code; \u26A1\uFE0F, Improve performance; \u{1F525}, Remove code or files; \u{1F691}\uFE0F, Critical hotfix; \u{1F484}, Add or update the UI and style files; \u{1F389}, Begin a project; \u{1F512}\uFE0F, Fix security issues; \u{1F510}, Add or update secrets; \u{1F516}, Release / Version tags; \u{1F6A8}, Fix compiler / linter warnings; \u{1F6A7}, Work in progress; \u{1F49A}, Fix CI Build; \u2B07\uFE0F, Downgrade dependencies; \u{1F4CC}, Pin dependencies to specific versions; \u{1F477}, Add or update CI build system; \u{1F4C8}, Add or update analytics or track code; \u2795, Add a dependency; \u2796, Remove a dependency; \u{1F528}, Add or update development scripts; \u270F\uFE0F, Fix typos; \u{1F4A9}, Write bad code that needs to be improved; \u23EA\uFE0F, Revert changes; \u{1F500}, Merge branches; \u{1F4E6}\uFE0F, Add or update compiled files or packages; \u{1F47D}\uFE0F, Update code due to external API changes; \u{1F69A}, Move or rename resources (e.g.: files, paths, routes); \u{1F4C4}, Add or update license; \u{1F4A5}, Introduce breaking changes; \u{1F371}, Add or update assets; \u267F\uFE0F, Improve accessibility; \u{1F37B}, Write code drunkenly; \u{1F4AC}, Add or update text and literals; \u{1F5C3}\uFE0F, Perform database related changes; \u{1F50A}, Add or update logs; \u{1F507}, Remove logs; \u{1F465}, Add or update contributor(s); \u{1F6B8}, Improve user experience / usability; \u{1F3D7}\uFE0F, Make architectural changes; \u{1F4F1}, Work on responsive design; \u{1F921}, Mock things; \u{1F95A}, Add or update an easter egg; \u{1F648}, Add or update a .gitignore file; \u{1F4F8}, Add or update snapshots; \u2697\uFE0F, Perform experiments; \u{1F50D}\uFE0F, Improve SEO; \u{1F3F7}\uFE0F, Add or update types; \u{1F331}, Add or update seed files; \u{1F6A9}, Add, update, or remove feature flags; \u{1F945}, Catch errors; \u{1F4AB}, Add or update animations and transitions; \u{1F5D1}\uFE0F, Deprecate code that needs to be cleaned up; \u{1F6C2}, Work on code related to authorization, roles and permissions; \u{1FA79}, Simple fix for a non-critical issue; \u{1F9D0}, Data exploration/inspection; \u26B0\uFE0F, Remove dead code; \u{1F9EA}, Add a failing test; \u{1F454}, Add or update business logic; \u{1FA7A}, Add or update healthcheck; \u{1F9F1}, Infrastructure related changes; \u{1F9D1}\u200D\u{1F4BB}, Improve developer experience; \u{1F4B8}, Add sponsorships or money related infrastructure; \u{1F9F5}, Add or update code related to multithreading or concurrency; \u{1F9BA}, Add or update code related to validation." : ""}` : "Do not preface the commit with anything. Conventional commit keywords:fix, feat, build, chore, ci, docs, style, refactor, perf, test."} ${config5?.OCO_DESCRIPTION ? `Add a short description of WHY the changes are done after the commit message. Don't start it with "This commit", just describe the changes.` : "Don't add any descriptions to the commit, only commit message."} ${config5?.OCO_ONE_LINE_COMMIT ? "Craft a concise commit message that encapsulates all changes made, with an emphasis on the primary updates. If the modifications share a common theme or scope, mention it succinctly; otherwise, leave the scope out to maintain focus. The goal is to provide a clear and unified overview of the changes in a one single message, without diverging into a list of commit per file change." 
: ""} @@ -22194,7 +23102,7 @@ var GenerateCommitMessageErrorEnum = ((GenerateCommitMessageErrorEnum2) => { return GenerateCommitMessageErrorEnum2; })(GenerateCommitMessageErrorEnum || {}); var ADJUSTMENT_FACTOR = 20; -var generateCommitMessageByDiff = async (diff, fullGitMojiSpec) => { +var generateCommitMessageByDiff = async (diff, fullGitMojiSpec = false) => { try { const INIT_MESSAGES_PROMPT = await getMainCommitPrompt(fullGitMojiSpec); const INIT_MESSAGES_PROMPT_LENGTH = INIT_MESSAGES_PROMPT.map( @@ -22207,11 +23115,9 @@ var generateCommitMessageByDiff = async (diff, fullGitMojiSpec) => { MAX_REQUEST_TOKENS, fullGitMojiSpec ); - const commitMessages = []; - for (const promise of commitMessagePromises) { - commitMessages.push(await promise); - await delay(2e3); - } + let commitMessages = []; + const messages2 = await Promise.all(commitMessagePromises); + commitMessages = [...commitMessages, ...messages2]; return commitMessages.join("\n\n"); } const messages = await generateCommitMessageChatCompletionPrompt(diff, fullGitMojiSpec); @@ -22303,9 +23209,6 @@ var getCommitMsgsPromisesFromFileDiffs = async (diff, maxDiffLength, fullGitMoji } return commitMessagePromises; }; -function delay(ms) { - return new Promise((resolve) => setTimeout(resolve, ms)); -} // src/utils/git.ts var import_fs2 = require("fs"); @@ -22711,7 +23614,7 @@ var prepareCommitMessageHook = async (isStageAllFlag = false) => { return; ae("opencommit"); const config8 = getConfig(); - if (!config8?.OCO_OPENAI_API_KEY) { + if (!config8?.OCO_OPENAI_API_KEY && !!config8?.OCO_API_KEY) { throw new Error( "No OPEN_AI_API exists. Set your OPEN_AI_API= in ~/.opencommit" ); @@ -22798,3 +23701,19 @@ Z2( * Copyright(c) 2015 Douglas Christopher Wilson * MIT Licensed */ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ diff --git a/out/github-action.cjs b/out/github-action.cjs index 967d82a3..55912f39 100644 --- a/out/github-action.cjs +++ b/out/github-action.cjs @@ -1093,11 +1093,11 @@ var require_lib = __commonJS({ HttpCodes2[HttpCodes2["ServiceUnavailable"] = 503] = "ServiceUnavailable"; HttpCodes2[HttpCodes2["GatewayTimeout"] = 504] = "GatewayTimeout"; })(HttpCodes = exports.HttpCodes || (exports.HttpCodes = {})); - var Headers; - (function(Headers2) { - Headers2["Accept"] = "accept"; - Headers2["ContentType"] = "content-type"; - })(Headers = exports.Headers || (exports.Headers = {})); + var Headers2; + (function(Headers3) { + Headers3["Accept"] = "accept"; + Headers3["ContentType"] = "content-type"; + })(Headers2 = exports.Headers || (exports.Headers = {})); var MediaTypes; (function(MediaTypes2) { MediaTypes2["ApplicationJson"] = "application/json"; @@ -1235,7 +1235,7 @@ var require_lib = __commonJS({ } getJson(requestUrl, additionalHeaders = {}) { return __awaiter(this, void 0, void 0, function* () { - additionalHeaders[Headers.Accept] = this._getExistingOrDefaultHeader(additionalHeaders, Headers.Accept, MediaTypes.ApplicationJson); + additionalHeaders[Headers2.Accept] = this._getExistingOrDefaultHeader(additionalHeaders, Headers2.Accept, MediaTypes.ApplicationJson); const res = yield this.get(requestUrl, additionalHeaders); return this._processResponse(res, this.requestOptions); }); @@ -1243,8 +1243,8 @@ var require_lib = __commonJS({ postJson(requestUrl, obj, additionalHeaders = {}) { return __awaiter(this, void 0, void 0, function* () { const data = JSON.stringify(obj, null, 2); - additionalHeaders[Headers.Accept] = this._getExistingOrDefaultHeader(additionalHeaders, Headers.Accept, MediaTypes.ApplicationJson); - additionalHeaders[Headers.ContentType] = this._getExistingOrDefaultHeader(additionalHeaders, Headers.ContentType, MediaTypes.ApplicationJson); + additionalHeaders[Headers2.Accept] = this._getExistingOrDefaultHeader(additionalHeaders, Headers2.Accept, MediaTypes.ApplicationJson); + additionalHeaders[Headers2.ContentType] = this._getExistingOrDefaultHeader(additionalHeaders, Headers2.ContentType, MediaTypes.ApplicationJson); const res = yield this.post(requestUrl, data, additionalHeaders); return this._processResponse(res, this.requestOptions); }); @@ -1252,8 +1252,8 @@ var require_lib = __commonJS({ putJson(requestUrl, obj, additionalHeaders = {}) { return __awaiter(this, void 0, void 0, function* () { const data = JSON.stringify(obj, null, 2); - additionalHeaders[Headers.Accept] = this._getExistingOrDefaultHeader(additionalHeaders, Headers.Accept, MediaTypes.ApplicationJson); - additionalHeaders[Headers.ContentType] = this._getExistingOrDefaultHeader(additionalHeaders, Headers.ContentType, MediaTypes.ApplicationJson); + additionalHeaders[Headers2.Accept] = this._getExistingOrDefaultHeader(additionalHeaders, Headers2.Accept, MediaTypes.ApplicationJson); + additionalHeaders[Headers2.ContentType] = this._getExistingOrDefaultHeader(additionalHeaders, Headers2.ContentType, MediaTypes.ApplicationJson); const res = yield this.put(requestUrl, data, additionalHeaders); return this._processResponse(res, this.requestOptions); }); @@ -1261,8 +1261,8 @@ var require_lib = __commonJS({ patchJson(requestUrl, obj, additionalHeaders = {}) { return __awaiter(this, void 0, void 0, function* () { const data = JSON.stringify(obj, null, 2); - additionalHeaders[Headers.Accept] = this._getExistingOrDefaultHeader(additionalHeaders, Headers.Accept, MediaTypes.ApplicationJson); - 
additionalHeaders[Headers.ContentType] = this._getExistingOrDefaultHeader(additionalHeaders, Headers.ContentType, MediaTypes.ApplicationJson); + additionalHeaders[Headers2.Accept] = this._getExistingOrDefaultHeader(additionalHeaders, Headers2.Accept, MediaTypes.ApplicationJson); + additionalHeaders[Headers2.ContentType] = this._getExistingOrDefaultHeader(additionalHeaders, Headers2.ContentType, MediaTypes.ApplicationJson); const res = yield this.patch(requestUrl, data, additionalHeaders); return this._processResponse(res, this.requestOptions); }); @@ -6025,11 +6025,11 @@ var require_lib3 = __commonJS({ return void 0; } var MAP = Symbol("map"); - var Headers = class { + var Headers2 = class { constructor() { let init = arguments.length > 0 && arguments[0] !== void 0 ? arguments[0] : void 0; this[MAP] = /* @__PURE__ */ Object.create(null); - if (init instanceof Headers) { + if (init instanceof Headers2) { const rawHeaders = init.raw(); const headerNames = Object.keys(rawHeaders); for (const headerName of headerNames) { @@ -6081,13 +6081,13 @@ var require_lib3 = __commonJS({ } forEach(callback) { let thisArg = arguments.length > 1 && arguments[1] !== void 0 ? arguments[1] : void 0; - let pairs = getHeaders(this); + let pairs = getHeaders2(this); let i2 = 0; while (i2 < pairs.length) { var _pairs$i = pairs[i2]; const name = _pairs$i[0], value = _pairs$i[1]; callback.call(thisArg, value, name, this); - pairs = getHeaders(this); + pairs = getHeaders2(this); i2++; } } @@ -6137,14 +6137,14 @@ var require_lib3 = __commonJS({ return createHeadersIterator(this, "key+value"); } }; - Headers.prototype.entries = Headers.prototype[Symbol.iterator]; - Object.defineProperty(Headers.prototype, Symbol.toStringTag, { + Headers2.prototype.entries = Headers2.prototype[Symbol.iterator]; + Object.defineProperty(Headers2.prototype, Symbol.toStringTag, { value: "Headers", writable: false, enumerable: false, configurable: true }); - Object.defineProperties(Headers.prototype, { + Object.defineProperties(Headers2.prototype, { get: { enumerable: true }, forEach: { enumerable: true }, set: { enumerable: true }, @@ -6155,7 +6155,7 @@ var require_lib3 = __commonJS({ values: { enumerable: true }, entries: { enumerable: true } }); - function getHeaders(headers) { + function getHeaders2(headers) { let kind = arguments.length > 1 && arguments[1] !== void 0 ? arguments[1] : "key+value"; const keys = Object.keys(headers[MAP]).sort(); return keys.map(kind === "key" ? function(k3) { @@ -6183,7 +6183,7 @@ var require_lib3 = __commonJS({ } var _INTERNAL = this[INTERNAL]; const target = _INTERNAL.target, kind = _INTERNAL.kind, index = _INTERNAL.index; - const values = getHeaders(target, kind); + const values = getHeaders2(target, kind); const len = values.length; if (index >= len) { return { @@ -6213,7 +6213,7 @@ var require_lib3 = __commonJS({ return obj; } function createHeadersLenient(obj) { - const headers = new Headers(); + const headers = new Headers2(); for (const name of Object.keys(obj)) { if (invalidTokenRegex.test(name)) { continue; @@ -6243,7 +6243,7 @@ var require_lib3 = __commonJS({ let opts = arguments.length > 1 && arguments[1] !== void 0 ? 
arguments[1] : {}; Body.call(this, body, opts); const status = opts.status || 200; - const headers = new Headers(opts.headers); + const headers = new Headers2(opts.headers); if (body != null && !headers.has("Content-Type")) { const contentType = extractContentType(body); if (contentType) { @@ -6345,7 +6345,7 @@ var require_lib3 = __commonJS({ timeout: init.timeout || input.timeout || 0, size: init.size || input.size || 0 }); - const headers = new Headers(init.headers || input.headers || {}); + const headers = new Headers2(init.headers || input.headers || {}); if (inputBody != null && !headers.has("Content-Type")) { const contentType = extractContentType(inputBody); if (contentType) { @@ -6406,7 +6406,7 @@ var require_lib3 = __commonJS({ }); function getNodeRequestOptions(request) { const parsedURL = request[INTERNALS$2].parsedURL; - const headers = new Headers(request[INTERNALS$2].headers); + const headers = new Headers2(request[INTERNALS$2].headers); if (!headers.has("Accept")) { headers.set("Accept", "*/*"); } @@ -6472,12 +6472,12 @@ var require_lib3 = __commonJS({ const dest = new URL$1(destination).protocol; return orig === dest; }; - function fetch(url2, opts) { - if (!fetch.Promise) { + function fetch2(url2, opts) { + if (!fetch2.Promise) { throw new Error("native promise missing, set fetch.Promise to your favorite alternative"); } - Body.Promise = fetch.Promise; - return new fetch.Promise(function(resolve, reject) { + Body.Promise = fetch2.Promise; + return new fetch2.Promise(function(resolve, reject) { const request = new Request(url2, opts); const options = getNodeRequestOptions(request); const send = (options.protocol === "https:" ? https2 : http2).request; @@ -6550,7 +6550,7 @@ var require_lib3 = __commonJS({ req.on("response", function(res) { clearTimeout(reqTimeout); const headers = createHeadersLenient(res.headers); - if (fetch.isRedirect(res.statusCode)) { + if (fetch2.isRedirect(res.statusCode)) { const location = headers.get("Location"); let locationURL = null; try { @@ -6586,7 +6586,7 @@ var require_lib3 = __commonJS({ return; } const requestOpts = { - headers: new Headers(request.headers), + headers: new Headers2(request.headers), follow: request.follow, counter: request.counter + 1, agent: request.agent, @@ -6612,7 +6612,7 @@ var require_lib3 = __commonJS({ requestOpts.body = void 0; requestOpts.headers.delete("content-length"); } - resolve(fetch(new Request(locationURL, requestOpts))); + resolve(fetch2(new Request(locationURL, requestOpts))); finalize(); return; } @@ -6705,14 +6705,14 @@ var require_lib3 = __commonJS({ stream4.end(); } } - fetch.isRedirect = function(code) { + fetch2.isRedirect = function(code) { return code === 301 || code === 302 || code === 303 || code === 307 || code === 308; }; - fetch.Promise = global.Promise; - module2.exports = exports = fetch; + fetch2.Promise = global.Promise; + module2.exports = exports = fetch2; Object.defineProperty(exports, "__esModule", { value: true }); exports.default = exports; - exports.Headers = Headers; + exports.Headers = Headers2; exports.Request = Request; exports.Response = Response; exports.FetchError = FetchError; @@ -6892,8 +6892,8 @@ var require_dist_node5 = __commonJS({ let headers = {}; let status; let url2; - const fetch = requestOptions.request && requestOptions.request.fetch || nodeFetch; - return fetch(requestOptions.url, Object.assign( + const fetch2 = requestOptions.request && requestOptions.request.fetch || nodeFetch; + return fetch2(requestOptions.url, Object.assign( { method: 
requestOptions.method, body: requestOptions.body, @@ -8356,16 +8356,16 @@ var require_dist_node9 = __commonJS({ return Object.assign(withDecorations, requestWithDefaults); } function restEndpointMethods(octokit2) { - const api2 = endpointsToMethods(octokit2, Endpoints); + const api = endpointsToMethods(octokit2, Endpoints); return { - rest: api2 + rest: api }; } restEndpointMethods.VERSION = VERSION3; function legacyRestEndpointMethods(octokit2) { - const api2 = endpointsToMethods(octokit2, Endpoints); - return _objectSpread2(_objectSpread2({}, api2), {}, { - rest: api2 + const api = endpointsToMethods(octokit2, Endpoints); + return _objectSpread2(_objectSpread2({}, api), {}, { + rest: api }); } legacyRestEndpointMethods.VERSION = VERSION3; @@ -24153,25 +24153,29 @@ var validateConfig = (key, condition, validationMessage) => { process.exit(1); } }; -var configValidators = { - ["OCO_OPENAI_API_KEY" /* OCO_OPENAI_API_KEY */](value, config7 = {}) { - validateConfig( - "API_KEY", - value || config7.OCO_AI_PROVIDER == "ollama", - "You need to provide an API key" - ); +var apiKeyValidator = (value, config7 = {}) => { + validateConfig( + "API_KEY", + value || process.env.OCO_OPENAI_API_KEY || process.env.OCO_API_KEY || config7.OCO_AI_PROVIDER == "ollama" || config7.OCO_AI_PROVIDER == "gemini", + "You need to provide an API key" + ); + if (config7.OCO_AI_PROVIDER != "gemini") { validateConfig( - "OCO_OPENAI_API_KEY" /* OCO_OPENAI_API_KEY */, + "OCO_API_KEY" /* OCO_API_KEY */, value.startsWith("sk-"), 'Must start with "sk-"' ); validateConfig( - "OCO_OPENAI_API_KEY" /* OCO_OPENAI_API_KEY */, + "OCO_API_KEY" /* OCO_API_KEY */, config7["OCO_OPENAI_BASE_PATH" /* OCO_OPENAI_BASE_PATH */] || value.length === 51, "Must be 51 characters long" ); - return value; - }, + } + return value; +}; +var configValidators = { + ["OCO_API_KEY" /* OCO_API_KEY */]: apiKeyValidator, + ["OCO_OPENAI_API_KEY" /* OCO_OPENAI_API_KEY */]: apiKeyValidator, ["OCO_DESCRIPTION" /* OCO_DESCRIPTION */](value) { validateConfig( "OCO_DESCRIPTION" /* OCO_DESCRIPTION */, @@ -24236,19 +24240,21 @@ var configValidators = { ); return value; }, - ["OCO_MODEL" /* OCO_MODEL */](value) { - validateConfig( - "OCO_MODEL" /* OCO_MODEL */, - [ - "gpt-3.5-turbo", - "gpt-3.5-turbo-0125", - "gpt-4", - "gpt-4-1106-preview", - "gpt-4-turbo-preview", - "gpt-4-0125-preview" - ].includes(value), - `${value} is not supported yet, use 'gpt-4', 'gpt-3.5-turbo' (default), 'gpt-3.5-turbo-0125', 'gpt-4-1106-preview', 'gpt-4-turbo-preview' or 'gpt-4-0125-preview'` - ); + ["OCO_MODEL" /* OCO_MODEL */](value, config7) { + if (config7.OCO_AI_PROVIDER != "gemini") { + validateConfig( + "OCO_MODEL" /* OCO_MODEL */, + [ + "gpt-3.5-turbo", + "gpt-3.5-turbo-0125", + "gpt-4", + "gpt-4-1106-preview", + "gpt-4-turbo-preview", + "gpt-4-0125-preview" + ].includes(value), + `${value} is not supported yet, use 'gpt-4', 'gpt-3.5-turbo' (default), 'gpt-3.5-turbo-0125', 'gpt-4-1106-preview', 'gpt-4-turbo-preview' or 'gpt-4-0125-preview'` + ); + } return value; }, ["OCO_MESSAGE_TEMPLATE_PLACEHOLDER" /* OCO_MESSAGE_TEMPLATE_PLACEHOLDER */](value) { @@ -24273,7 +24279,8 @@ var configValidators = { [ "", "openai", - "ollama" + "ollama", + "gemini" ].includes(value), `${value} is not supported yet, use 'ollama' or 'openai' (default)` ); @@ -24291,7 +24298,7 @@ var configValidators = { var configPath = (0, import_path.join)((0, import_os.homedir)(), ".opencommit"); var getConfig = () => { const configFromEnv = { - OCO_OPENAI_API_KEY: process.env.OCO_OPENAI_API_KEY, + OCO_API_KEY: 
process.env.OCO_API_KEY || process.env.OCO_OPENAI_API_KEY, OCO_TOKENS_MAX_INPUT: process.env.OCO_TOKENS_MAX_INPUT ? Number(process.env.OCO_TOKENS_MAX_INPUT) : void 0, OCO_TOKENS_MAX_OUTPUT: process.env.OCO_TOKENS_MAX_OUTPUT ? Number(process.env.OCO_TOKENS_MAX_OUTPUT) : void 0, OCO_OPENAI_BASE_PATH: process.env.OCO_OPENAI_BASE_PATH, @@ -27438,13 +27445,13 @@ var config3 = getConfig(); var MAX_TOKENS_OUTPUT = config3?.OCO_TOKENS_MAX_OUTPUT || 500 /* DEFAULT_MAX_TOKENS_OUTPUT */; var MAX_TOKENS_INPUT = config3?.OCO_TOKENS_MAX_INPUT || 4096 /* DEFAULT_MAX_TOKENS_INPUT */; var basePath = config3?.OCO_OPENAI_BASE_PATH; -var apiKey = config3?.OCO_OPENAI_API_KEY; +var apiKey = config3?.OCO_API_KEY || config3?.OCO_OPENAI_API_KEY; var [command, mode] = process.argv.slice(2); var isLocalModel = config3?.OCO_AI_PROVIDER == "ollama"; if (!apiKey && command !== "config" && mode !== "set" /* set */ && !isLocalModel) { ae("opencommit"); ce( - "OCO_OPENAI_API_KEY is not set, please run `oco config set OCO_OPENAI_API_KEY= . If you are using GPT, make sure you add payment details, so API works.`" + "OCO_OPENAI_API_KEY is not set, please run `oco config set OCO_API_KEY= . If you are using GPT, make sure you add payment details, so API works.`" ); ce( "For help look into README https://github.com/di-sukharev/opencommit#setup" @@ -27453,49 +27460,947 @@ if (!apiKey && command !== "config" && mode !== "set" /* set */ && !isLocalModel } var MODEL = config3?.OCO_MODEL || "gpt-3.5-turbo"; var OpenAi = class { - openAiApiConfiguration = new import_openai2.Configuration({ - apiKey - }); - openAI; constructor() { + this.openAiApiConfiguration = new import_openai2.Configuration({ + apiKey + }); + this.generateCommitMessage = async (messages) => { + const params = { + model: MODEL, + messages, + temperature: 0, + top_p: 0.1, + max_tokens: MAX_TOKENS_OUTPUT + }; + try { + const REQUEST_TOKENS = messages.map((msg) => tokenCount(msg.content) + 4).reduce((a2, b2) => a2 + b2, 0); + if (REQUEST_TOKENS > MAX_TOKENS_INPUT - MAX_TOKENS_OUTPUT) { + throw new Error("TOO_MUCH_TOKENS" /* tooMuchTokens */); + } + const { data } = await this.openAI.createChatCompletion(params); + const message = data.choices[0].message; + return message?.content; + } catch (error) { + ce(`${source_default.red("\u2716")} ${JSON.stringify(params)}`); + const err = error; + ce(`${source_default.red("\u2716")} ${err?.message || err}`); + if (axios_default.isAxiosError(error) && error.response?.status === 401) { + const openAiError = error.response.data.error; + if (openAiError?.message) + ce(openAiError.message); + ce( + "For help look into README https://github.com/di-sukharev/opencommit#setup" + ); + } + throw err; + } + }; if (basePath) { this.openAiApiConfiguration.basePath = basePath; } this.openAI = new import_openai2.OpenAIApi(this.openAiApiConfiguration); } - generateCommitMessage = async (messages) => { - const params = { - model: MODEL, - messages, - temperature: 0, - top_p: 0.1, - max_tokens: MAX_TOKENS_OUTPUT - }; +}; + +// node_modules/@google/generative-ai/dist/index.mjs +var POSSIBLE_ROLES = ["user", "model", "function", "system"]; +var HarmCategory; +(function(HarmCategory2) { + HarmCategory2["HARM_CATEGORY_UNSPECIFIED"] = "HARM_CATEGORY_UNSPECIFIED"; + HarmCategory2["HARM_CATEGORY_HATE_SPEECH"] = "HARM_CATEGORY_HATE_SPEECH"; + HarmCategory2["HARM_CATEGORY_SEXUALLY_EXPLICIT"] = "HARM_CATEGORY_SEXUALLY_EXPLICIT"; + HarmCategory2["HARM_CATEGORY_HARASSMENT"] = "HARM_CATEGORY_HARASSMENT"; + HarmCategory2["HARM_CATEGORY_DANGEROUS_CONTENT"] = 
"HARM_CATEGORY_DANGEROUS_CONTENT"; +})(HarmCategory || (HarmCategory = {})); +var HarmBlockThreshold; +(function(HarmBlockThreshold2) { + HarmBlockThreshold2["HARM_BLOCK_THRESHOLD_UNSPECIFIED"] = "HARM_BLOCK_THRESHOLD_UNSPECIFIED"; + HarmBlockThreshold2["BLOCK_LOW_AND_ABOVE"] = "BLOCK_LOW_AND_ABOVE"; + HarmBlockThreshold2["BLOCK_MEDIUM_AND_ABOVE"] = "BLOCK_MEDIUM_AND_ABOVE"; + HarmBlockThreshold2["BLOCK_ONLY_HIGH"] = "BLOCK_ONLY_HIGH"; + HarmBlockThreshold2["BLOCK_NONE"] = "BLOCK_NONE"; +})(HarmBlockThreshold || (HarmBlockThreshold = {})); +var HarmProbability; +(function(HarmProbability2) { + HarmProbability2["HARM_PROBABILITY_UNSPECIFIED"] = "HARM_PROBABILITY_UNSPECIFIED"; + HarmProbability2["NEGLIGIBLE"] = "NEGLIGIBLE"; + HarmProbability2["LOW"] = "LOW"; + HarmProbability2["MEDIUM"] = "MEDIUM"; + HarmProbability2["HIGH"] = "HIGH"; +})(HarmProbability || (HarmProbability = {})); +var BlockReason; +(function(BlockReason2) { + BlockReason2["BLOCKED_REASON_UNSPECIFIED"] = "BLOCKED_REASON_UNSPECIFIED"; + BlockReason2["SAFETY"] = "SAFETY"; + BlockReason2["OTHER"] = "OTHER"; +})(BlockReason || (BlockReason = {})); +var FinishReason; +(function(FinishReason2) { + FinishReason2["FINISH_REASON_UNSPECIFIED"] = "FINISH_REASON_UNSPECIFIED"; + FinishReason2["STOP"] = "STOP"; + FinishReason2["MAX_TOKENS"] = "MAX_TOKENS"; + FinishReason2["SAFETY"] = "SAFETY"; + FinishReason2["RECITATION"] = "RECITATION"; + FinishReason2["OTHER"] = "OTHER"; +})(FinishReason || (FinishReason = {})); +var TaskType; +(function(TaskType2) { + TaskType2["TASK_TYPE_UNSPECIFIED"] = "TASK_TYPE_UNSPECIFIED"; + TaskType2["RETRIEVAL_QUERY"] = "RETRIEVAL_QUERY"; + TaskType2["RETRIEVAL_DOCUMENT"] = "RETRIEVAL_DOCUMENT"; + TaskType2["SEMANTIC_SIMILARITY"] = "SEMANTIC_SIMILARITY"; + TaskType2["CLASSIFICATION"] = "CLASSIFICATION"; + TaskType2["CLUSTERING"] = "CLUSTERING"; +})(TaskType || (TaskType = {})); +var FunctionCallingMode; +(function(FunctionCallingMode2) { + FunctionCallingMode2["MODE_UNSPECIFIED"] = "MODE_UNSPECIFIED"; + FunctionCallingMode2["AUTO"] = "AUTO"; + FunctionCallingMode2["ANY"] = "ANY"; + FunctionCallingMode2["NONE"] = "NONE"; +})(FunctionCallingMode || (FunctionCallingMode = {})); +var FunctionDeclarationSchemaType; +(function(FunctionDeclarationSchemaType2) { + FunctionDeclarationSchemaType2["STRING"] = "STRING"; + FunctionDeclarationSchemaType2["NUMBER"] = "NUMBER"; + FunctionDeclarationSchemaType2["INTEGER"] = "INTEGER"; + FunctionDeclarationSchemaType2["BOOLEAN"] = "BOOLEAN"; + FunctionDeclarationSchemaType2["ARRAY"] = "ARRAY"; + FunctionDeclarationSchemaType2["OBJECT"] = "OBJECT"; +})(FunctionDeclarationSchemaType || (FunctionDeclarationSchemaType = {})); +var GoogleGenerativeAIError = class extends Error { + constructor(message) { + super(`[GoogleGenerativeAI Error]: ${message}`); + } +}; +var GoogleGenerativeAIResponseError = class extends GoogleGenerativeAIError { + constructor(message, response) { + super(message); + this.response = response; + } +}; +var GoogleGenerativeAIFetchError = class extends GoogleGenerativeAIError { + constructor(message, status, statusText, errorDetails) { + super(message); + this.status = status; + this.statusText = statusText; + this.errorDetails = errorDetails; + } +}; +var GoogleGenerativeAIRequestInputError = class extends GoogleGenerativeAIError { +}; +var DEFAULT_BASE_URL = "https://generativelanguage.googleapis.com"; +var DEFAULT_API_VERSION = "v1beta"; +var PACKAGE_VERSION = "0.10.0"; +var PACKAGE_LOG_HEADER = "genai-js"; +var Task; +(function(Task2) { + 
Task2["GENERATE_CONTENT"] = "generateContent"; + Task2["STREAM_GENERATE_CONTENT"] = "streamGenerateContent"; + Task2["COUNT_TOKENS"] = "countTokens"; + Task2["EMBED_CONTENT"] = "embedContent"; + Task2["BATCH_EMBED_CONTENTS"] = "batchEmbedContents"; +})(Task || (Task = {})); +var RequestUrl = class { + constructor(model, task, apiKey2, stream4, requestOptions) { + this.model = model; + this.task = task; + this.apiKey = apiKey2; + this.stream = stream4; + this.requestOptions = requestOptions; + } + toString() { + var _a, _b; + const apiVersion = ((_a = this.requestOptions) === null || _a === void 0 ? void 0 : _a.apiVersion) || DEFAULT_API_VERSION; + const baseUrl = ((_b = this.requestOptions) === null || _b === void 0 ? void 0 : _b.baseUrl) || DEFAULT_BASE_URL; + let url2 = `${baseUrl}/${apiVersion}/${this.model}:${this.task}`; + if (this.stream) { + url2 += "?alt=sse"; + } + return url2; + } +}; +function getClientHeaders(requestOptions) { + const clientHeaders = []; + if (requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.apiClient) { + clientHeaders.push(requestOptions.apiClient); + } + clientHeaders.push(`${PACKAGE_LOG_HEADER}/${PACKAGE_VERSION}`); + return clientHeaders.join(" "); +} +async function getHeaders(url2) { + const headers = new Headers(); + headers.append("Content-Type", "application/json"); + headers.append("x-goog-api-client", getClientHeaders(url2.requestOptions)); + headers.append("x-goog-api-key", url2.apiKey); + let customHeaders = url2.requestOptions.customHeaders; + if (customHeaders) { + if (!(customHeaders instanceof Headers)) { + try { + customHeaders = new Headers(customHeaders); + } catch (e2) { + throw new GoogleGenerativeAIRequestInputError(`unable to convert customHeaders value ${JSON.stringify(customHeaders)} to Headers: ${e2.message}`); + } + } + for (const [headerName, headerValue] of customHeaders.entries()) { + if (headerName === "x-goog-api-key") { + throw new GoogleGenerativeAIRequestInputError(`Cannot set reserved header name ${headerName}`); + } else if (headerName === "x-goog-api-client") { + throw new GoogleGenerativeAIRequestInputError(`Header name ${headerName} can only be set using the apiClient field`); + } + headers.append(headerName, headerValue); + } + } + return headers; +} +async function constructRequest(model, task, apiKey2, stream4, body, requestOptions) { + const url2 = new RequestUrl(model, task, apiKey2, stream4, requestOptions); + return { + url: url2.toString(), + fetchOptions: Object.assign(Object.assign({}, buildFetchOptions(requestOptions)), { method: "POST", headers: await getHeaders(url2), body }) + }; +} +async function makeRequest(model, task, apiKey2, stream4, body, requestOptions) { + return _makeRequestInternal(model, task, apiKey2, stream4, body, requestOptions, fetch); +} +async function _makeRequestInternal(model, task, apiKey2, stream4, body, requestOptions, fetchFn = fetch) { + const url2 = new RequestUrl(model, task, apiKey2, stream4, requestOptions); + let response; + try { + const request = await constructRequest(model, task, apiKey2, stream4, body, requestOptions); + response = await fetchFn(request.url, request.fetchOptions); + if (!response.ok) { + let message = ""; + let errorDetails; + try { + const json = await response.json(); + message = json.error.message; + if (json.error.details) { + message += ` ${JSON.stringify(json.error.details)}`; + errorDetails = json.error.details; + } + } catch (e2) { + } + throw new GoogleGenerativeAIFetchError(`Error fetching from 
${url2.toString()}: [${response.status} ${response.statusText}] ${message}`, response.status, response.statusText, errorDetails); + } + } catch (e2) { + let err = e2; + if (!(e2 instanceof GoogleGenerativeAIFetchError || e2 instanceof GoogleGenerativeAIRequestInputError)) { + err = new GoogleGenerativeAIError(`Error fetching from ${url2.toString()}: ${e2.message}`); + err.stack = e2.stack; + } + throw err; + } + return response; +} +function buildFetchOptions(requestOptions) { + const fetchOptions = {}; + if ((requestOptions === null || requestOptions === void 0 ? void 0 : requestOptions.timeout) >= 0) { + const abortController = new AbortController(); + const signal = abortController.signal; + setTimeout(() => abortController.abort(), requestOptions.timeout); + fetchOptions.signal = signal; + } + return fetchOptions; +} +function addHelpers(response) { + response.text = () => { + if (response.candidates && response.candidates.length > 0) { + if (response.candidates.length > 1) { + console.warn(`This response had ${response.candidates.length} candidates. Returning text from the first candidate only. Access response.candidates directly to use the other candidates.`); + } + if (hadBadFinishReason(response.candidates[0])) { + throw new GoogleGenerativeAIResponseError(`${formatBlockErrorMessage(response)}`, response); + } + return getText(response); + } else if (response.promptFeedback) { + throw new GoogleGenerativeAIResponseError(`Text not available. ${formatBlockErrorMessage(response)}`, response); + } + return ""; + }; + response.functionCall = () => { + if (response.candidates && response.candidates.length > 0) { + if (response.candidates.length > 1) { + console.warn(`This response had ${response.candidates.length} candidates. Returning function calls from the first candidate only. Access response.candidates directly to use the other candidates.`); + } + if (hadBadFinishReason(response.candidates[0])) { + throw new GoogleGenerativeAIResponseError(`${formatBlockErrorMessage(response)}`, response); + } + console.warn(`response.functionCall() is deprecated. Use response.functionCalls() instead.`); + return getFunctionCalls(response)[0]; + } else if (response.promptFeedback) { + throw new GoogleGenerativeAIResponseError(`Function call not available. ${formatBlockErrorMessage(response)}`, response); + } + return void 0; + }; + response.functionCalls = () => { + if (response.candidates && response.candidates.length > 0) { + if (response.candidates.length > 1) { + console.warn(`This response had ${response.candidates.length} candidates. Returning function calls from the first candidate only. Access response.candidates directly to use the other candidates.`); + } + if (hadBadFinishReason(response.candidates[0])) { + throw new GoogleGenerativeAIResponseError(`${formatBlockErrorMessage(response)}`, response); + } + return getFunctionCalls(response); + } else if (response.promptFeedback) { + throw new GoogleGenerativeAIResponseError(`Function call not available. ${formatBlockErrorMessage(response)}`, response); + } + return void 0; + }; + return response; +} +function getText(response) { + var _a, _b, _c, _d; + if ((_d = (_c = (_b = (_a = response.candidates) === null || _a === void 0 ? void 0 : _a[0].content) === null || _b === void 0 ? void 0 : _b.parts) === null || _c === void 0 ? void 0 : _c[0]) === null || _d === void 0 ? 
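
`buildFetchOptions` above implements request timeouts with an `AbortController` whose signal is handed to `fetch` and aborted from a `setTimeout`. A self-contained sketch of the same pattern (the helper name `fetchWithTimeout` is hypothetical):

```ts
// Abort a fetch after `timeoutMs` milliseconds; mirrors buildFetchOptions,
// which sets fetchOptions.signal and calls abort() from a timer.
async function fetchWithTimeout(url: string, timeoutMs: number): Promise<Response> {
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), timeoutMs);
  try {
    return await fetch(url, { signal: controller.signal });
  } finally {
    clearTimeout(timer); // note: the bundled code does not clear its timer
  }
}
```
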
void 0 : _d.text) { + return response.candidates[0].content.parts.map(({ text }) => text).join(""); + } else { + return ""; + } +} +function getFunctionCalls(response) { + var _a, _b, _c, _d; + const functionCalls = []; + if ((_b = (_a = response.candidates) === null || _a === void 0 ? void 0 : _a[0].content) === null || _b === void 0 ? void 0 : _b.parts) { + for (const part of (_d = (_c = response.candidates) === null || _c === void 0 ? void 0 : _c[0].content) === null || _d === void 0 ? void 0 : _d.parts) { + if (part.functionCall) { + functionCalls.push(part.functionCall); + } + } + } + if (functionCalls.length > 0) { + return functionCalls; + } else { + return void 0; + } +} +var badFinishReasons = [FinishReason.RECITATION, FinishReason.SAFETY]; +function hadBadFinishReason(candidate) { + return !!candidate.finishReason && badFinishReasons.includes(candidate.finishReason); +} +function formatBlockErrorMessage(response) { + var _a, _b, _c; + let message = ""; + if ((!response.candidates || response.candidates.length === 0) && response.promptFeedback) { + message += "Response was blocked"; + if ((_a = response.promptFeedback) === null || _a === void 0 ? void 0 : _a.blockReason) { + message += ` due to ${response.promptFeedback.blockReason}`; + } + if ((_b = response.promptFeedback) === null || _b === void 0 ? void 0 : _b.blockReasonMessage) { + message += `: ${response.promptFeedback.blockReasonMessage}`; + } + } else if ((_c = response.candidates) === null || _c === void 0 ? void 0 : _c[0]) { + const firstCandidate = response.candidates[0]; + if (hadBadFinishReason(firstCandidate)) { + message += `Candidate was blocked due to ${firstCandidate.finishReason}`; + if (firstCandidate.finishMessage) { + message += `: ${firstCandidate.finishMessage}`; + } + } + } + return message; +} +function __await(v2) { + return this instanceof __await ? (this.v = v2, this) : new __await(v2); +} +function __asyncGenerator(thisArg, _arguments, generator) { + if (!Symbol.asyncIterator) + throw new TypeError("Symbol.asyncIterator is not defined."); + var g = generator.apply(thisArg, _arguments || []), i2, q3 = []; + return i2 = {}, verb("next"), verb("throw"), verb("return"), i2[Symbol.asyncIterator] = function() { + return this; + }, i2; + function verb(n) { + if (g[n]) + i2[n] = function(v2) { + return new Promise(function(a2, b2) { + q3.push([n, v2, a2, b2]) > 1 || resume(n, v2); + }); + }; + } + function resume(n, v2) { try { - const REQUEST_TOKENS = messages.map((msg) => tokenCount(msg.content) + 4).reduce((a2, b2) => a2 + b2, 0); - if (REQUEST_TOKENS > MAX_TOKENS_INPUT - MAX_TOKENS_OUTPUT) { - throw new Error("TOO_MUCH_TOKENS" /* tooMuchTokens */); + step(g[n](v2)); + } catch (e2) { + settle2(q3[0][3], e2); + } + } + function step(r2) { + r2.value instanceof __await ? 
Promise.resolve(r2.value.v).then(fulfill, reject) : settle2(q3[0][2], r2); + } + function fulfill(value) { + resume("next", value); + } + function reject(value) { + resume("throw", value); + } + function settle2(f3, v2) { + if (f3(v2), q3.shift(), q3.length) + resume(q3[0][0], q3[0][1]); + } +} +var responseLineRE = /^data\: (.*)(?:\n\n|\r\r|\r\n\r\n)/; +function processStream(response) { + const inputStream = response.body.pipeThrough(new TextDecoderStream("utf8", { fatal: true })); + const responseStream = getResponseStream(inputStream); + const [stream1, stream22] = responseStream.tee(); + return { + stream: generateResponseSequence(stream1), + response: getResponsePromise(stream22) + }; +} +async function getResponsePromise(stream4) { + const allResponses = []; + const reader = stream4.getReader(); + while (true) { + const { done, value } = await reader.read(); + if (done) { + return addHelpers(aggregateResponses(allResponses)); + } + allResponses.push(value); + } +} +function generateResponseSequence(stream4) { + return __asyncGenerator(this, arguments, function* generateResponseSequence_1() { + const reader = stream4.getReader(); + while (true) { + const { value, done } = yield __await(reader.read()); + if (done) { + break; } - const { data } = await this.openAI.createChatCompletion(params); - const message = data.choices[0].message; - return message?.content; - } catch (error) { - ce(`${source_default.red("\u2716")} ${JSON.stringify(params)}`); - const err = error; - ce(`${source_default.red("\u2716")} ${err?.message || err}`); - if (axios_default.isAxiosError(error) && error.response?.status === 401) { - const openAiError = error.response.data.error; - if (openAiError?.message) - ce(openAiError.message); - ce( - "For help look into README https://github.com/di-sukharev/opencommit#setup" - ); + yield yield __await(addHelpers(value)); + } + }); +} +function getResponseStream(inputStream) { + const reader = inputStream.getReader(); + const stream4 = new ReadableStream({ + start(controller) { + let currentText = ""; + return pump(); + function pump() { + return reader.read().then(({ value, done }) => { + if (done) { + if (currentText.trim()) { + controller.error(new GoogleGenerativeAIError("Failed to parse stream")); + return; + } + controller.close(); + return; + } + currentText += value; + let match = currentText.match(responseLineRE); + let parsedResponse; + while (match) { + try { + parsedResponse = JSON.parse(match[1]); + } catch (e2) { + controller.error(new GoogleGenerativeAIError(`Error parsing JSON response: "${match[1]}"`)); + return; + } + controller.enqueue(parsedResponse); + currentText = currentText.substring(match[0].length); + match = currentText.match(responseLineRE); + } + return pump(); + }); } - throw err; } + }); + return stream4; +} +function aggregateResponses(responses) { + const lastResponse = responses[responses.length - 1]; + const aggregatedResponse = { + promptFeedback: lastResponse === null || lastResponse === void 0 ? 
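
The streaming helpers above parse Gemini's server-sent events: decoded text accumulates in a buffer, complete `data: …` records are peeled off with `responseLineRE` and JSON-parsed, and the stream is `tee()`d so one branch yields chunks while the other builds the final aggregated response. A reduced sketch of the parsing loop (assumes well-formed SSE input; `drainSseBuffer` is an illustrative name):

```ts
// Same shape as responseLineRE above: one `data:` record terminated by a blank line.
const SSE_RECORD = /^data\: (.*)(?:\n\n|\r\r|\r\n\r\n)/;

// Pull every complete record out of the buffer, returning parsed payloads
// plus the unconsumed remainder (a partial record kept for the next chunk).
function drainSseBuffer(buffer: string): { records: unknown[]; rest: string } {
  const records: unknown[] = [];
  let match = buffer.match(SSE_RECORD);
  while (match) {
    records.push(JSON.parse(match[1]));
    buffer = buffer.substring(match[0].length);
    match = buffer.match(SSE_RECORD);
  }
  return { records, rest: buffer };
}
```
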
void 0 : lastResponse.promptFeedback }; + for (const response of responses) { + if (response.candidates) { + for (const candidate of response.candidates) { + const i2 = candidate.index; + if (!aggregatedResponse.candidates) { + aggregatedResponse.candidates = []; + } + if (!aggregatedResponse.candidates[i2]) { + aggregatedResponse.candidates[i2] = { + index: candidate.index + }; + } + aggregatedResponse.candidates[i2].citationMetadata = candidate.citationMetadata; + aggregatedResponse.candidates[i2].finishReason = candidate.finishReason; + aggregatedResponse.candidates[i2].finishMessage = candidate.finishMessage; + aggregatedResponse.candidates[i2].safetyRatings = candidate.safetyRatings; + if (candidate.content && candidate.content.parts) { + if (!aggregatedResponse.candidates[i2].content) { + aggregatedResponse.candidates[i2].content = { + role: candidate.content.role || "user", + parts: [] + }; + } + const newPart = {}; + for (const part of candidate.content.parts) { + if (part.text) { + newPart.text = part.text; + } + if (part.functionCall) { + newPart.functionCall = part.functionCall; + } + if (Object.keys(newPart).length === 0) { + newPart.text = ""; + } + aggregatedResponse.candidates[i2].content.parts.push(newPart); + } + } + } + } + } + return aggregatedResponse; +} +async function generateContentStream(apiKey2, model, params, requestOptions) { + const response = await makeRequest( + model, + Task.STREAM_GENERATE_CONTENT, + apiKey2, + true, + JSON.stringify(params), + requestOptions + ); + return processStream(response); +} +async function generateContent(apiKey2, model, params, requestOptions) { + const response = await makeRequest( + model, + Task.GENERATE_CONTENT, + apiKey2, + false, + JSON.stringify(params), + requestOptions + ); + const responseJson = await response.json(); + const enhancedResponse = addHelpers(responseJson); + return { + response: enhancedResponse + }; +} +function formatSystemInstruction(input) { + if (input == null) { + return void 0; + } else if (typeof input === "string") { + return { role: "system", parts: [{ text: input }] }; + } else if (input.text) { + return { role: "system", parts: [input] }; + } else if (input.parts) { + if (!input.role) { + return { role: "system", parts: input.parts }; + } else { + return input; + } + } +} +function formatNewContent(request) { + let newParts = []; + if (typeof request === "string") { + newParts = [{ text: request }]; + } else { + for (const partOrString of request) { + if (typeof partOrString === "string") { + newParts.push({ text: partOrString }); + } else { + newParts.push(partOrString); + } + } + } + return assignRoleToPartsAndValidateSendMessageRequest(newParts); +} +function assignRoleToPartsAndValidateSendMessageRequest(parts) { + const userContent = { role: "user", parts: [] }; + const functionContent = { role: "function", parts: [] }; + let hasUserContent = false; + let hasFunctionContent = false; + for (const part of parts) { + if ("functionResponse" in part) { + functionContent.parts.push(part); + hasFunctionContent = true; + } else { + userContent.parts.push(part); + hasUserContent = true; + } + } + if (hasUserContent && hasFunctionContent) { + throw new GoogleGenerativeAIError("Within a single message, FunctionResponse cannot be mixed with other type of part in the request for sending chat message."); + } + if (!hasUserContent && !hasFunctionContent) { + throw new GoogleGenerativeAIError("No content is provided for sending chat message."); + } + if (hasUserContent) { + return userContent; + } + 
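
`aggregateResponses` above merges the streamed partial responses into one: candidates are grouped by `index`, per-candidate metadata is overwritten by the latest chunk, and text parts are appended in arrival order. A simplified sketch of that merge, with types narrowed to just the fields the loop touches:

```ts
type Part = { text?: string };
type Candidate = { index: number; content?: { role?: string; parts?: Part[] } };
type Chunk = { candidates?: Candidate[] };

// Merge streamed chunks into complete candidates, concatenating parts.
function mergeChunks(chunks: Chunk[]): Candidate[] {
  const merged: Candidate[] = [];
  for (const chunk of chunks) {
    for (const candidate of chunk.candidates ?? []) {
      const slot = (merged[candidate.index] ??= {
        index: candidate.index,
        content: { role: candidate.content?.role ?? 'user', parts: [] }
      });
      slot.content!.parts!.push(...(candidate.content?.parts ?? []));
    }
  }
  return merged;
}
```
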
return functionContent; +} +function formatGenerateContentInput(params) { + let formattedRequest; + if (params.contents) { + formattedRequest = params; + } else { + const content = formatNewContent(params); + formattedRequest = { contents: [content] }; + } + if (params.systemInstruction) { + formattedRequest.systemInstruction = formatSystemInstruction(params.systemInstruction); + } + return formattedRequest; +} +function formatEmbedContentInput(params) { + if (typeof params === "string" || Array.isArray(params)) { + const content = formatNewContent(params); + return { content }; + } + return params; +} +var VALID_PART_FIELDS = [ + "text", + "inlineData", + "functionCall", + "functionResponse" +]; +var VALID_PARTS_PER_ROLE = { + user: ["text", "inlineData"], + function: ["functionResponse"], + model: ["text", "functionCall"], + system: ["text"] +}; +var VALID_PREVIOUS_CONTENT_ROLES = { + user: ["model"], + function: ["model"], + model: ["user", "function"], + system: [] +}; +function validateChatHistory(history) { + let prevContent; + for (const currContent of history) { + const { role, parts } = currContent; + if (!prevContent && role !== "user") { + throw new GoogleGenerativeAIError(`First content should be with role 'user', got ${role}`); + } + if (!POSSIBLE_ROLES.includes(role)) { + throw new GoogleGenerativeAIError(`Each item should include role field. Got ${role} but valid roles are: ${JSON.stringify(POSSIBLE_ROLES)}`); + } + if (!Array.isArray(parts)) { + throw new GoogleGenerativeAIError("Content should have 'parts' property with an array of Parts"); + } + if (parts.length === 0) { + throw new GoogleGenerativeAIError("Each Content should have at least one part"); + } + const countFields = { + text: 0, + inlineData: 0, + functionCall: 0, + functionResponse: 0, + fileData: 0 + }; + for (const part of parts) { + for (const key of VALID_PART_FIELDS) { + if (key in part) { + countFields[key] += 1; + } + } + } + const validParts = VALID_PARTS_PER_ROLE[role]; + for (const key of VALID_PART_FIELDS) { + if (!validParts.includes(key) && countFields[key] > 0) { + throw new GoogleGenerativeAIError(`Content with role '${role}' can't contain '${key}' part`); + } + } + if (prevContent) { + const validPreviousContentRoles = VALID_PREVIOUS_CONTENT_ROLES[role]; + if (!validPreviousContentRoles.includes(prevContent.role)) { + throw new GoogleGenerativeAIError(`Content with role '${role}' can't follow '${prevContent.role}'. Valid previous roles: ${JSON.stringify(VALID_PREVIOUS_CONTENT_ROLES)}`); + } + } + prevContent = currContent; + } +} +var SILENT_ERROR = "SILENT_ERROR"; +var ChatSession = class { + constructor(apiKey2, model, params, requestOptions) { + this.model = model; + this.params = params; + this.requestOptions = requestOptions; + this._history = []; + this._sendPromise = Promise.resolve(); + this._apiKey = apiKey2; + if (params === null || params === void 0 ? void 0 : params.history) { + validateChatHistory(params.history); + this._history = params.history; + } + } + async getHistory() { + await this._sendPromise; + return this._history; + } + async sendMessage(request) { + var _a, _b, _c, _d, _e; + await this._sendPromise; + const newContent = formatNewContent(request); + const generateContentRequest = { + safetySettings: (_a = this.params) === null || _a === void 0 ? void 0 : _a.safetySettings, + generationConfig: (_b = this.params) === null || _b === void 0 ? void 0 : _b.generationConfig, + tools: (_c = this.params) === null || _c === void 0 ? 
void 0 : _c.tools, + toolConfig: (_d = this.params) === null || _d === void 0 ? void 0 : _d.toolConfig, + systemInstruction: (_e = this.params) === null || _e === void 0 ? void 0 : _e.systemInstruction, + contents: [...this._history, newContent] + }; + let finalResult; + this._sendPromise = this._sendPromise.then(() => generateContent(this._apiKey, this.model, generateContentRequest, this.requestOptions)).then((result) => { + var _a2; + if (result.response.candidates && result.response.candidates.length > 0) { + this._history.push(newContent); + const responseContent = Object.assign({ + parts: [], + role: "model" + }, (_a2 = result.response.candidates) === null || _a2 === void 0 ? void 0 : _a2[0].content); + this._history.push(responseContent); + } else { + const blockErrorMessage = formatBlockErrorMessage(result.response); + if (blockErrorMessage) { + console.warn(`sendMessage() was unsuccessful. ${blockErrorMessage}. Inspect response object for details.`); + } + } + finalResult = result; + }); + await this._sendPromise; + return finalResult; + } + async sendMessageStream(request) { + var _a, _b, _c, _d, _e; + await this._sendPromise; + const newContent = formatNewContent(request); + const generateContentRequest = { + safetySettings: (_a = this.params) === null || _a === void 0 ? void 0 : _a.safetySettings, + generationConfig: (_b = this.params) === null || _b === void 0 ? void 0 : _b.generationConfig, + tools: (_c = this.params) === null || _c === void 0 ? void 0 : _c.tools, + toolConfig: (_d = this.params) === null || _d === void 0 ? void 0 : _d.toolConfig, + systemInstruction: (_e = this.params) === null || _e === void 0 ? void 0 : _e.systemInstruction, + contents: [...this._history, newContent] + }; + const streamPromise = generateContentStream(this._apiKey, this.model, generateContentRequest, this.requestOptions); + this._sendPromise = this._sendPromise.then(() => streamPromise).catch((_ignored) => { + throw new Error(SILENT_ERROR); + }).then((streamResult) => streamResult.response).then((response) => { + if (response.candidates && response.candidates.length > 0) { + this._history.push(newContent); + const responseContent = Object.assign({}, response.candidates[0].content); + if (!responseContent.role) { + responseContent.role = "model"; + } + this._history.push(responseContent); + } else { + const blockErrorMessage = formatBlockErrorMessage(response); + if (blockErrorMessage) { + console.warn(`sendMessageStream() was unsuccessful. ${blockErrorMessage}. 
Inspect response object for details.`); + } + } + }).catch((e2) => { + if (e2.message !== SILENT_ERROR) { + console.error(e2); + } + }); + return streamPromise; + } +}; +async function countTokens(apiKey2, model, params, requestOptions) { + const response = await makeRequest(model, Task.COUNT_TOKENS, apiKey2, false, JSON.stringify(Object.assign(Object.assign({}, params), { model })), requestOptions); + return response.json(); +} +async function embedContent(apiKey2, model, params, requestOptions) { + const response = await makeRequest(model, Task.EMBED_CONTENT, apiKey2, false, JSON.stringify(params), requestOptions); + return response.json(); +} +async function batchEmbedContents(apiKey2, model, params, requestOptions) { + const requestsWithModel = params.requests.map((request) => { + return Object.assign(Object.assign({}, request), { model }); + }); + const response = await makeRequest(model, Task.BATCH_EMBED_CONTENTS, apiKey2, false, JSON.stringify({ requests: requestsWithModel }), requestOptions); + return response.json(); +} +var GenerativeModel = class { + constructor(apiKey2, modelParams, requestOptions) { + this.apiKey = apiKey2; + if (modelParams.model.includes("/")) { + this.model = modelParams.model; + } else { + this.model = `models/${modelParams.model}`; + } + this.generationConfig = modelParams.generationConfig || {}; + this.safetySettings = modelParams.safetySettings || []; + this.tools = modelParams.tools; + this.toolConfig = modelParams.toolConfig; + this.systemInstruction = formatSystemInstruction(modelParams.systemInstruction); + this.requestOptions = requestOptions || {}; + } + async generateContent(request) { + const formattedParams = formatGenerateContentInput(request); + return generateContent(this.apiKey, this.model, Object.assign({ generationConfig: this.generationConfig, safetySettings: this.safetySettings, tools: this.tools, toolConfig: this.toolConfig, systemInstruction: this.systemInstruction }, formattedParams), this.requestOptions); + } + async generateContentStream(request) { + const formattedParams = formatGenerateContentInput(request); + return generateContentStream(this.apiKey, this.model, Object.assign({ generationConfig: this.generationConfig, safetySettings: this.safetySettings, tools: this.tools, toolConfig: this.toolConfig, systemInstruction: this.systemInstruction }, formattedParams), this.requestOptions); + } + startChat(startChatParams) { + return new ChatSession(this.apiKey, this.model, Object.assign({ generationConfig: this.generationConfig, safetySettings: this.safetySettings, tools: this.tools, toolConfig: this.toolConfig, systemInstruction: this.systemInstruction }, startChatParams), this.requestOptions); + } + async countTokens(request) { + const formattedParams = formatGenerateContentInput(request); + return countTokens(this.apiKey, this.model, formattedParams, this.requestOptions); + } + async embedContent(request) { + const formattedParams = formatEmbedContentInput(request); + return embedContent(this.apiKey, this.model, formattedParams, this.requestOptions); + } + async batchEmbedContents(batchEmbedContentRequest) { + return batchEmbedContents(this.apiKey, this.model, batchEmbedContentRequest, this.requestOptions); + } +}; +var GoogleGenerativeAI = class { + constructor(apiKey2) { + this.apiKey = apiKey2; + } + getGenerativeModel(modelParams, requestOptions) { + if (!modelParams.model) { + throw new GoogleGenerativeAIError(`Must provide a model name. 
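
With `GenerativeModel` and the `GoogleGenerativeAI` entry point in place, the whole bundled SDK reduces to a short call sequence. A usage sketch (the model name and prompt string are illustrative; the env variable assumes a key is configured):

```ts
import { GoogleGenerativeAI } from '@google/generative-ai';

async function demo(): Promise<void> {
  const genAi = new GoogleGenerativeAI(process.env.OCO_API_KEY!);
  const model = genAi.getGenerativeModel({ model: 'gemini-1.5-pro-latest' });

  // Non-streaming call; result.response is the helper-enhanced response,
  // and .text() joins the text parts of the first candidate.
  const result = await model.generateContent('Summarize this staged diff.');
  console.log(result.response.text());
}
```
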
Example: genai.getGenerativeModel({ model: 'my-model-name' })`); + } + return new GenerativeModel(this.apiKey, modelParams, requestOptions); + } +}; + +// src/engine/gemini.ts +var GeminiAi = class { + constructor() { + this.config = getConfig(); + const config7 = getConfig(); + const apiKey2 = config7?.OCO_API_KEY || config7?.OCO_OPENAI_API_KEY; + this.genAi = new GoogleGenerativeAI(apiKey2); + const isGeminiModel = (config7?.OCO_AI_PROVIDER).trim().toLowerCase() === "gemini"; + const [command2, mode2] = process.argv.slice(2); + if (!apiKey2 && command2 !== "config" && mode2 !== "set" /* set */ && !isGeminiModel) { + ae("opencommit"); + ce( + "OCO_API_KEY is not set, please run `oco config set OCO_API_KEY= . If you are using GPT, make sure you add payment details, so API works.`" + ); + ce( + "For help look into README https://github.com/di-sukharev/opencommit#setup" + ); + process.exit(1); + } + const VALID_MODELS = ["gemini-1.0-pro-latest", "gemini-pro", "gemini-1.0-pro-001", "gemini-1.5-pro-latest"]; + const DEFAULT_MODEL = "gemini-1.5-pro-latest"; + const MODEL2 = (config7?.OCO_MODEL || DEFAULT_MODEL).trim().toLowerCase(); + if (!VALID_MODELS.includes(MODEL2)) { + ae("opencommit"); + ce( + `OCO_MODEL is not set to a valid model. Please run 'oco config set OCO_MODEL=${DEFAULT_MODEL}'` + ); + ce("Valid models are: " + VALID_MODELS.join(", ")); + process.exit(1); + } + this.model = this.genAi.getGenerativeModel({ + model: MODEL2, + safetySettings: [ + { + category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT, + threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE + }, + { + category: HarmCategory.HARM_CATEGORY_HATE_SPEECH, + threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE + }, + { + category: HarmCategory.HARM_CATEGORY_HARASSMENT, + threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE + }, + { + category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, + threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE + } + ] + }); + } + get MAX_TOKENS_OUTPUT() { + return this.config?.OCO_TOKENS_MAX_OUTPUT || 30720; + } + get MAX_TOKENS_INPUT() { + return this.config?.OCO_TOKENS_MAX_INPUT || 4096 /* DEFAULT_MAX_TOKENS_INPUT */; + } + async generateCommitMessage(messages) { + const systemMessages = []; + const history = { + user: [], + model: [] + }; + systemMessages.push(...messages.map((m3) => { + if (m3.role === "system") { + return { text: `${m3.content} + + + The commit message should start with a single subject line that is a brief overview description that summarizes all of the changes. It should not + exceed 50 characters and should be capitalized and written in the imperative mood. The subject line should be followed by a blank line and then the body of the commit message. + + + The body of the commit should provide more details about the changes made. Each commit message should be a single logical change. + + + Here's an example of a well-formed commit message: + + + Adds support for the Gemini language model engine, allowing users to utilize Gemini for generating commit messages. 
+ + + \u2728 (utils/engine.ts): add support for Gemini engine + + \u267B\uFE0F (openAi.ts & utils/engine.ts): add support for OCO_API_KEY env variable to configure apiKey + + ` }; + } + if (m3.role === "user") { + return { text: `This is an example of a git diff --staged command output, it should not be included in the commit message: + +${m3.content}` }; + } + return { text: m3.content }; + })); + let prompt = [ + { + role: "user", + parts: [...history.user] + }, + { + role: "model", + parts: [...history.model] + } + ]; + const requestTokens = await this.model.countTokens(prompt.map((p2) => p2.parts.join("\n"))); + const tokenLimit = Math.abs(this.MAX_TOKENS_INPUT - this.MAX_TOKENS_OUTPUT); + if (requestTokens.totalTokens > tokenLimit) { + throw new Error("TOO_MUCH_TOKENS" /* tooMuchTokens */); + } + try { + const chat = await this.model.startChat({ + systemInstruction: { role: "system", parts: systemMessages } + }); + const result = await chat.sendMessage([ + { text: "You MUST NEVER include any of the output from the `git diff --staged` command in your commit message, and you can ignore changes inside of the `out` directory." }, + { text: `You should include a brief summary of changes to each file in the 'git diff --staged' output as part of the commit message.` }, + { text: "Lastly, please do not include contextual information explaining new libraries or tools that were added to the project. This information is not necessary for the commit message. The commit message should concisely focus on the changes made to the codebase." } + ]); + const response = await result.response; + const answer = response.text(); + return answer; + } catch (err) { + const error = err; + ce(`${source_default.red("\u2716")} ${err?.message || err}`); + throw error; + } + } }; -var api = new OpenAi(); // src/engine/ollama.ts var OllamaAi = class { @@ -27522,15 +28427,16 @@ var OllamaAi = class { } } }; -var ollamaAi = new OllamaAi(); // src/utils/engine.ts function getEngine() { const config7 = getConfig(); if (config7?.OCO_AI_PROVIDER == "ollama") { - return ollamaAi; + return new OllamaAi(); + } else if (config7?.OCO_AI_PROVIDER == "gemini") { + return new GeminiAi(); } - return api; + return new OpenAi(); } // src/modules/commitlint/config.ts @@ -27585,6 +28491,7 @@ var IDENTITY = "You are to act as the author of a commit message in git."; var INIT_MAIN_PROMPT2 = (language, fullGitMojiSpec) => ({ role: import_openai3.ChatCompletionRequestMessageRoleEnum.System, content: `${IDENTITY} Your mission is to create clean and comprehensive commit messages as per the ${fullGitMojiSpec ? "GitMoji specification" : "conventional commit convention"} and explain WHAT were the changes and mainly WHY the changes were done. I'll send you an output of 'git diff --staged' command, and you are to convert it into a commit message. + You should only include the changes that are relevant to the commit. Do not include any changes that are not relevant to the commit. ${config5?.OCO_EMOJI ? `Use GitMoji convention to preface the commit. Here are some help to choose the right emoji (emoji, description): \u{1F41B}, Fix a bug; \u2728, Introduce new features; \u{1F4DD}, Add or update documentation; \u{1F680}, Deploy stuff; \u2705, Add, update, or pass tests; \u267B\uFE0F, Refactor code; \u2B06\uFE0F, Upgrade dependencies; \u{1F527}, Add or update configuration files; \u{1F310}, Internationalization and localization; \u{1F4A1}, Add or update comments in source code; ${fullGitMojiSpec ? 
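
The bundled `GeminiAi` above budgets tokens by counting the prompt with the SDK and comparing it against `Math.abs(MAX_TOKENS_INPUT - MAX_TOKENS_OUTPUT)`; the `abs` is presumably needed because the Gemini output default (30720) exceeds the input default (4096), which would make a plain subtraction negative. A sketch of the check under that reading (helper and parameter names are illustrative):

```ts
// Reject prompts whose counted tokens exceed the input/output budget gap.
// `countTokens` stands in for GenerativeModel.countTokens from the SDK.
async function assertWithinBudget(
  countTokens: (text: string) => Promise<{ totalTokens: number }>,
  promptText: string,
  maxInput: number,  // e.g. OCO_TOKENS_MAX_INPUT, default 4096
  maxOutput: number  // e.g. OCO_TOKENS_MAX_OUTPUT, default 30720 for Gemini
): Promise<void> {
  const limit = Math.abs(maxInput - maxOutput);
  const { totalTokens } = await countTokens(promptText);
  if (totalTokens > limit) {
    throw new Error('TOO_MUCH_TOKENS');
  }
}
```
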
"\u{1F3A8}, Improve structure / format of the code; \u26A1\uFE0F, Improve performance; \u{1F525}, Remove code or files; \u{1F691}\uFE0F, Critical hotfix; \u{1F484}, Add or update the UI and style files; \u{1F389}, Begin a project; \u{1F512}\uFE0F, Fix security issues; \u{1F510}, Add or update secrets; \u{1F516}, Release / Version tags; \u{1F6A8}, Fix compiler / linter warnings; \u{1F6A7}, Work in progress; \u{1F49A}, Fix CI Build; \u2B07\uFE0F, Downgrade dependencies; \u{1F4CC}, Pin dependencies to specific versions; \u{1F477}, Add or update CI build system; \u{1F4C8}, Add or update analytics or track code; \u2795, Add a dependency; \u2796, Remove a dependency; \u{1F528}, Add or update development scripts; \u270F\uFE0F, Fix typos; \u{1F4A9}, Write bad code that needs to be improved; \u23EA\uFE0F, Revert changes; \u{1F500}, Merge branches; \u{1F4E6}\uFE0F, Add or update compiled files or packages; \u{1F47D}\uFE0F, Update code due to external API changes; \u{1F69A}, Move or rename resources (e.g.: files, paths, routes); \u{1F4C4}, Add or update license; \u{1F4A5}, Introduce breaking changes; \u{1F371}, Add or update assets; \u267F\uFE0F, Improve accessibility; \u{1F37B}, Write code drunkenly; \u{1F4AC}, Add or update text and literals; \u{1F5C3}\uFE0F, Perform database related changes; \u{1F50A}, Add or update logs; \u{1F507}, Remove logs; \u{1F465}, Add or update contributor(s); \u{1F6B8}, Improve user experience / usability; \u{1F3D7}\uFE0F, Make architectural changes; \u{1F4F1}, Work on responsive design; \u{1F921}, Mock things; \u{1F95A}, Add or update an easter egg; \u{1F648}, Add or update a .gitignore file; \u{1F4F8}, Add or update snapshots; \u2697\uFE0F, Perform experiments; \u{1F50D}\uFE0F, Improve SEO; \u{1F3F7}\uFE0F, Add or update types; \u{1F331}, Add or update seed files; \u{1F6A9}, Add, update, or remove feature flags; \u{1F945}, Catch errors; \u{1F4AB}, Add or update animations and transitions; \u{1F5D1}\uFE0F, Deprecate code that needs to be cleaned up; \u{1F6C2}, Work on code related to authorization, roles and permissions; \u{1FA79}, Simple fix for a non-critical issue; \u{1F9D0}, Data exploration/inspection; \u26B0\uFE0F, Remove dead code; \u{1F9EA}, Add a failing test; \u{1F454}, Add or update business logic; \u{1FA7A}, Add or update healthcheck; \u{1F9F1}, Infrastructure related changes; \u{1F9D1}\u200D\u{1F4BB}, Improve developer experience; \u{1F4B8}, Add sponsorships or money related infrastructure; \u{1F9F5}, Add or update code related to multithreading or concurrency; \u{1F9BA}, Add or update code related to validation." : ""}` : "Do not preface the commit with anything. Conventional commit keywords:fix, feat, build, chore, ci, docs, style, refactor, perf, test."} ${config5?.OCO_DESCRIPTION ? `Add a short description of WHY the changes are done after the commit message. Don't start it with "This commit", just describe the changes.` : "Don't add any descriptions to the commit, only commit message."} ${config5?.OCO_ONE_LINE_COMMIT ? "Craft a concise commit message that encapsulates all changes made, with an emphasis on the primary updates. If the modifications share a common theme or scope, mention it succinctly; otherwise, leave the scope out to maintain focus. The goal is to provide a clear and unified overview of the changes in a one single message, without diverging into a list of commit per file change." 
: ""} @@ -27689,7 +28596,7 @@ var GenerateCommitMessageErrorEnum = ((GenerateCommitMessageErrorEnum2) => { return GenerateCommitMessageErrorEnum2; })(GenerateCommitMessageErrorEnum || {}); var ADJUSTMENT_FACTOR = 20; -var generateCommitMessageByDiff = async (diff, fullGitMojiSpec) => { +var generateCommitMessageByDiff = async (diff, fullGitMojiSpec = false) => { try { const INIT_MESSAGES_PROMPT = await getMainCommitPrompt(fullGitMojiSpec); const INIT_MESSAGES_PROMPT_LENGTH = INIT_MESSAGES_PROMPT.map( @@ -27702,11 +28609,9 @@ var generateCommitMessageByDiff = async (diff, fullGitMojiSpec) => { MAX_REQUEST_TOKENS, fullGitMojiSpec ); - const commitMessages = []; - for (const promise of commitMessagePromises) { - commitMessages.push(await promise); - await delay(2e3); - } + let commitMessages = []; + const messages2 = await Promise.all(commitMessagePromises); + commitMessages = [...commitMessages, ...messages2]; return commitMessages.join("\n\n"); } const messages = await generateCommitMessageChatCompletionPrompt(diff, fullGitMojiSpec); @@ -27798,9 +28703,6 @@ var getCommitMsgsPromisesFromFileDiffs = async (diff, maxDiffLength, fullGitMoji } return commitMessagePromises; }; -function delay(ms) { - return new Promise((resolve) => setTimeout(resolve, ms)); -} // src/utils/randomIntFromInterval.ts function randomIntFromInterval(min, max) { @@ -27973,3 +28875,19 @@ run(); * Copyright(c) 2015 Douglas Christopher Wilson * MIT Licensed */ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ diff --git a/package-lock.json b/package-lock.json index 1a26a7b2..5ea5f3af 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "opencommit", - "version": "3.0.11", + "version": "3.0.12", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "opencommit", - "version": "3.0.11", + "version": "3.0.12", "license": "MIT", "dependencies": { "@actions/core": "^1.10.0", @@ -14,6 +14,7 @@ "@actions/github": "^5.1.1", "@clack/prompts": "^0.6.1", "@dqbd/tiktoken": "^1.0.2", + "@google/generative-ai": "^0.10.0", "@octokit/webhooks-schemas": "^6.11.0", "@octokit/webhooks-types": "^6.11.0", "ai": "^2.2.14", @@ -335,6 +336,14 @@ "node": "^12.22.0 || ^14.17.0 || >=16.0.0" } }, + "node_modules/@google/generative-ai": { + "version": "0.10.0", + "resolved": "https://registry.npmjs.org/@google/generative-ai/-/generative-ai-0.10.0.tgz", + "integrity": "sha512-fZJEL8DcDgvBCguLdaAdBBEoh+83LDXK3m9rVh5iksvwVJDgZqkpsLGKJuM5FEBKltWhbJ62WSyMEUGgy8eMUg==", + "engines": { + "node": ">=18.0.0" + } + }, "node_modules/@humanwhocodes/config-array": { "version": "0.11.8", "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.8.tgz", diff --git a/package.json b/package.json index 0b2fe31b..7bd3501b 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "opencommit", - "version": "3.0.11", + "version": "3.0.12", "description": "Auto-generate impressive commits in 1 second. 
Killing lame commits with AI 🤯🔫", "keywords": [ "git", @@ -69,6 +69,7 @@ "@actions/github": "^5.1.1", "@clack/prompts": "^0.6.1", "@dqbd/tiktoken": "^1.0.2", + "@google/generative-ai": "^0.10.0", "@octokit/webhooks-schemas": "^6.11.0", "@octokit/webhooks-types": "^6.11.0", "ai": "^2.2.14", diff --git a/src/commands/config.ts b/src/commands/config.ts index 9bc2e3e9..dcc5fc78 100644 --- a/src/commands/config.ts +++ b/src/commands/config.ts @@ -14,6 +14,7 @@ import { getI18nLocal } from '../i18n'; dotenv.config(); export enum CONFIG_KEYS { + OCO_API_KEY = 'OCO_API_KEY', OCO_OPENAI_API_KEY = 'OCO_OPENAI_API_KEY', OCO_TOKENS_MAX_INPUT = 'OCO_TOKENS_MAX_INPUT', OCO_TOKENS_MAX_OUTPUT = 'OCO_TOKENS_MAX_OUTPUT', @@ -52,27 +53,55 @@ const validateConfig = ( } }; -export const configValidators = { - [CONFIG_KEYS.OCO_OPENAI_API_KEY](value: any, config: any = {}) { - //need api key unless running locally with ollama - validateConfig( - 'API_KEY', - value || config.OCO_AI_PROVIDER == 'ollama', - 'You need to provide an API key' - ); +const apiKeyValidator = (value: any, config: any = {}) => { + //need api key unless running locally with ollama + validateConfig( + 'API_KEY', + (value || process.env.OCO_OPENAI_API_KEY || process.env.OCO_API_KEY) || + config.OCO_AI_PROVIDER == 'ollama' || config.OCO_AI_PROVIDER == 'gemini', + 'You need to provide an API key' + ); + if (config.OCO_AI_PROVIDER != 'gemini') { validateConfig( - CONFIG_KEYS.OCO_OPENAI_API_KEY, + CONFIG_KEYS.OCO_API_KEY, value.startsWith('sk-'), 'Must start with "sk-"' ); validateConfig( - CONFIG_KEYS.OCO_OPENAI_API_KEY, + CONFIG_KEYS.OCO_API_KEY, config[CONFIG_KEYS.OCO_OPENAI_BASE_PATH] || value.length === 51, 'Must be 51 characters long' ); + } - return value; - }, + return value; +} + +export const configValidators = { + [CONFIG_KEYS.OCO_API_KEY]: apiKeyValidator, + [CONFIG_KEYS.OCO_OPENAI_API_KEY]: apiKeyValidator, + // [CONFIG_KEYS.OCO_API_KEY](value: any, config: any = {}) { + // //need api key unless running locally with ollama + // validateConfig( + // 'API_KEY', + // value || config.OCO_AI_PROVIDER == 'ollama' || config.OCO_AI_PROVIDER == 'gemini', + // 'You need to provide an API key' + // ); + // if (config.OCO_AI_PROVIDER != 'gemini') { + // validateConfig( + // CONFIG_KEYS.OCO_API_KEY, + // value.startsWith('sk-'), + // 'Must start with "sk-"' + // ); + // validateConfig( + // CONFIG_KEYS.OCO_API_KEY, + // config[CONFIG_KEYS.OCO_OPENAI_BASE_PATH] || value.length === 51, + // 'Must be 51 characters long' + // ); + // } + + // return value; + // }, [CONFIG_KEYS.OCO_DESCRIPTION](value: any) { validateConfig( @@ -150,19 +179,21 @@ export const configValidators = { return value; }, - [CONFIG_KEYS.OCO_MODEL](value: any) { - validateConfig( - CONFIG_KEYS.OCO_MODEL, - [ - 'gpt-3.5-turbo', - 'gpt-3.5-turbo-0125', - 'gpt-4', - 'gpt-4-1106-preview', - 'gpt-4-turbo-preview', - 'gpt-4-0125-preview' - ].includes(value), - `${value} is not supported yet, use 'gpt-4', 'gpt-3.5-turbo' (default), 'gpt-3.5-turbo-0125', 'gpt-4-1106-preview', 'gpt-4-turbo-preview' or 'gpt-4-0125-preview'` - ); + [CONFIG_KEYS.OCO_MODEL](value: any, config?: any) { + if (config.OCO_AI_PROVIDER != 'gemini') { + validateConfig( + CONFIG_KEYS.OCO_MODEL, + [ + 'gpt-3.5-turbo', + 'gpt-3.5-turbo-0125', + 'gpt-4', + 'gpt-4-1106-preview', + 'gpt-4-turbo-preview', + 'gpt-4-0125-preview' + ].includes(value), + `${value} is not supported yet, use 'gpt-4', 'gpt-3.5-turbo' (default), 'gpt-3.5-turbo-0125', 'gpt-4-1106-preview', 'gpt-4-turbo-preview' or 'gpt-4-0125-preview'` + ); + } 
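
In `src/commands/config.ts` above, one `apiKeyValidator` is registered for both the new `OCO_API_KEY` and the legacy `OCO_OPENAI_API_KEY`, and the OpenAI-specific shape checks (`sk-` prefix, 51-character length) are skipped for the `gemini` provider, as is the GPT model allow-list. Distilled to its skeleton (simplified sketch with an untyped config and plain throws instead of `validateConfig`/`process.exit`):

```ts
// Key checks are provider-aware: ollama and gemini may pass without a key
// here (gemini resolves its key in the engine), and only OpenAI keys must
// look like 'sk-...'.
const apiKeyValidator = (value: string | undefined, config: any = {}) => {
  const keyOptional =
    config.OCO_AI_PROVIDER === 'ollama' || config.OCO_AI_PROVIDER === 'gemini';
  if (!value && !keyOptional) throw new Error('You need to provide an API key');
  if (config.OCO_AI_PROVIDER !== 'gemini' && value) {
    if (!value.startsWith('sk-')) throw new Error('Must start with "sk-"');
    if (!config.OCO_OPENAI_BASE_PATH && value.length !== 51)
      throw new Error('Must be 51 characters long');
  }
  return value;
};

export const configValidators = {
  OCO_API_KEY: apiKeyValidator,        // new canonical key
  OCO_OPENAI_API_KEY: apiKeyValidator  // kept for backward compatibility
};
```
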
return value; }, [CONFIG_KEYS.OCO_MESSAGE_TEMPLATE_PLACEHOLDER](value: any) { @@ -190,7 +221,8 @@ [ '', 'openai', - 'ollama' + 'ollama', + 'gemini', ].includes(value), `${value} is not supported yet, use 'ollama', 'gemini' or 'openai' (default)` ); @@ -216,7 +248,7 @@ const configPath = pathJoin(homedir(), '.opencommit'); export const getConfig = (): ConfigType | null => { const configFromEnv = { - OCO_OPENAI_API_KEY: process.env.OCO_OPENAI_API_KEY, + OCO_API_KEY: process.env.OCO_API_KEY || process.env.OCO_OPENAI_API_KEY, OCO_TOKENS_MAX_INPUT: process.env.OCO_TOKENS_MAX_INPUT ? Number(process.env.OCO_TOKENS_MAX_INPUT) : undefined, @@ -237,7 +269,7 @@ export const getConfig = (): ConfigType | null => { const configExists = existsSync(configPath); if (!configExists) return configFromEnv; - + const configFile = readFileSync(configPath, 'utf8'); const config = iniParse(configFile); diff --git a/src/commands/prepare-commit-msg-hook.ts b/src/commands/prepare-commit-msg-hook.ts index 6e3e730b..1478e248 100644 --- a/src/commands/prepare-commit-msg-hook.ts +++ b/src/commands/prepare-commit-msg-hook.ts @@ -39,7 +39,7 @@ export const prepareCommitMessageHook = async ( const config = getConfig(); - if (!config?.OCO_OPENAI_API_KEY) { + if (!config?.OCO_OPENAI_API_KEY && !config?.OCO_API_KEY) { throw new Error( 'No OPEN_AI_API exists. Set your OPEN_AI_API= in ~/.opencommit' ); diff --git a/src/engine/gemini.ts b/src/engine/gemini.ts new file mode 100644 index 00000000..cb9b1341 --- /dev/null +++ b/src/engine/gemini.ts @@ -0,0 +1,185 @@ +import { Content, GenerativeModel, GoogleGenerativeAI, HarmBlockThreshold, HarmCategory } from '@google/generative-ai'; +import { ChatCompletionRequestMessage } from 'openai'; +import { AiEngine } from './Engine'; +import { CONFIG_MODES, DEFAULT_TOKEN_LIMITS, getConfig } from '../commands/config'; +import { intro, outro } from '@clack/prompts'; +import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff'; +import chalk from 'chalk'; + +type GeminiMessagePart = { + text: string; +} + +type GeminiChatHistory = { + role: 'user' | 'model'; + parts: GeminiMessagePart[]; +} + +export class GeminiAi implements AiEngine { + private readonly config = getConfig(); + private get MAX_TOKENS_OUTPUT() { + return this.config?.OCO_TOKENS_MAX_OUTPUT || 30720; + } + private get MAX_TOKENS_INPUT() { + return this.config?.OCO_TOKENS_MAX_INPUT || DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_INPUT; + } + private readonly genAi: GoogleGenerativeAI; + private readonly model: GenerativeModel; + + constructor() { + const config = getConfig(); + + const apiKey = config?.OCO_API_KEY || config?.OCO_OPENAI_API_KEY; + this.genAi = new GoogleGenerativeAI(apiKey); + + const isGeminiModel = (config?.OCO_AI_PROVIDER as string).trim().toLowerCase() === 'gemini'; + + const [command, mode] = process.argv.slice(2); + + if (!apiKey && command !== 'config' && mode !== CONFIG_MODES.set && isGeminiModel) { + intro('opencommit'); + + outro( + 'OCO_API_KEY is not set, please run `oco config set OCO_API_KEY=`. If you are using GPT, make sure you add payment details, so the API works.' + ); + outro( + 'For help look into README https://github.com/di-sukharev/opencommit#setup' + ); + + process.exit(1); + } + + const VALID_MODELS = ['gemini-1.0-pro-latest', 'gemini-pro', 'gemini-1.0-pro-001', 'gemini-1.5-pro-latest']; + const DEFAULT_MODEL = 'gemini-1.5-pro-latest'; + const MODEL = (config?.OCO_MODEL || DEFAULT_MODEL).trim().toLowerCase(); + + if (!VALID_MODELS.includes(MODEL)) { + intro('opencommit'); + outro( + `OCO_MODEL is not set to a valid model. Please run 'oco config set OCO_MODEL=${DEFAULT_MODEL}'` + ); + outro('Valid models are: ' + VALID_MODELS.join(', ')); + process.exit(1); + } + + this.model = this.genAi.getGenerativeModel({ + model: MODEL, + safetySettings: [ + { + category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT, + threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE, + }, + { + category: HarmCategory.HARM_CATEGORY_HATE_SPEECH, + threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE, + }, + { + category: HarmCategory.HARM_CATEGORY_HARASSMENT, + threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE, + }, + { + category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, + threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE, + }, + ] + }); + } + + async generateCommitMessage( + messages: Array<ChatCompletionRequestMessage> + ): Promise<string> { + const systemMessages = [] as GeminiMessagePart[]; + const history = { + user: [] as GeminiMessagePart[], + model: [] as GeminiMessagePart[], + }; + + systemMessages.push(...messages.map(m => { + if (m.role === 'system') { + return { text: `${m.content}\n\n + The commit message should start with a single subject line that is a brief overview description that summarizes all of the changes. It should not + exceed 50 characters and should be capitalized and written in the imperative mood. The subject line should be followed by a blank line and then the body of the commit message.\n\n + The body of the commit should provide more details about the changes made. Each commit message should be a single logical change.\n\n + Here's an example of a well-formed commit message:\n\n + Adds support for the Gemini language model engine, allowing users to utilize Gemini for generating commit messages.\n\n + ✨ (utils/engine.ts): add support for Gemini engine\n + ♻️ (openAi.ts & utils/engine.ts): add support for OCO_API_KEY env variable to configure apiKey\n + ` }; + } + + if (m.role === 'user') { + return { text: `This is an example of a git diff --staged command output, it should not be included in the commit message: \n\n${m.content}`}; + } + + return { text: m.content }; + })); + + let prompt: GeminiChatHistory[] = [ + { + role: 'user', + parts: [...history.user], + }, + { + role: 'model', + parts: [...history.model], + } + ];
+  async generateCommitMessage(
+    messages: Array<ChatCompletionRequestMessage>
+  ): Promise<string> {
+    // Every incoming message is folded into the system instruction: the
+    // system prompt gets formatting guidance appended, and the user diff is
+    // wrapped with a reminder not to echo it back into the commit message.
+    const systemMessages: GeminiMessagePart[] = messages.map((m) => {
+      if (m.role === 'system') {
+        return {
+          text: `${m.content}\n\nThe commit message should start with a single subject line that briefly summarizes all of the changes. It should not exceed 50 characters and should be capitalized and written in the imperative mood. The subject line should be followed by a blank line and then the body of the commit message.\n\nThe body of the commit should provide more details about the changes made. Each commit message should be a single logical change.\n\nHere's an example of a well-formed commit message:\n\nAdds support for the Gemini language model engine, allowing users to utilize Gemini for generating commit messages.\n\n✨ (utils/engine.ts): add support for Gemini engine\n♻️ (openAi.ts & utils/engine.ts): add support for OCO_API_KEY env variable to configure apiKey\n`
+        };
+      }
+
+      if (m.role === 'user') {
+        return {
+          text: `This is an example of a git diff --staged command output, it should not be included in the commit message:\n\n${m.content}`
+        };
+      }
+
+      return { text: m.content ?? '' };
+    });
+
+    // Budget check: count the tokens we are about to send and leave room
+    // for the configured output size.
+    const requestTokens = await this.model.countTokens(
+      systemMessages.map((part) => part.text).join('\n')
+    );
+    const tokenLimit = Math.abs(this.MAX_TOKENS_INPUT - this.MAX_TOKENS_OUTPUT);
+
+    if (requestTokens.totalTokens > tokenLimit) {
+      throw new Error(GenerateCommitMessageErrorEnum.tooMuchTokens);
+    }
+
+    try {
+      const chat = this.model.startChat({
+        systemInstruction: { role: 'system', parts: systemMessages } as Content
+      });
+
+      const result = await chat.sendMessage([
+        {
+          text: 'You MUST NEVER include any of the output from the `git diff --staged` command in your commit message, and you can ignore changes inside of the `out` directory.'
+        },
+        {
+          text: `You should include a brief summary of changes to each file in the 'git diff --staged' output as part of the commit message.`
+        },
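A minimal usage sketch for the new engine, assuming the `AiEngine` contract from `src/engine/Engine` and openai's message shape; the wrapper function, diff text, and import path are illustrative:

```ts
import { ChatCompletionRequestMessage } from 'openai';
import { GeminiAi } from './gemini';

// Sketch: how a caller would drive the engine end to end.
async function demo() {
  const engine = new GeminiAi();
  const messages: ChatCompletionRequestMessage[] = [
    { role: 'system', content: 'Write a conventional commit message.' },
    { role: 'user', content: 'diff --git a/foo.ts b/foo.ts ...' } // placeholder diff
  ];
  const commitMessage = await engine.generateCommitMessage(messages);
  console.log(commitMessage);
}
```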
+        {
+          text: 'Lastly, please do not include contextual information explaining new libraries or tools that were added to the project. This information is not necessary for the commit message. The commit message should concisely focus on the changes made to the codebase.'
+        }
+      ]);
+
+      return result.response.text();
+    } catch (err: any) {
+      const error = err as Error;
+
+      outro(`${chalk.red('✖')} ${error?.message || error}`);
+
+      throw error;
+    }
+  }
+}
diff --git a/src/engine/index.ts b/src/engine/index.ts
new file mode 100644
index 00000000..9124864c
--- /dev/null
+++ b/src/engine/index.ts
@@ -0,0 +1,3 @@
+export * from './openAi';
+export * from './gemini';
+export * from './ollama';
diff --git a/src/engine/ollama.ts b/src/engine/ollama.ts
index f431ee1b..56e68888 100644
--- a/src/engine/ollama.ts
+++ b/src/engine/ollama.ts
@@ -35,4 +35,3 @@ export class OllamaAi implements AiEngine {
   }
 }
-
-export const ollamaAi = new OllamaAi();
diff --git a/src/engine/openAi.ts b/src/engine/openAi.ts
index 4148073c..a0a6f4f3 100644
--- a/src/engine/openAi.ts
+++ b/src/engine/openAi.ts
@@ -1,6 +1,5 @@
 import axios from 'axios';
 import chalk from 'chalk';
-import { execa } from 'execa';
 import {
   ChatCompletionRequestMessage,
   Configuration as OpenAiApiConfiguration,
@@ -23,18 +22,17 @@ const config = getConfig();
 const MAX_TOKENS_OUTPUT = config?.OCO_TOKENS_MAX_OUTPUT || DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_OUTPUT;
 const MAX_TOKENS_INPUT = config?.OCO_TOKENS_MAX_INPUT || DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_INPUT;
 let basePath = config?.OCO_OPENAI_BASE_PATH;
-let apiKey = config?.OCO_OPENAI_API_KEY
+let apiKey = config?.OCO_API_KEY || config?.OCO_OPENAI_API_KEY;

 const [command, mode] = process.argv.slice(2);

 const isLocalModel = config?.OCO_AI_PROVIDER == 'ollama'
-
 if (!apiKey && command !== 'config' && mode !== CONFIG_MODES.set && !isLocalModel) {
   intro('opencommit');

   outro(
-    'OCO_OPENAI_API_KEY is not set, please run `oco config set OCO_OPENAI_API_KEY= . If you are using GPT, make sure you add payment details, so API works.`'
+    'OCO_API_KEY is not set, please run `oco config set OCO_API_KEY=` with your key. If you are using GPT, make sure you add payment details, so the API works.'
   );
   outro(
     'For help look into README https://github.com/di-sukharev/opencommit#setup'
@@ -45,7 +43,7 @@ if (!apiKey && command !== 'config' && mode !== CONFIG_MODES.set && !isLocalMode

 const MODEL = config?.OCO_MODEL || 'gpt-3.5-turbo';

-class OpenAi implements AiEngine {
+export class OpenAi implements AiEngine {
   private openAiApiConfiguration = new OpenAiApiConfiguration({
     apiKey: apiKey
   });
@@ -105,6 +103,3 @@ class OpenAi implements AiEngine {
   };
 }
-
-
-export const api = new OpenAi();
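Dropping the `api` and `ollamaAi` singleton exports means engines are now constructed when requested rather than at import time, so `getConfig()` reflects the config as it is when a command runs. A small sketch of the difference, with hypothetical names:

```ts
// Before: a module-level singleton captured config once, at import time.
//   export const api = new OpenAi();
// After: construction happens per use, so `oco config set ...` takes
// effect without stale module state.
import { OpenAi } from './openAi';

const engine = new OpenAi(); // reads config at construction, not at import
```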
diff --git a/src/generateCommitMessageFromGitDiff.ts b/src/generateCommitMessageFromGitDiff.ts
index d953df89..a8032d1f 100644
--- a/src/generateCommitMessageFromGitDiff.ts
+++ b/src/generateCommitMessageFromGitDiff.ts
@@ -40,7 +40,7 @@ const ADJUSTMENT_FACTOR = 20;

 export const generateCommitMessageByDiff = async (
   diff: string,
-  fullGitMojiSpec: boolean
+  fullGitMojiSpec = false
 ): Promise<string> => {
   try {
     const INIT_MESSAGES_PROMPT = await getMainCommitPrompt(fullGitMojiSpec);
@@ -62,11 +62,7 @@ export const generateCommitMessageByDiff = async (
       fullGitMojiSpec
     );

-    const commitMessages = [];
-    for (const promise of commitMessagePromises) {
-      commitMessages.push(await promise);
-      await delay(2000);
-    }
+    const commitMessages = (await Promise.all(commitMessagePromises)) as string[];

     return commitMessages.join('\n\n');
   }
@@ -100,7 +98,7 @@ function getMessagesPromisesByChangesInFile(
     maxChangeLength
   );

-  const lineDiffsWithHeader = [];
+  const lineDiffsWithHeader: string[] = [];
   for (const change of mergedChanges) {
     const totalChange = fileHeader + change;
     if (tokenCount(totalChange) > maxChangeLength) {
@@ -129,7 +127,7 @@ function getMessagesPromisesByChangesInFile(

 function splitDiff(diff: string, maxChangeLength: number) {
   const lines = diff.split('\n');
-  const splitDiffs = [];
+  const splitDiffs: string[] = [];
   let currentDiff = '';

   if (maxChangeLength <= 0) {
@@ -175,7 +173,7 @@ export const getCommitMsgsPromisesFromFileDiffs = async (
   // merge multiple files-diffs into 1 prompt to save tokens
   const mergedFilesDiffs = mergeDiffs(diffByFiles, maxDiffLength);

-  const commitMessagePromises = [];
+  const commitMessagePromises: Promise<string>[] = [];

   for (const fileDiff of mergedFilesDiffs) {
     if (tokenCount(fileDiff) >= maxDiffLength) {
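The `@@ -62` hunk above swaps the sequential `await` loop and its fixed 2-second delay for `Promise.all`, so per-chunk requests now run concurrently. If provider rate limits bite without the old delay, a bounded-concurrency helper could be swapped in. The sketch below is hypothetical, not part of the patch, and assumes callers hand over thunks rather than already-started promises:

```ts
// Hypothetical bounded-concurrency alternative to a bare Promise.all:
// runs at most `limit` prompt requests at a time.
async function allWithLimit<T>(
  tasks: Array<() => Promise<T>>,
  limit = 3
): Promise<T[]> {
  const results: T[] = [];
  for (let i = 0; i < tasks.length; i += limit) {
    // Fire one batch, wait for it to settle, then start the next.
    const batch = tasks.slice(i, i + limit).map((task) => task());
    results.push(...(await Promise.all(batch)));
  }
  return results;
}
```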
diff --git a/src/prompts.ts b/src/prompts.ts
index 74f59138..7e759618 100644
--- a/src/prompts.ts
+++ b/src/prompts.ts
@@ -27,6 +27,7 @@ const INIT_MAIN_PROMPT = (
     content: `${IDENTITY} Your mission is to create clean and comprehensive commit messages as per the ${
       fullGitMojiSpec ? 'GitMoji specification' : 'conventional commit convention'
     } and explain WHAT were the changes and mainly WHY the changes were done. I'll send you an output of 'git diff --staged' command, and you are to convert it into a commit message.
+    Only include changes that are relevant to this commit; leave out anything unrelated.
     ${
       config?.OCO_EMOJI
         ? 'Use GitMoji convention to preface the commit. Here are some help to choose the right emoji (emoji, description): ' +
diff --git a/src/utils/engine.ts b/src/utils/engine.ts
index 74d780b1..6b3e87ed 100644
--- a/src/utils/engine.ts
+++ b/src/utils/engine.ts
@@ -1,13 +1,16 @@
 import { AiEngine } from '../engine/Engine';
-import { api } from '../engine/openAi';
 import { getConfig } from '../commands/config';
-import { ollamaAi } from '../engine/ollama';
+import * as engines from '../engine';

 export function getEngine(): AiEngine {
   const config = getConfig();
+
   if (config?.OCO_AI_PROVIDER == 'ollama') {
-    return ollamaAi;
+    return new engines.OllamaAi();
+  } else if (config?.OCO_AI_PROVIDER == 'gemini') {
+    return new engines.GeminiAi();
   }
+
   // OpenAI GPT by default
-  return api;
+  return new engines.OpenAi();
 }
diff --git a/tsconfig.json b/tsconfig.json
index 3998f291..650fd67d 100644
--- a/tsconfig.json
+++ b/tsconfig.json
@@ -1,9 +1,9 @@
 {
   "compilerOptions": {
-    "target": "ESNext",
-    "lib": ["ES5", "ES6"],
+    "target": "ES2020",
+    "lib": ["es6", "es2020"],
-    "module": "ESNext",
+    "module": "CommonJS",
     // "rootDir": "./src",
     "resolveJsonModule": true,
     "moduleResolution": "node",
@@ -13,17 +13,15 @@
     "outDir": "./out",
     "esModuleInterop": true,
+    "forceConsistentCasingInFileNames": true,
     "strict": true,
     "noUnusedLocals": true,
     "noUnusedParameters": true,
-    "skipLibCheck": true
+    "skipLibCheck": true,
+    "noImplicitAny": false
   },
-  "exclude": ["node_modules"],
-  "ts-node": {
-    "esm": true,
-    "experimentalSpecifierResolution": "node"
-  }
+  "exclude": ["node_modules"]
 }
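Closing sketch of how the pieces fit after this patch, assuming the `AiEngine` contract; the wrapper function and import paths are illustrative:

```ts
import { ChatCompletionRequestMessage } from 'openai';
import { getEngine } from './src/utils/engine';

// Sketch: OCO_AI_PROVIDER drives selection — 'ollama' and 'gemini' map to
// their engines, anything else (including unset) falls back to OpenAI.
async function demo(diff: string) {
  const engine = getEngine();
  const messages: ChatCompletionRequestMessage[] = [
    { role: 'user', content: diff }
  ];
  console.log(await engine.generateCommitMessage(messages));
}
```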