diff --git a/go.mod b/go.mod
index b67003e5fc057..223da44d92d30 100644
--- a/go.mod
+++ b/go.mod
@@ -230,6 +230,7 @@ require (
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/daviddengcn/go-colortext v1.0.0 // indirect
 	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
+	github.com/dlclark/regexp2 v1.9.0 // indirect
 	github.com/docker/distribution v2.8.1+incompatible // indirect
 	github.com/dvsekhvalnov/jose2go v1.5.0 // indirect
 	github.com/elastic/elastic-transport-go/v8 v8.1.0 // indirect
@@ -332,12 +333,14 @@ require (
 	github.com/rs/zerolog v1.28.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect
+	github.com/sashabaranov/go-openai v1.9.3
 	github.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500 // indirect
 	github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 // indirect
 	github.com/siddontang/go-log v0.0.0-20180807004314-8d05993dda07 // indirect
 	github.com/spf13/cobra v1.6.1 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/thales-e-security/pool v0.0.2 // indirect
+	github.com/tiktoken-go/tokenizer v0.1.0
 	github.com/x448/float16 v0.8.4 // indirect
 	github.com/xdg-go/pbkdf2 v1.0.0 // indirect
 	github.com/xdg-go/scram v1.1.1 // indirect
diff --git a/go.sum b/go.sum
index 779e743756b92..d028f1a7ca577 100644
--- a/go.sum
+++ b/go.sum
@@ -350,6 +350,8 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm
 github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
 github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/dlclark/regexp2 v1.9.0 h1:pTK/l/3qYIKaRXuHnEnIf7Y5NxfRPfpb7dis6/gdlVI=
+github.com/dlclark/regexp2 v1.9.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
 github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko=
 github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
 github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
@@ -1166,6 +1168,8 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb
 github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=
 github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
 github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
+github.com/sashabaranov/go-openai v1.9.3 h1:uNak3Rn5pPsKRs9bdT7RqRZEyej/zdZOEI2/8wvrFtM=
+github.com/sashabaranov/go-openai v1.9.3/go.mod h1:lj5b/K+zjTSFxVLijLSTDZuP7adOgerWeFyZLUhAKRg=
 github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
 github.com/schollz/progressbar/v3 v3.13.0 h1:9TeeWRcjW2qd05I8Kf9knPkW4vLM/hYoa6z9ABvxje8=
 github.com/schollz/progressbar/v3 v3.13.0/go.mod h1:ZBYnSuLAX2LU8P8UiKN/KgF2DY58AJC8yfVYLPC8Ly4=
@@ -1235,6 +1239,8 @@ github.com/thales-e-security/pool v0.0.2 h1:RAPs4q2EbWsTit6tpzuvTFlgFRJ3S8Evf5gt
 github.com/thales-e-security/pool v0.0.2/go.mod h1:qtpMm2+thHtqhLzTwgDBj/OuNnMpupY8mv0Phz0gjhU=
 github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
 github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/tiktoken-go/tokenizer v0.1.0 h1:c1fXriHSR/NmhMDTwUDLGiNhHwTV+ElABGvqhCWLRvY=
+github.com/tiktoken-go/tokenizer v0.1.0/go.mod h1:7SZW3pZUKWLJRilTvWCa86TOVIiiJhYj3FQ5V3alWcg=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
diff --git a/lib/ai/chat.go b/lib/ai/chat.go
new file mode 100644
index 0000000000000..d6d7314c230c7
--- /dev/null
+++ b/lib/ai/chat.go
@@ -0,0 +1,229 @@
+/*
+ * Copyright 2023 Gravitational, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package ai
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"io"
+	"strings"
+
+	"github.com/gravitational/trace"
+	"github.com/sashabaranov/go-openai"
+	"github.com/tiktoken-go/tokenizer"
+)
+
+const maxResponseTokens = 2000
+
+// Chat represents a conversation between a user and an assistant with context memory.
+type Chat struct {
+	client    *Client
+	messages  []openai.ChatCompletionMessage
+	tokenizer tokenizer.Codec
+}
+
+// Insert inserts a message into the conversation. This is commonly in the
+// form of a user's input but may also take the form of a system message used for instructions.
+func (chat *Chat) Insert(role string, content string) Message {
+	chat.messages = append(chat.messages, openai.ChatCompletionMessage{
+		Role:    role,
+		Content: content,
+	})
+
+	return Message{
+		Role:    role,
+		Content: content,
+		Idx:     len(chat.messages) - 1,
+	}
+}
+
+// PromptTokens uses the chat's tokenizer to calculate
+// the total number of tokens in the prompt.
+//
+// Ref: https://github.com/openai/openai-cookbook/blob/594fc6c952425810e9ea5bd1a275c8ca5f32e8f9/examples/How_to_count_tokens_with_tiktoken.ipynb
+func (chat *Chat) PromptTokens() (int, error) {
+	// perRequest is the number of tokens used up for each completion request
+	const perRequest = 3
+	// perRole is the number of tokens used to encode a message's role
+	const perRole = 1
+	// perMessage is the token "overhead" for each message
+	const perMessage = 3
+
+	sum := perRequest
+	for _, m := range chat.messages {
+		tokens, _, err := chat.tokenizer.Encode(m.Content)
+		if err != nil {
+			return 0, trace.Wrap(err)
+		}
+		sum += len(tokens)
+		sum += perRole
+		sum += perMessage
+	}
+
+	return sum, nil
+}
+
+// Summary creates a short summary for the given input.
+func (chat *Chat) Summary(ctx context.Context, message string) (string, error) {
+	resp, err := chat.client.svc.CreateChatCompletion(
+		ctx,
+		openai.ChatCompletionRequest{
+			Model: openai.GPT4,
+			Messages: []openai.ChatCompletionMessage{
+				{Role: openai.ChatMessageRoleSystem, Content: promptSummarizeTitle},
+				{Role: openai.ChatMessageRoleUser, Content: message},
+			},
+		},
+	)
+	if err != nil {
+		return "", trace.Wrap(err)
+	}
+
+	return resp.Choices[0].Message.Content, nil
+}
+
+// Complete completes the conversation with a message from the assistant based on the current context.
+// It returns one of the following types behind the `any` return value:
+// - *Message: a plain text message from the assistant; NumTokens carries the number of completion tokens used
+// - *CompletionCommand: a command extracted from the assistant's response; NumTokens carries the number of completion tokens used
+// - *StreamingMessage: a message streamed from the assistant chunk by chunk
+func (chat *Chat) Complete(ctx context.Context) (any, error) {
+	var numTokens int
+
+	// if the chat is empty, return the initial response we predefine instead of querying GPT-4
+	if len(chat.messages) == 1 {
+		return &Message{
+			Role:    openai.ChatMessageRoleAssistant,
+			Content: initialAIResponse,
+			Idx:     len(chat.messages) - 1,
+		}, nil
+	}
+
+	// if not, copy the current chat log to a new slice and append the suffix instruction
+	messages := make([]openai.ChatCompletionMessage, len(chat.messages)+1)
+	copy(messages, chat.messages)
+	messages[len(messages)-1] = openai.ChatCompletionMessage{
+		Role:    openai.ChatMessageRoleUser,
+		Content: promptExtractInstruction,
+	}
+
+	// create a streaming completion request; this lets us optimistically stream the response
+	// when we don't believe it's a JSON payload
+	stream, err := chat.client.svc.CreateChatCompletionStream(
+		ctx,
+		openai.ChatCompletionRequest{
+			Model:     openai.GPT4,
+			Messages:  messages,
+			MaxTokens: maxResponseTokens,
+			Stream:    true,
+		},
+	)
+	if err != nil {
+		return nil, trace.Wrap(err)
+	}
+
+	var (
+		response openai.ChatCompletionStreamResponse
+		trimmed  string
+	)
+	for trimmed == "" {
+		// fetch the first delta to check for a possible JSON payload
+		response, err = stream.Recv()
+		if err != nil {
+			return nil, trace.Wrap(err)
+		}
+		numTokens++
+
+		trimmed = strings.TrimSpace(response.Choices[0].Delta.Content)
+	}
+
+	// if it looks like a JSON payload, let's wait for the entire response and try to parse it
+	if strings.HasPrefix(trimmed, "{") {
+		payload := strings.Builder{}
+		payload.WriteString(response.Choices[0].Delta.Content)
+
+		for {
+			response, err := stream.Recv()
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			if err != nil {
+				return nil, trace.Wrap(err)
+			}
+			numTokens++
+
+			payload.WriteString(response.Choices[0].Delta.Content)
+		}
+
+		// if we can parse it, return the parsed payload, otherwise return a non-streaming message
+		var c CompletionCommand
+		err = json.Unmarshal([]byte(payload.String()), &c)
+		switch err {
+		case nil:
+			c.NumTokens = numTokens
+			return &c, nil
+		default:
+			return &Message{
+				Role:      openai.ChatMessageRoleAssistant,
+				Content:   payload.String(),
+				Idx:       len(chat.messages) - 1,
+				NumTokens: numTokens,
+			}, nil
+		}
+	}
+
+	// if it doesn't look like a JSON payload, return a streaming message to the caller
+	chunks := make(chan string, 1)
+	errCh := make(chan error)
+	chunks <- response.Choices[0].Delta.Content
+	go func() {
+		defer close(chunks)
+
+		for {
+			response, err := stream.Recv()
+			switch {
+			case errors.Is(err, io.EOF):
+				return
+			case err != nil:
+				select {
+				case <-ctx.Done():
+				case errCh <- trace.Wrap(err):
+				}
+				return
+			}
+
+			select {
+			case chunks <- response.Choices[0].Delta.Content:
+			case <-ctx.Done():
+				return
+			}
+		}
+	}()
+
+	return &StreamingMessage{
+		Role:   openai.ChatMessageRoleAssistant,
+		Idx:    len(chat.messages) - 1,
+		Chunks: chunks,
+		Error:  errCh,
+	}, nil
+}
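Taken together, the API above is driven by seeding messages with Insert and then branching on the concrete type Complete returns. The following is a minimal caller sketch, not part of this change; it assumes the package is imported as github.com/gravitational/teleport/lib/ai and that a real API key is supplied.

    package main

    import (
        "context"
        "fmt"

        "github.com/gravitational/teleport/lib/ai"
        "github.com/sashabaranov/go-openai"
    )

    func main() {
        client := ai.NewClient("sk-...") // OpenAI API key
        chat := client.NewChat("alice")
        chat.Insert(openai.ChatMessageRoleUser, "Show me free disk space on the web-1 node")

        out, err := chat.Complete(context.Background())
        if err != nil {
            panic(err)
        }

        switch m := out.(type) {
        case *ai.Message:
            // plain, non-streaming reply (e.g. the canned initial response)
            fmt.Println("assistant:", m.Content)
        case *ai.CompletionCommand:
            // the assistant produced a command payload
            fmt.Printf("run %q on nodes %v\n", m.Command, m.Nodes)
        case *ai.StreamingMessage:
            // Drain Chunks and Error together: the producer goroutine blocks on the
            // unbuffered Error channel, so a consumer that only ranges over Chunks
            // would deadlock if the stream fails mid-way.
            for {
                select {
                case chunk, ok := <-m.Chunks:
                    if !ok {
                        return // stream finished
                    }
                    fmt.Print(chunk)
                case err := <-m.Error:
                    fmt.Println("stream error:", err)
                    return
                }
            }
        }
    }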
diff --git a/lib/ai/chat_test.go b/lib/ai/chat_test.go
new file mode 100644
index 0000000000000..c1673d25ad88a
--- /dev/null
+++ b/lib/ai/chat_test.go
@@ -0,0 +1,210 @@
+/*
+ * Copyright 2023 Gravitational, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package ai
+
+import (
+	"context"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+
+	"github.com/sashabaranov/go-openai"
+	"github.com/stretchr/testify/require"
+	"github.com/tiktoken-go/tokenizer/codec"
+)
+
+func TestChat_PromptTokens(t *testing.T) {
+	t.Parallel()
+
+	tests := []struct {
+		name     string
+		messages []openai.ChatCompletionMessage
+		want     int
+		wantErr  bool
+	}{
+		{
+			name:     "empty",
+			messages: []openai.ChatCompletionMessage{},
+			want:     3,
+		},
+		{
+			name: "only system message",
+			messages: []openai.ChatCompletionMessage{
+				{
+					Role:    openai.ChatMessageRoleSystem,
+					Content: "Hello",
+				},
+			},
+			want: 8,
+		},
+		{
+			name: "system and user messages",
+			messages: []openai.ChatCompletionMessage{
+				{
+					Role:    openai.ChatMessageRoleSystem,
+					Content: "Hello",
+				},
+				{
+					Role:    openai.ChatMessageRoleUser,
+					Content: "Hi LLM.",
+				},
+			},
+			want: 16,
+		},
+		{
+			name: "tokenize our prompt",
+			messages: []openai.ChatCompletionMessage{
+				{
+					Role:    openai.ChatMessageRoleSystem,
+					Content: promptCharacter("Bob"),
+				},
+				{
+					Role:    openai.ChatMessageRoleUser,
+					Content: "Show me free disk space on localhost node.",
+				},
+			},
+			want: 187,
+		},
+	}
+
+	for _, tt := range tests {
+		tt := tt
+		t.Run(tt.name, func(t *testing.T) {
+			t.Parallel()
+
+			chat := &Chat{
+				messages:  tt.messages,
+				tokenizer: codec.NewCl100kBase(),
+			}
+			usedTokens, err := chat.PromptTokens()
+			require.NoError(t, err)
+			require.Equal(t, tt.want, usedTokens)
+		})
+	}
+}
+
+func TestChat_Complete(t *testing.T) {
+	t.Parallel()
+
+	responses := [][]byte{
+		generateTextResponse(),
+		generateCommandResponse(),
+	}
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "text/event-stream")
+
+		require.GreaterOrEqual(t, len(responses), 1, "Unexpected request")
+		dataBytes := responses[0]
+
+		_, err := w.Write(dataBytes)
+		require.NoError(t, err, "Write error")
+
+		responses = responses[1:]
+	}))
+	defer server.Close()
+
+	cfg := openai.DefaultConfig("secret-test-token")
+	cfg.BaseURL = server.URL + "/v1"
+	client := NewClientFromConfig(cfg)
+
+	chat := client.NewChat("Bob")
+
+	t.Run("initial message", func(t *testing.T) {
+		msg, err := chat.Complete(context.Background())
+		require.NoError(t, err)
+
+		expectedResp := &Message{
+			Role:    "assistant",
+			Content: "Hey, I'm Teleport - a powerful tool that can assist you in managing your Teleport cluster via ChatGPT.",
+			Idx:     0,
+		}
+		require.Equal(t, expectedResp, msg)
+	})
+
+	t.Run("text completion", func(t *testing.T) {
+		chat.Insert(openai.ChatMessageRoleUser, "Show me free disk space")
+
+		msg, err := chat.Complete(context.Background())
+		require.NoError(t, err)
+
+		require.IsType(t, &StreamingMessage{}, msg)
+		streamingMessage := msg.(*StreamingMessage)
+		require.Equal(t, openai.ChatMessageRoleAssistant, streamingMessage.Role)
+
+		require.Equal(t, "Which ", <-streamingMessage.Chunks)
+		require.Equal(t, "node do ", <-streamingMessage.Chunks)
+		require.Equal(t, "you want ", <-streamingMessage.Chunks)
+		require.Equal(t, "use?", <-streamingMessage.Chunks)
+	})
+
+	t.Run("command completion", func(t *testing.T) {
+		chat.Insert(openai.ChatMessageRoleUser, "localhost")
+
+		msg, err := chat.Complete(context.Background())
+		require.NoError(t, err)
+
+		require.IsType(t, &CompletionCommand{}, msg)
+		command := msg.(*CompletionCommand)
+		require.Equal(t, "df -h", command.Command)
+		require.Len(t, command.Nodes, 1)
+		require.Equal(t, "localhost", command.Nodes[0])
+	})
+}
+
+// generateTextResponse generates a response for a text completion
+func generateTextResponse() []byte {
+	dataBytes := []byte{}
+	dataBytes = append(dataBytes, []byte("event: message\n")...)
+
+	data := `{"id":"1","object":"completion","created":1598069254,"model":"gpt-4","choices":[{"index": 0, "delta":{"content": "Which ", "role": "assistant"}}]}`
+	dataBytes = append(dataBytes, []byte("data: "+data+"\n\n")...)
+	dataBytes = append(dataBytes, []byte("event: message\n")...)
+
+	data = `{"id":"2","object":"completion","created":1598069254,"model":"gpt-4","choices":[{"index": 0, "delta":{"content": "node do ", "role": "assistant"}}]}`
+	dataBytes = append(dataBytes, []byte("data: "+data+"\n\n")...)
+	dataBytes = append(dataBytes, []byte("event: message\n")...)
+
+	data = `{"id":"3","object":"completion","created":1598069255,"model":"gpt-4","choices":[{"index": 0, "delta":{"content": "you want ", "role": "assistant"}}]}`
+	dataBytes = append(dataBytes, []byte("data: "+data+"\n\n")...)
+	dataBytes = append(dataBytes, []byte("event: message\n")...)
+
+	data = `{"id":"4","object":"completion","created":1598069254,"model":"gpt-4","choices":[{"index": 0, "delta":{"content": "use?", "role": "assistant"}}]}`
+	dataBytes = append(dataBytes, []byte("data: "+data+"\n\n")...)
+	dataBytes = append(dataBytes, []byte("event: done\n")...)
+
+	dataBytes = append(dataBytes, []byte("data: [DONE]\n\n")...)
+
+	return dataBytes
+}
+
+// generateCommandResponse generates a response for the command "df -h" on the node "localhost"
+func generateCommandResponse() []byte {
+	dataBytes := []byte{}
+	dataBytes = append(dataBytes, []byte("event: message\n")...)
+
+	data := `{"id":"1","object":"completion","created":1598069254,"model":"gpt-4","choices":[{"index": 0, "delta":{"content": "{\"command\": \"df -h\",", "role": "assistant"}}]}`
+	dataBytes = append(dataBytes, []byte("data: "+data+"\n\n")...)
+
+	dataBytes = append(dataBytes, []byte("event: message\n")...)
+
+	data = `{"id":"2","object":"completion","created":1598069254,"model":"gpt-4","choices":[{"index": 0, "delta":{"content": "\"nodes\": [\"localhost\"]}", "role": "assistant"}}]}`
+	dataBytes = append(dataBytes, []byte("data: "+data+"\n\n")...)
+
+	dataBytes = append(dataBytes, []byte("event: done\n")...)
+	dataBytes = append(dataBytes, []byte("data: [DONE]\n\n")...)
+
+	return dataBytes
+}
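The expected counts in TestChat_PromptTokens can be derived by hand from the constants in PromptTokens: a flat 3 tokens per request, plus, for every message, its cl100k_base-encoded length plus 1 token for the role and 3 tokens of per-message overhead. "Hello" encodes to a single token, so the system-only case is 3 + (1 + 1 + 3) = 8; "Hi LLM." encodes to four tokens, adding (4 + 1 + 3) = 8 more for a total of 16. The 187 in the last case depends on the exact text of promptCharacter("Bob") plus the user message, so it must be re-derived whenever that prompt changes.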
diff --git a/lib/ai/client.go b/lib/ai/client.go
new file mode 100644
index 0000000000000..3c295e97cff14
--- /dev/null
+++ b/lib/ai/client.go
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2023 Gravitational, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package ai
+
+import (
+	"github.com/sashabaranov/go-openai"
+	"github.com/tiktoken-go/tokenizer/codec"
+)
+
+// Client is a client for OpenAI API.
+type Client struct {
+	svc *openai.Client
+}
+
+// NewClient creates a new client for OpenAI API with the given auth token.
+func NewClient(authToken string) *Client {
+	return &Client{openai.NewClient(authToken)}
+}
+
+// NewClientFromConfig creates a new client for OpenAI API from config.
+func NewClientFromConfig(config openai.ClientConfig) *Client {
+	return &Client{openai.NewClientWithConfig(config)}
+}
+
+// NewChat creates a new chat. The username is set in the conversation context,
+// so that the AI can use it to personalize the conversation.
+func (client *Client) NewChat(username string) *Chat {
+	return &Chat{
+		client: client,
+		messages: []openai.ChatCompletionMessage{
+			{
+				Role:    openai.ChatMessageRoleSystem,
+				Content: promptCharacter(username),
+			},
+		},
+		// Initialize a tokenizer for prompt token accounting.
+		// cl100k_base is the encoding used by GPT-3.5 and GPT-4.
+		tokenizer: codec.NewCl100kBase(),
+	}
+}
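One gap worth flagging: nothing here checks PromptTokens against a model context window before sending; maxResponseTokens only caps the completion length. A hypothetical helper along these lines shows how PromptTokens lends itself to that budgeting. The 8192-token window is an assumption for the 8k GPT-4 variant and is not defined anywhere in this change; checkBudget and the budget package are illustrative names only.

    package budget

    import (
        "fmt"

        "github.com/gravitational/teleport/lib/ai"
    )

    // contextWindow is an assumed limit for the 8k GPT-4 variant.
    const contextWindow = 8192

    // maxResponseTokens mirrors the unexported constant in lib/ai/chat.go.
    const maxResponseTokens = 2000

    // checkBudget rejects prompts that leave no room for a full-length completion.
    func checkBudget(chat *ai.Chat) error {
        promptTokens, err := chat.PromptTokens()
        if err != nil {
            return err
        }
        if promptTokens+maxResponseTokens > contextWindow {
            return fmt.Errorf("prompt of %d tokens leaves no room for a %d-token completion", promptTokens, maxResponseTokens)
        }
        return nil
    }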
diff --git a/lib/ai/messages.go b/lib/ai/messages.go
new file mode 100644
index 0000000000000..8d5bd9c73df97
--- /dev/null
+++ b/lib/ai/messages.go
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2023 Gravitational, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package ai
+
+// Message represents a message within a live conversation.
+// Indexed by ID for frontend ordering and future partial message streaming.
+type Message struct {
+	Role    string `json:"role"`
+	Content string `json:"content"`
+	Idx     int    `json:"idx"`
+	// NumTokens is the number of completion tokens for the (non-streaming) message
+	NumTokens int `json:"-"`
+}
+
+// Label represents a label returned by OpenAI's completion API.
+type Label struct {
+	Key   string `json:"key"`
+	Value string `json:"value"`
+}
+
+// CompletionCommand represents a command returned by OpenAI's completion API.
+type CompletionCommand struct {
+	Command string   `json:"command,omitempty"`
+	Nodes   []string `json:"nodes,omitempty"`
+	Labels  []Label  `json:"labels,omitempty"`
+	// NumTokens is the number of completion tokens for the (non-streaming) message
+	NumTokens int `json:"-"`
+}
+
+// StreamingMessage represents a message that is streamed from the assistant
+// and will later be stored as a normal message in the conversation store.
+type StreamingMessage struct {
+	// Role describes the OpenAI role of the message, i.e. its sender.
+	Role string
+
+	// Idx is a semi-unique ID assigned when loading a conversation so that the UI can group partial messages together.
+	Idx int
+
+	// Chunks is a channel of message chunks that are streamed from the assistant.
+	Chunks <-chan string
+
+	// Error is a channel which may receive one error if the assistant encounters an error while streaming.
+	// Consumers should stop reading from all channels if they receive an error and abort.
+	Error <-chan error
+}
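Chat.Complete fills CompletionCommand directly via json.Unmarshal, so the struct tags above define the wire format the model is asked to emit. A self-contained sketch of that decode step follows; the payload mirrors the fixture in chat_test.go, with an illustrative env/staging label added, and the types are copied locally so the example stands alone.

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // Label and CompletionCommand are copied from lib/ai/messages.go.
    type Label struct {
        Key   string `json:"key"`
        Value string `json:"value"`
    }

    type CompletionCommand struct {
        Command string   `json:"command,omitempty"`
        Nodes   []string `json:"nodes,omitempty"`
        Labels  []Label  `json:"labels,omitempty"`
    }

    func main() {
        payload := `{"command": "df -h", "nodes": ["localhost"], "labels": [{"key": "env", "value": "staging"}]}`

        var c CompletionCommand
        if err := json.Unmarshal([]byte(payload), &c); err != nil {
            // Complete takes this branch too: an unparsable payload is
            // returned to the caller as a plain text Message instead.
            fmt.Println("not a command, treat as plain text:", err)
            return
        }
        fmt.Printf("command=%q nodes=%v labels=%v\n", c.Command, c.Nodes, c.Labels)
    }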
diff --git a/lib/ai/prompt.go b/lib/ai/prompt.go
new file mode 100644
index 0000000000000..3dfd23fbe53a6
--- /dev/null
+++ b/lib/ai/prompt.go
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2023 Gravitational, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package ai
+
+import "fmt"
+
+const promptSummarizeTitle = `You will be given a message. Create a short summary of that message.
+Respond only with the summary, nothing else.`
+
+const initialAIResponse = `Hey, I'm Teleport - a powerful tool that can assist you in managing your Teleport cluster via ChatGPT.`
+
+const promptExtractInstruction = `If the input is a request to complete a task on a server, try to extract the following information:
+- A Linux shell command
+- One or more target servers
+- One or more target labels
+
+If there is a lack of details, provide the most logical solution.
+Ensure the output is a valid shell command.
+There must be at least one target server or label, otherwise we do not have enough information to complete the task.
+Provide the output in the following format with no other text:
+
+{
+	"command": "",
+	"nodes": ["", ""],
+	"labels": [
+		{
+			"key": "",
+			"value": ""
+		},
+		{
+			"key": "",
+			"value": ""
+		}
+	]
+}
+
+If the user is not asking to complete a task on a server directly but is asking a question related to Teleport or Linux - disregard this entire message and help them with their Teleport or Linux related request.`
+
+// promptCharacter is a prompt that sets the context for the conversation.
+// Username is the name of the user that the AI is talking to.
+func promptCharacter(username string) string {
+	return fmt.Sprintf(`You are Teleport, a tool that users can use to connect to Linux servers and run relevant commands, as well as have a conversation.
+A Teleport cluster is a connectivity layer that allows access to a set of servers. Servers may also be referred to as nodes.
+Nodes sometimes have labels such as "production" and "staging" assigned to them. Labels are used to group nodes together.
+You will engage in professional conversation with the user and help accomplish tasks such as executing tasks
+within the cluster or answering relevant questions about Teleport, Linux or the cluster itself.
+
+You are not permitted to engage in conversation that is not related to Teleport, Linux or the cluster itself.
+If this user asks such an unrelated question, you must concisely respond that it is beyond your scope of knowledge.
+
+You are talking to %v.`, username)
+}
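Note how promptExtractInstruction dovetails with Chat.Complete: because the model is instructed to reply with the JSON object and no other text, the first non-whitespace delta of a command response starts with "{", which is exactly the prefix Complete probes before buffering the whole payload for parsing. Any other reply, including answers invited by the final "disregard this entire message" paragraph, falls through to the streaming path.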