llm.go
package main

import (
	"context"
	"path/filepath"

	ollama "github.com/ollama/ollama/api"
	"github.com/rs/zerolog"
	"gopkg.in/natefinch/lumberjack.v2"
)
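
// llmLogger writes structured request/response logs to llm.log in the
// config directory, rotating at 50 MB per file with up to 10 backups kept.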
var llmLogger = zerolog.New(&lumberjack.Logger{
	Filename:   filepath.Join(getConfigDir(), "llm.log"),
	MaxSize:    50,
	MaxBackups: 10,
}).With().Timestamp().Logger()
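
// LLMClient is a thin wrapper around the Ollama API client.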
type LLMClient struct {
	ol *ollama.Client
}
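
// NewLLMClient builds a client via ollama.ClientFromEnvironment, which
// honours OLLAMA_HOST and otherwise targets the default local server.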
func NewLLMClient() (*LLMClient, error) {
	ol, err := ollama.ClientFromEnvironment()
	if err != nil {
		return nil, err
	}
	return &LLMClient{
		ol: ol,
	}, nil
}
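
// CompletionResponse is one chunk of model output; Done is true on the
// final chunk of a stream.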
type CompletionResponse struct {
	Text string
	Done bool
}
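
// GenerateResponseFunc receives each streamed chunk; returning a non-nil
// error aborts the stream.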
type GenerateResponseFunc = func(CompletionResponse) error
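
// StreamCompletion streams a completion for text from model, invoking
// handler once per chunk. The deferred log records the prompt together
// with the fully concatenated response once streaming ends.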
func (lc *LLMClient) StreamCompletion(ctx context.Context, model, text string, handler GenerateResponseFunc) error {
	stream := true
	output := ""
	defer func(resp *string) {
		llmLogger.Info().
			Str("model", model).
			Str("prompt", text).
			Str("response", *resp).
			Msg("Finished streaming completion")
	}(&output)
	return lc.ol.Generate(ctx, &ollama.GenerateRequest{
		Model:  model,
		Prompt: text,
		Stream: &stream,
	}, func(gr ollama.GenerateResponse) error {
		output += gr.Response
		return handler(CompletionResponse{
			Text: gr.Response,
			Done: gr.Done,
		})
	})
}
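
// GenerateCompletion performs a non-streaming request and returns the
// complete response text; because Stream is false, the callback fires
// once with the full answer.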
func (lc *LLMClient) GenerateCompletion(ctx context.Context, model, text string) (string, error) {
	stream := false
	var completion string
	err := lc.ol.Generate(ctx, &ollama.GenerateRequest{
		Model:  model,
		Prompt: text,
		Stream: &stream,
	}, func(gr ollama.GenerateResponse) error {
		completion = gr.Response
		return nil
	})
	llmLogger.Info().
		Str("model", model).
		Str("prompt", text).
		Str("response", completion).
		Msg("Finished completion")
	return completion, err
}
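
// GetEmbedding returns the embedding vector for text from the given
// embedding model.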
func (lc *LLMClient) GetEmbedding(ctx context.Context, model, text string) ([]float64, error) {
	resp, err := lc.ol.Embeddings(ctx, &ollama.EmbeddingRequest{
		Model:  model,
		Prompt: text,
	})
	if err != nil {
		return nil, err
	}
	return resp.Embedding, nil
}
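
// Usage sketch (illustrative only, not part of the original file): the model
// names "llama3" and "nomic-embed-text" below are assumptions; substitute
// whatever models your Ollama server has pulled.
//
//	ctx := context.Background()
//	lc, err := NewLLMClient()
//	if err != nil {
//		log.Fatal(err)
//	}
//
//	// Print tokens as they arrive.
//	err = lc.StreamCompletion(ctx, "llama3", "Why is the sky blue?",
//		func(cr CompletionResponse) error {
//			fmt.Print(cr.Text)
//			return nil
//		})
//
//	// Or wait for the whole answer at once.
//	answer, err := lc.GenerateCompletion(ctx, "llama3", "Why is the sky blue?")
//
//	// Embedding vector, e.g. for similarity search.
//	vec, err := lc.GetEmbedding(ctx, "nomic-embed-text", "Why is the sky blue?")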