feat(chat): support other local llm #331

Jacky 2024-05-01 19:40:38 +08:00
parent 08631437ee
commit 3b116b3654
4 changed files with 356 additions and 361 deletions


@@ -1,134 +1,123 @@
package openai

import (
	"context"
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"

	"github.com/0xJacky/Nginx-UI/api"
	"github.com/0xJacky/Nginx-UI/settings"
	"github.com/gin-gonic/gin"
	"github.com/pkg/errors"
	"github.com/sashabaranov/go-openai"
)

const ChatGPTInitPrompt = "You are an assistant who can help users write and optimise Nginx configurations. The first user message contains the content of the configuration file currently opened by the user and the current language code (CLC). You are supposed to reply in the language corresponding to the CLC for the first reply; afterwards, follow the language of the user's messages. The first reply should summarise the key information of the file and ask the user what you can help them with."

func MakeChatCompletionRequest(c *gin.Context) {
	var json struct {
		Messages []openai.ChatCompletionMessage `json:"messages"`
	}

	if !api.BindAndValid(c, &json) {
		return
	}

	messages := []openai.ChatCompletionMessage{
		{
			Role:    openai.ChatMessageRoleSystem,
			Content: ChatGPTInitPrompt,
		},
	}
	messages = append(messages, json.Messages...)

	// Set up the server-sent events (SSE) response headers
	c.Writer.Header().Set("Content-Type", "text/event-stream; charset=utf-8")
	c.Writer.Header().Set("Cache-Control", "no-cache")
	c.Writer.Header().Set("Connection", "keep-alive")
	c.Writer.Header().Set("Access-Control-Allow-Origin", "*")

	if settings.OpenAISettings.Token == "" {
		c.Stream(func(w io.Writer) bool {
			c.SSEvent("message", gin.H{
				"type":    "error",
				"content": "[Error] OpenAI token is empty",
			})
			return false
		})
		return
	}

	config := openai.DefaultConfig(settings.OpenAISettings.Token)
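
	// DefaultConfig targets https://api.openai.com/v1; the Proxy and
	// BaseUrl settings handled below can re-route requests elsewhere.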
	if settings.OpenAISettings.Proxy != "" {
		proxyUrl, err := url.Parse(settings.OpenAISettings.Proxy)
		if err != nil {
			c.Stream(func(w io.Writer) bool {
				c.SSEvent("message", gin.H{
					"type":    "error",
					"content": err.Error(),
				})
				return false
			})
			return
		}
		transport := &http.Transport{
			Proxy:           http.ProxyURL(proxyUrl),
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		}
		config.HTTPClient = &http.Client{
			Transport: transport,
		}
	}

	if settings.OpenAISettings.BaseUrl != "" {
		config.BaseURL = settings.OpenAISettings.BaseUrl
	}
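	// Overriding BaseURL with any OpenAI-compatible endpoint is what
	// enables local LLM backends here, e.g. an Ollama instance, which
	// typically exposes one at http://localhost:11434/v1.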

	openaiClient := openai.NewClientWithConfig(config)
	ctx := context.Background()

	req := openai.ChatCompletionRequest{
		Model:    settings.OpenAISettings.Model,
		Messages: messages,
		Stream:   true,
	}
	stream, err := openaiClient.CreateChatCompletionStream(ctx, req)
	if err != nil {
		fmt.Printf("CompletionStream error: %v\n", err)
		c.Stream(func(w io.Writer) bool {
			c.SSEvent("message", gin.H{
				"type":    "error",
				"content": err.Error(),
			})
			return false
		})
		return
	}
	defer stream.Close()

	msgChan := make(chan string)
	go func() {
		defer close(msgChan)
		for {
			response, err := stream.Recv()
			if errors.Is(err, io.EOF) {
				fmt.Println()
				return
			}
			if err != nil {
				fmt.Printf("Stream error: %v\n", err)
				return
			}
			message := response.Choices[0].Delta.Content
			fmt.Printf("%s", message)
			_ = os.Stdout.Sync()
			msgChan <- message
		}
	}()
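
	// Relay messages to the client: gin invokes this callback repeatedly
	// until it returns false, which happens once msgChan is closed.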
	c.Stream(func(w io.Writer) bool {
		if m, ok := <-msgChan; ok {
			c.SSEvent("message", gin.H{
				"type":    "message",
				"content": m,
			})
			return true
		}
		return false
	})
}
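
For reference, here is a minimal, self-contained sketch of pointing this same client path at a local OpenAI-compatible server, which is what the BaseUrl setting above enables. The endpoint URL and model name are illustrative assumptions, not values taken from this commit:

package main

import (
	"context"
	"errors"
	"fmt"
	"io"

	"github.com/sashabaranov/go-openai"
)

func main() {
	// Many local servers ignore the token, but go-openai requires a string.
	config := openai.DefaultConfig("none")
	// Assumed local endpoint: Ollama's OpenAI-compatible API.
	config.BaseURL = "http://localhost:11434/v1"
	client := openai.NewClientWithConfig(config)

	stream, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
		Model: "llama3", // assumed model name; use whatever the local server hosts
		Messages: []openai.ChatCompletionMessage{
			{Role: openai.ChatMessageRoleUser, Content: "Explain an Nginx location block."},
		},
		Stream: true,
	})
	if err != nil {
		fmt.Printf("CompletionStream error: %v\n", err)
		return
	}
	defer stream.Close()

	// Print deltas as they arrive, mirroring the handler's read loop.
	for {
		response, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			fmt.Printf("Stream error: %v\n", err)
			return
		}
		fmt.Print(response.Choices[0].Delta.Content)
	}
}

Only the token and base URL differ between OpenAI and a local backend, so the handler above should work unmodified once settings.OpenAISettings.BaseUrl is set.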