enhance: add settings for skip tls cert check

Jacky 2024-07-30 15:10:02 +08:00
parent 013d810678
commit f1c0f8ddca
No known key found for this signature in database
GPG key ID: 215C21B10DF38B4D
8 changed files with 400 additions and 394 deletions
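
The substantive change below replaces a hardcoded InsecureSkipVerify: true on the OpenAI proxy transport with a value read from the server settings. A minimal sketch of what that setting might look like, assuming a plain struct in the settings package; the real definition is among the other changed files, and the struct name, tag, and default shown here are assumptions for illustration:

package settings

// Server holds server-level options; only the field this commit reads is shown.
type Server struct {
	// InsecureSkipVerify toggles TLS certificate verification for outbound
	// requests such as the OpenAI proxy transport; false keeps verification on.
	InsecureSkipVerify bool `ini:"InsecureSkipVerify"`
}

// ServerSettings is the package-level instance referenced as
// settings.ServerSettings.InsecureSkipVerify in the diff below.
var ServerSettings = &Server{}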


@@ -1,18 +1,18 @@
package openai
import (
"context"
"crypto/tls"
"fmt"
"github.com/0xJacky/Nginx-UI/api"
"github.com/0xJacky/Nginx-UI/internal/chatbot"
"github.com/0xJacky/Nginx-UI/settings"
"github.com/gin-gonic/gin"
"github.com/pkg/errors"
"github.com/sashabaranov/go-openai"
"io"
"net/http"
"net/url"
"context"
"crypto/tls"
"fmt"
"github.com/0xJacky/Nginx-UI/api"
"github.com/0xJacky/Nginx-UI/internal/chatbot"
"github.com/0xJacky/Nginx-UI/settings"
"github.com/gin-gonic/gin"
"github.com/pkg/errors"
"github.com/sashabaranov/go-openai"
"io"
"net/http"
"net/url"
)
const ChatGPTInitPrompt = `You are an assistant who can help users write and optimise the configurations of Nginx,
@@ -22,111 +22,111 @@ Later the language environment depends on the user message.
The first reply should involve the key information of the file and ask the user what you can help them with.`
func MakeChatCompletionRequest(c *gin.Context) {
	var json struct {
		Filepath string                         `json:"filepath"`
		Messages []openai.ChatCompletionMessage `json:"messages"`
	}

	if !api.BindAndValid(c, &json) {
		return
	}

	messages := []openai.ChatCompletionMessage{
		{
			Role:    openai.ChatMessageRoleSystem,
			Content: ChatGPTInitPrompt,
		},
	}

	messages = append(messages, json.Messages...)

	if json.Filepath != "" {
		messages = chatbot.ChatCompletionWithContext(json.Filepath, messages)
	}

	// SSE server
	c.Writer.Header().Set("Content-Type", "text/event-stream; charset=utf-8")
	c.Writer.Header().Set("Cache-Control", "no-cache")
	c.Writer.Header().Set("Connection", "keep-alive")
	c.Writer.Header().Set("Access-Control-Allow-Origin", "*")

	config := openai.DefaultConfig(settings.OpenAISettings.Token)
	if settings.OpenAISettings.Proxy != "" {
		proxyUrl, err := url.Parse(settings.OpenAISettings.Proxy)
		if err != nil {
			c.Stream(func(w io.Writer) bool {
				c.SSEvent("message", gin.H{
					"type":    "error",
					"content": err.Error(),
				})
				return false
			})
			return
		}
		transport := &http.Transport{
			Proxy:           http.ProxyURL(proxyUrl),
-			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+			TLSClientConfig: &tls.Config{InsecureSkipVerify: settings.ServerSettings.InsecureSkipVerify},
		}
		config.HTTPClient = &http.Client{
			Transport: transport,
		}
	}
	if settings.OpenAISettings.BaseUrl != "" {
		config.BaseURL = settings.OpenAISettings.BaseUrl
	}

	openaiClient := openai.NewClientWithConfig(config)
	ctx := context.Background()
	req := openai.ChatCompletionRequest{
		Model:    settings.OpenAISettings.Model,
		Messages: messages,
		Stream:   true,
	}
	stream, err := openaiClient.CreateChatCompletionStream(ctx, req)
	if err != nil {
		fmt.Printf("CompletionStream error: %v\n", err)
		c.Stream(func(w io.Writer) bool {
			c.SSEvent("message", gin.H{
				"type":    "error",
				"content": err.Error(),
			})
			return false
		})
		return
	}
	defer stream.Close()

	msgChan := make(chan string)
	go func() {
		defer close(msgChan)
		for {
			response, err := stream.Recv()
			if errors.Is(err, io.EOF) {
				fmt.Println()
				return
			}

			if err != nil {
				fmt.Printf("Stream error: %v\n", err)
				return
			}

			message := fmt.Sprintf("%s", response.Choices[0].Delta.Content)

			msgChan <- message
		}
	}()

	c.Stream(func(w io.Writer) bool {
		if m, ok := <-msgChan; ok {
			c.SSEvent("message", gin.H{
				"type":    "message",
				"content": m,
			})
			return true
		}
		return false
	})
}
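
For callers outside this handler, the same transport wiring can be factored into a small helper. A sketch under the assumption that only the proxy URL and the skip-verify flag vary; newProxyClient is a hypothetical name, not part of the repository:

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"net/url"
)

// newProxyClient mirrors the transport setup in the hunk above, taking the
// skip-verify flag as a parameter instead of hardcoding it.
func newProxyClient(proxy string, insecureSkipVerify bool) (*http.Client, error) {
	proxyURL, err := url.Parse(proxy)
	if err != nil {
		return nil, err
	}
	return &http.Client{
		Transport: &http.Transport{
			Proxy: http.ProxyURL(proxyURL),
			// Certificate verification stays on unless explicitly disabled.
			TLSClientConfig: &tls.Config{InsecureSkipVerify: insecureSkipVerify},
		},
	}, nil
}

func main() {
	// Example: proxy with verification enabled (the safe default).
	client, err := newProxyClient("http://127.0.0.1:7890", false)
	if err != nil {
		panic(err)
	}
	fmt.Printf("client ready: %T\n", client.Transport)
}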