diff --git a/api/openai/code_completion.go b/api/openai/code_completion.go new file mode 100644 index 00000000..4615529f --- /dev/null +++ b/api/openai/code_completion.go @@ -0,0 +1,82 @@ +package openai + +import ( + "net/http" + "sync" + "time" + + "github.com/0xJacky/Nginx-UI/api" + "github.com/0xJacky/Nginx-UI/internal/helper" + "github.com/0xJacky/Nginx-UI/internal/llm" + "github.com/0xJacky/Nginx-UI/settings" + "github.com/gin-gonic/gin" + "github.com/gorilla/websocket" + "github.com/uozi-tech/cosy" + "github.com/uozi-tech/cosy/logger" +) + +var mutex sync.Mutex + +// CodeCompletion handles code completion requests +func CodeCompletion(c *gin.Context) { + if !settings.OpenAISettings.EnableCodeCompletion { + cosy.ErrHandler(c, llm.ErrCodeCompletionNotEnabled) + return + } + + var upgrader = websocket.Upgrader{ + CheckOrigin: func(r *http.Request) bool { + return true + }, + } + ws, err := upgrader.Upgrade(c.Writer, c.Request, nil) + if err != nil { + cosy.ErrHandler(c, err) + return + } + defer ws.Close() + + for { + var codeCompletionRequest llm.CodeCompletionRequest + err := ws.ReadJSON(&codeCompletionRequest) + if err != nil { + if helper.IsUnexpectedWebsocketError(err) { + logger.Errorf("Error reading JSON: %v", err) + } + return + } + + codeCompletionRequest.UserID = api.CurrentUser(c).ID + + go func() { + start := time.Now() + completedCode, err := codeCompletionRequest.Send() + if err != nil { + logger.Errorf("Error sending code completion request: %v", err) + return + } + elapsed := time.Since(start) + + mutex.Lock() + defer mutex.Unlock() + + err = ws.WriteJSON(gin.H{ + "code": completedCode, + "request_id": codeCompletionRequest.RequestID, + "completion_ms": elapsed.Milliseconds(), + }) + if err != nil { + if helper.IsUnexpectedWebsocketError(err) { + logger.Errorf("Error writing JSON: %v", err) + } + return + } + }() + } +} + +func GetCodeCompletionEnabledStatus(c *gin.Context) { + c.JSON(http.StatusOK, gin.H{ + "enabled": 
settings.OpenAISettings.EnableCodeCompletion, + }) +} diff --git a/api/openai/openai.go b/api/openai/openai.go index db306bd7..0c61ca5f 100644 --- a/api/openai/openai.go +++ b/api/openai/openai.go @@ -4,15 +4,16 @@ import ( "context" "errors" "fmt" - "github.com/0xJacky/Nginx-UI/internal/chatbot" + "io" + "strings" + "time" + + "github.com/0xJacky/Nginx-UI/internal/llm" "github.com/0xJacky/Nginx-UI/settings" "github.com/gin-gonic/gin" "github.com/sashabaranov/go-openai" "github.com/uozi-tech/cosy" "github.com/uozi-tech/cosy/logger" - "io" - "strings" - "time" ) const ChatGPTInitPrompt = `You are a assistant who can help users write and optimise the configurations of Nginx, @@ -41,7 +42,7 @@ func MakeChatCompletionRequest(c *gin.Context) { messages = append(messages, json.Messages...) if json.Filepath != "" { - messages = chatbot.ChatCompletionWithContext(json.Filepath, messages) + messages = llm.ChatCompletionWithContext(json.Filepath, messages) } // SSE server @@ -50,7 +51,7 @@ func MakeChatCompletionRequest(c *gin.Context) { c.Writer.Header().Set("Connection", "keep-alive") c.Writer.Header().Set("Access-Control-Allow-Origin", "*") - openaiClient, err := chatbot.GetClient() + openaiClient, err := llm.GetClient() if err != nil { c.Stream(func(w io.Writer) bool { c.SSEvent("message", gin.H{ diff --git a/api/openai/router.go b/api/openai/router.go index d9ce13f0..108c9943 100644 --- a/api/openai/router.go +++ b/api/openai/router.go @@ -6,4 +6,7 @@ func InitRouter(r *gin.RouterGroup) { // ChatGPT r.POST("chatgpt", MakeChatCompletionRequest) r.POST("chatgpt_record", StoreChatGPTRecord) + // Code Completion + r.GET("code_completion", CodeCompletion) + r.GET("code_completion/enabled", GetCodeCompletionEnabledStatus) } diff --git a/app.example.ini b/app.example.ini index 73aaeca5..4399c2ee 100644 --- a/app.example.ini +++ b/app.example.ini @@ -70,6 +70,8 @@ Token = Proxy = Model = gpt-4o APIType = +EnableCodeCompletion = false +CodeCompletionModel = gpt-4o-mini [terminal] 
StartCmd = bash diff --git a/app/package.json b/app/package.json index 112a2fd8..5c7b65a9 100644 --- a/app/package.json +++ b/app/package.json @@ -43,6 +43,7 @@ "sse.js": "^2.6.0", "universal-cookie": "^8.0.1", "unocss": "^66.0.0", + "uuid": "^11.1.0", "vite-plugin-build-id": "0.5.0", "vue": "^3.5.13", "vue-dompurify-html": "^5.2.0", diff --git a/app/pnpm-lock.yaml b/app/pnpm-lock.yaml index 989c03f7..e5910370 100644 --- a/app/pnpm-lock.yaml +++ b/app/pnpm-lock.yaml @@ -98,6 +98,9 @@ importers: unocss: specifier: ^66.0.0 version: 66.0.0(postcss@8.5.3)(vite@6.2.6(@types/node@22.14.0)(jiti@2.4.2)(less@4.3.0)(tsx@4.19.2)(yaml@2.7.1))(vue@3.5.13(typescript@5.8.3)) + uuid: + specifier: ^11.1.0 + version: 11.1.0 vite-plugin-build-id: specifier: 0.5.0 version: 0.5.0 @@ -3842,6 +3845,10 @@ packages: util-deprecate@1.0.2: resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==} + uuid@11.1.0: + resolution: {integrity: sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==} + hasBin: true + validate-npm-package-license@3.0.4: resolution: {integrity: sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==} @@ -8217,6 +8224,8 @@ snapshots: util-deprecate@1.0.2: {} + uuid@11.1.0: {} + validate-npm-package-license@3.0.4: dependencies: spdx-correct: 3.2.0 diff --git a/app/src/api/openai.ts b/app/src/api/openai.ts index 626e7137..c6559de3 100644 --- a/app/src/api/openai.ts +++ b/app/src/api/openai.ts @@ -1,4 +1,5 @@ import http from '@/lib/http' +import ws from '@/lib/websocket' export interface ChatComplicationMessage { role: string @@ -6,10 +7,31 @@ export interface ChatComplicationMessage { name?: string } +export interface CodeCompletionRequest { + context: string // Context of the code + code: string // Code before the cursor + suffix?: string // Code after the cursor + language?: string // Programming language + position?: { // 
Cursor position + row: number + column: number + } +} + +export interface CodeCompletionResponse { + code: string // Completed code +} + const openai = { store_record(data: { file_name?: string, messages?: ChatComplicationMessage[] }) { return http.post('/chatgpt_record', data) }, + code_completion() { + return ws('/api/code_completion') + }, + get_code_completion_enabled_status() { + return http.get<{ enabled: boolean }>('/code_completion/enabled') + }, } export default openai diff --git a/app/src/api/settings.ts b/app/src/api/settings.ts index 9d9dbdd8..90d19288 100644 --- a/app/src/api/settings.ts +++ b/app/src/api/settings.ts @@ -81,6 +81,8 @@ export interface OpenaiSettings { proxy: string token: string api_type: string + enable_code_completion: boolean + code_completion_model: string } export interface TerminalSettings { diff --git a/app/src/components/CodeEditor/CodeCompletion.ts b/app/src/components/CodeEditor/CodeCompletion.ts new file mode 100644 index 00000000..9b4fa896 --- /dev/null +++ b/app/src/components/CodeEditor/CodeCompletion.ts @@ -0,0 +1,224 @@ +import type { Editor } from 'ace-builds' +import type { Point } from 'ace-builds-internal/document' +import openai from '@/api/openai' +import { debounce } from 'lodash' +import { v4 as uuidv4 } from 'uuid' + +// eslint-disable-next-line ts/no-explicit-any +function debug(...args: any[]) { + if (import.meta.env.DEV) { + // eslint-disable-next-line no-console + console.debug(`[CodeEditor]`, ...args) + } +} + +function useCodeCompletion() { + const editorRef = ref() + const currentGhostText = ref('') + + const ws = openai.code_completion() + + function getAISuggestions(code: string, context: string, position: Point, callback: (suggestion: string) => void, language: string = 'nginx', suffix: string = '', requestId: string) { + if (!ws || ws.readyState !== WebSocket.OPEN) { + debug('WebSocket is not open') + return + } + + if (!code.trim()) { + debug('Code is empty') + return + } + + const message = { + 
context, + code, + suffix, + language, + position, + request_id: requestId, + } + + debug('Sending message', message) + + ws.send(JSON.stringify(message)) + + ws.onmessage = event => { + const data = JSON.parse(event.data) + debug(`Received message`, data, requestId) + if (data.request_id === requestId) { + callback(data.code) + } + } + } + + function applyGhostText() { + if (!editorRef.value) { + debug('Editor instance not available yet') + return + } + + try { + const currentText = editorRef.value.getValue() + const cursorPosition = editorRef.value.getCursorPosition() + + // Get all text before the current cursor position as the code part for the request + const allLines = currentText.split('\n') + const currentLine = allLines[cursorPosition.row] + const textUpToCursor = allLines.slice(0, cursorPosition.row).join('\n') + + (cursorPosition.row > 0 ? '\n' : '') + + currentLine.substring(0, cursorPosition.column) + + // Get text after cursor position as suffix + const textAfterCursor = currentLine.substring(cursorPosition.column) + + (cursorPosition.row < allLines.length - 1 ? 
'\n' : '') + + allLines.slice(cursorPosition.row + 1).join('\n') + + // Generate new request ID + const requestId = uuidv4() + + // Clear existing ghost text before making the request + clearGhostText() + + // Get AI suggestions + getAISuggestions( + textUpToCursor, + currentText, + cursorPosition, + suggestion => { + debug(`AI suggestions applied: ${suggestion}`) + + // If there's a suggestion, set ghost text + if (suggestion && typeof editorRef.value!.setGhostText === 'function') { + clearGhostText() + + // Get current cursor position (may have changed during async process) + const newPosition = editorRef.value!.getCursorPosition() + + editorRef.value!.setGhostText(suggestion, { + column: newPosition.column, + row: newPosition.row, + }) + debug(`Ghost text set: ${suggestion}`) + currentGhostText.value = suggestion + } + else if (suggestion) { + debug('setGhostText method not available on editor instance') + } + }, + editorRef.value.session.getMode()?.path?.split('/').pop() || 'text', + textAfterCursor, // Pass text after cursor as suffix + requestId, // Pass request ID + ) + } + catch (error) { + debug(`Error in applyGhostText: ${error}`) + } + } + + // Accept the ghost text suggestion with Tab key + function setupTabHandler(editor: Editor) { + if (!editor) { + debug('Editor not available in setupTabHandler') + return + } + + debug('Setting up Tab key handler') + + // Remove existing command to avoid conflicts + const existingCommand = editor.commands.byName.acceptGhostText + if (existingCommand) { + editor.commands.removeCommand(existingCommand) + } + + // Register new Tab key handler command with highest priority + editor.commands.addCommand({ + name: 'acceptGhostText', + bindKey: { win: 'Tab', mac: 'Tab' }, + exec: (editor: Editor) => { + // Use our saved ghost text, not dependent on editor.ghostText + if (currentGhostText.value) { + debug(`Accepting ghost text: ${currentGhostText.value}`) + + const position = editor.getCursorPosition() + const text = 
currentGhostText.value + + // Insert text through session API + editor.session.insert(position, text) + + clearGhostText() + + debug('Ghost text inserted successfully') + return true // Prevent event propagation + } + + debug('No ghost text to accept, allowing default tab behavior') + return false // Allow default Tab behavior + }, + readOnly: false, + }) + + debug('Tab key handler set up successfully') + } + + // Clear ghost text and reset state + function clearGhostText() { + if (!editorRef.value) + return + + if (typeof editorRef.value.removeGhostText === 'function') { + editorRef.value.removeGhostText() + } + currentGhostText.value = '' + } + + const debouncedApplyGhostText = debounce(applyGhostText, 1000, { leading: false, trailing: true }) + + debug('Editor initialized') + + async function init(editor: Editor) { + const { enabled } = await openai.get_code_completion_enabled_status() + if (!enabled) { + debug('Code completion is not enabled') + return + } + + editorRef.value = editor + + // Set up Tab key handler + setupTabHandler(editor) + + setTimeout(() => { + editor.on('change', (e: { action: string }) => { + debug(`Editor change event: ${e.action}`) + // If change is caused by user input, interrupt current completion + clearGhostText() + + if (e.action === 'insert' || e.action === 'remove') { + // Clear current ghost text + debouncedApplyGhostText() + } + }) + + // Listen for cursor changes, using debounce + editor.selection.on('changeCursor', () => { + debug('Cursor changed') + clearGhostText() + debouncedApplyGhostText() + }) + }, 2000) + } + + function cleanUp() { + if (ws) { + ws.close() + } + debug('CodeCompletion unmounted') + } + + return { + init, + cleanUp, + } +} + +export default useCodeCompletion diff --git a/app/src/components/CodeEditor/CodeEditor.vue b/app/src/components/CodeEditor/CodeEditor.vue index 4491a807..dc393153 100644 --- a/app/src/components/CodeEditor/CodeEditor.vue +++ b/app/src/components/CodeEditor/CodeEditor.vue @@ -1,49 
+1,55 @@ @@ -52,4 +58,9 @@ onMounted(() => { z-index: 1; position: relative; } + +:deep(.ace_ghost-text) { + color: #6a737d; + opacity: 0.8; +} diff --git a/app/src/views/preference/OpenAISettings.vue b/app/src/views/preference/OpenAISettings.vue index bf696fb2..7c7982cb 100644 --- a/app/src/views/preference/OpenAISettings.vue +++ b/app/src/views/preference/OpenAISettings.vue @@ -76,6 +76,24 @@ const providers = LLM_PROVIDERS.map(provider => ({ + + + + + + diff --git a/app/src/views/preference/Preference.vue b/app/src/views/preference/Preference.vue index 9137aba8..5c2fe577 100644 --- a/app/src/views/preference/Preference.vue +++ b/app/src/views/preference/Preference.vue @@ -90,6 +90,8 @@ const data = ref({ proxy: '', token: '', api_type: 'OPEN_AI', + enable_code_completion: false, + code_completion_model: '', }, terminal: { start_cmd: '', diff --git a/internal/chatbot/client.go b/internal/llm/client.go similarity index 98% rename from internal/chatbot/client.go rename to internal/llm/client.go index 8c8065ab..31d1bb16 100644 --- a/internal/chatbot/client.go +++ b/internal/llm/client.go @@ -1,4 +1,4 @@ -package chatbot +package llm import ( "github.com/0xJacky/Nginx-UI/internal/transport" diff --git a/internal/llm/code_completion.go b/internal/llm/code_completion.go new file mode 100644 index 00000000..e28d55d8 --- /dev/null +++ b/internal/llm/code_completion.go @@ -0,0 +1,156 @@ +package llm + +import ( + "context" + "regexp" + "strconv" + "strings" + "sync" + + "github.com/0xJacky/Nginx-UI/settings" + "github.com/sashabaranov/go-openai" + "github.com/uozi-tech/cosy/logger" +) + +const ( + MaxTokens = 100 + Temperature = 1 + // Build system prompt and user prompt + SystemPrompt = "You are a code completion assistant. " + + "Complete the provided code snippet based on the context and instruction." + + "[IMPORTANT] Keep the original code indentation." 
+)
+
+// Position the cursor position
+type Position struct {
+	Row    int `json:"row"`
+	Column int `json:"column"`
+}
+
+// CodeCompletionRequest the code completion request
+type CodeCompletionRequest struct {
+	RequestID string   `json:"request_id"`
+	UserID    uint64   `json:"user_id"`
+	Context   string   `json:"context"`
+	Code      string   `json:"code"`
+	Suffix    string   `json:"suffix"`
+	Language  string   `json:"language"`
+	Position  Position `json:"position"`
+}
+
+var (
+	requestContext = make(map[uint64]context.CancelFunc)
+	mutex          sync.Mutex
+)
+
+func (c *CodeCompletionRequest) Send() (completedCode string, err error) {
+	mutex.Lock()
+	// requestContext is written concurrently below, so the lookup must also hold the mutex
+	if cancel, ok := requestContext[c.UserID]; ok {
+		logger.Infof("Code completion request cancelled for user %d", c.UserID)
+		cancel()
+	}
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	requestContext[c.UserID] = cancel
+	mutex.Unlock()
+	defer func() {
+		mutex.Lock()
+		delete(requestContext, c.UserID)
+		mutex.Unlock()
+	}()
+
+	openaiClient, err := GetClient()
+	if err != nil {
+		return
+	}
+
+	// Build user prompt with code and instruction
+	userPrompt := "Here is a file written in " + c.Language + ":\n```\n" + c.Context + "\n```\n"
+	userPrompt += "I'm editing at row " + strconv.Itoa(c.Position.Row) + ", column " + strconv.Itoa(c.Position.Column) + ".\n"
+	userPrompt += "Code before cursor:\n```\n" + c.Code + "\n```\n"
+
+	if c.Suffix != "" {
+		userPrompt += "Code after cursor:\n```\n" + c.Suffix + "\n```\n"
+	}
+
+	userPrompt += "Instruction: Only provide the completed code that should be inserted at the cursor position without explanations. " +
+		"The code should be syntactically correct and follow best practices for " + c.Language + "."
+
+	messages := []openai.ChatCompletionMessage{
+		{
+			Role:    openai.ChatMessageRoleSystem,
+			Content: SystemPrompt,
+		},
+		{
+			Role:    openai.ChatMessageRoleUser,
+			Content: userPrompt,
+		},
+	}
+
+	req := openai.ChatCompletionRequest{
+		Model:       settings.OpenAISettings.GetCodeCompletionModel(),
+		Messages:    messages,
+		MaxTokens:   MaxTokens,
+		Temperature: Temperature,
+	}
+
+	// Make a direct (non-streaming) call to the API
+	response, err := openaiClient.CreateChatCompletion(ctx, req)
+	if err != nil {
+		return
+	}
+
+	completedCode = response.Choices[0].Message.Content
+	// extract the last word of the code
+	lastWord := extractLastWord(c.Code)
+	completedCode = cleanCompletionResponse(completedCode, lastWord)
+	logger.Infof("Code completion response: %s", completedCode)
+	return
+}
+
+// extractLastWord extract the last word of the code
+func extractLastWord(code string) string {
+	if code == "" {
+		return ""
+	}
+
+	// define a regex to match word characters (letters, numbers, underscores)
+	re := regexp.MustCompile(`[a-zA-Z0-9_]+$`)
+
+	// find the last word of the code
+	match := re.FindString(code)
+
+	return match
+}
+
+// cleanCompletionResponse removes any <think> tags and their content from the completion response
+// and strips the already entered code from the completion
+func cleanCompletionResponse(response string, lastWord string) (cleanResp string) {
+	// remove <think> tags and their content using regex
+	re := regexp.MustCompile(`<think>[\s\S]*?</think>`)
+
+	cleanResp = re.ReplaceAllString(response, "")
+
+	// remove markdown code block tags
+	codeBlockRegex := regexp.MustCompile("```(?:[a-zA-Z]+)?\n((?:.|\n)*?)\n```")
+	matches := codeBlockRegex.FindStringSubmatch(cleanResp)
+
+	if len(matches) > 1 {
+		// extract the code block content
+		cleanResp = strings.TrimSpace(matches[1])
+	} else {
+		// if no code block is found, keep the original response
+		cleanResp = strings.TrimSpace(cleanResp)
+	}
+
+	// remove markdown backticks
+	cleanResp = strings.Trim(cleanResp, "`")
+
+	// if there is a last word, and the completion result starts with the last word, remove the already entered part
+	if lastWord != "" && strings.HasPrefix(cleanResp, lastWord) {
+		cleanResp = cleanResp[len(lastWord):]
+	}
+
+	return
+}
diff --git a/internal/chatbot/context.go b/internal/llm/context.go
similarity index 99%
rename from internal/chatbot/context.go
rename to internal/llm/context.go
index 560462d3..2daa02a8 100644
--- a/internal/chatbot/context.go
+++ b/internal/llm/context.go
@@ -1,4 +1,4 @@
-package chatbot
+package llm
 
 import (
 	"github.com/0xJacky/Nginx-UI/internal/helper"
diff --git a/internal/chatbot/context_test.go b/internal/llm/context_test.go
similarity index 96%
rename from internal/chatbot/context_test.go
rename to internal/llm/context_test.go
index 8fc47f68..a99ba4b5 100644
--- a/internal/chatbot/context_test.go
+++ b/internal/llm/context_test.go
@@ -1,4 +1,4 @@
-package chatbot
+package llm
 
 import (
 	"github.com/stretchr/testify/assert"
diff --git a/internal/llm/errors.go b/internal/llm/errors.go
new file mode 100644
index 00000000..d2541e2b
--- /dev/null
+++ b/internal/llm/errors.go
@@ -0,0 +1,10 @@
+package llm
+
+import (
+	"github.com/uozi-tech/cosy"
+)
+
+var (
+	e                           = cosy.NewErrorScope("llm")
+	ErrCodeCompletionNotEnabled = e.New(400, "code completion is not enabled")
+)
diff --git a/internal/chatbot/messages.go b/internal/llm/messages.go
similarity index 97%
rename from internal/chatbot/messages.go
rename to internal/llm/messages.go
index b020b126..898725cd 100644
--- a/internal/chatbot/messages.go
+++ b/internal/llm/messages.go
@@ -1,4 +1,4 @@
-package chatbot
+package llm
 
 import (
 	"github.com/sashabaranov/go-openai"
diff --git a/internal/chatbot/messages_test.go b/internal/llm/messages_test.go
similarity index 96%
rename from internal/chatbot/messages_test.go
rename to internal/llm/messages_test.go
index 0948141f..4ab6685f 100644
--- a/internal/chatbot/messages_test.go
+++ b/internal/llm/messages_test.go
@@ -1,4 +1,4 @@
-package chatbot
+package llm import ( "github.com/sashabaranov/go-openai" diff --git a/package-lock.json b/package-lock.json new file mode 100644 index 00000000..ffda952a --- /dev/null +++ b/package-lock.json @@ -0,0 +1,34 @@ +{ + "name": "nginx-ui", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "dependencies": { + "@types/lodash-es": "^4.17.12", + "lodash-es": "^4.17.21" + } + }, + "node_modules/@types/lodash": { + "version": "4.17.16", + "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.17.16.tgz", + "integrity": "sha512-HX7Em5NYQAXKW+1T+FiuG27NGwzJfCX3s1GjOa7ujxZa52kjJLOr4FUxT+giF6Tgxv1e+/czV/iTtBw27WTU9g==", + "license": "MIT" + }, + "node_modules/@types/lodash-es": { + "version": "4.17.12", + "resolved": "https://registry.npmjs.org/@types/lodash-es/-/lodash-es-4.17.12.tgz", + "integrity": "sha512-0NgftHUcV4v34VhXm8QBSftKVXtbkBG3ViCjs6+eJ5a6y6Mi/jiFGPc1sC7QK+9BFhWrURE3EOggmWaSxL9OzQ==", + "license": "MIT", + "dependencies": { + "@types/lodash": "*" + } + }, + "node_modules/lodash-es": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz", + "integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==", + "license": "MIT" + } + } +} diff --git a/package.json b/package.json new file mode 100644 index 00000000..fc1a6df3 --- /dev/null +++ b/package.json @@ -0,0 +1,6 @@ +{ + "dependencies": { + "@types/lodash-es": "^4.17.12", + "lodash-es": "^4.17.21" + } +} diff --git a/settings/openai.go b/settings/openai.go index fc755978..fc7aae03 100644 --- a/settings/openai.go +++ b/settings/openai.go @@ -3,13 +3,22 @@ package settings import "github.com/sashabaranov/go-openai" type OpenAI struct { - BaseUrl string `json:"base_url" binding:"omitempty,url"` - Token string `json:"token" binding:"omitempty,safety_text"` - Proxy string `json:"proxy" binding:"omitempty,url"` - Model string `json:"model" binding:"omitempty,safety_text"` - APIType string 
`json:"api_type" binding:"omitempty,oneof=OPEN_AI AZURE"` + BaseUrl string `json:"base_url" binding:"omitempty,url"` + Token string `json:"token" binding:"omitempty,safety_text"` + Proxy string `json:"proxy" binding:"omitempty,url"` + Model string `json:"model" binding:"omitempty,safety_text"` + APIType string `json:"api_type" binding:"omitempty,oneof=OPEN_AI AZURE"` + EnableCodeCompletion bool `json:"enable_code_completion" binding:"omitempty"` + CodeCompletionModel string `json:"code_completion_model" binding:"omitempty,safety_text"` } var OpenAISettings = &OpenAI{ APIType: string(openai.APITypeOpenAI), } + +func (o *OpenAI) GetCodeCompletionModel() string { + if o.CodeCompletionModel == "" { + return o.Model + } + return o.CodeCompletionModel +}