feat(wip): code completion with llm

This commit is contained in:
Jacky 2025-04-15 21:54:51 +08:00
parent 63fb823344
commit a57748a432
No known key found for this signature in database
GPG key ID: 215C21B10DF38B4D
22 changed files with 623 additions and 31 deletions

View file

@ -0,0 +1,82 @@
package openai
import (
"net/http"
"sync"
"time"
"github.com/0xJacky/Nginx-UI/api"
"github.com/0xJacky/Nginx-UI/internal/helper"
"github.com/0xJacky/Nginx-UI/internal/llm"
"github.com/0xJacky/Nginx-UI/settings"
"github.com/gin-gonic/gin"
"github.com/gorilla/websocket"
"github.com/uozi-tech/cosy"
"github.com/uozi-tech/cosy/logger"
)
// CodeCompletion upgrades the request to a websocket and serves code
// completion requests over it. Each incoming JSON message is processed in its
// own goroutine; responses carry the request_id so the client can match them.
func CodeCompletion(c *gin.Context) {
	if !settings.OpenAISettings.EnableCodeCompletion {
		cosy.ErrHandler(c, llm.ErrCodeCompletionNotEnabled)
		return
	}

	upgrader := websocket.Upgrader{
		// Origin checking is intentionally disabled here; access control is
		// handled by the surrounding auth middleware.
		CheckOrigin: func(r *http.Request) bool {
			return true
		},
	}

	ws, err := upgrader.Upgrade(c.Writer, c.Request, nil)
	if err != nil {
		cosy.ErrHandler(c, err)
		return
	}
	defer ws.Close()

	// writeMu serializes writes from the per-request goroutines below:
	// gorilla/websocket allows at most one concurrent writer per connection.
	// It is scoped per connection so independent clients don't contend on a
	// single package-level lock.
	var writeMu sync.Mutex

	userID := api.CurrentUser(c).ID

	for {
		var codeCompletionRequest llm.CodeCompletionRequest
		if err := ws.ReadJSON(&codeCompletionRequest); err != nil {
			if helper.IsUnexpectedWebsocketError(err) {
				logger.Errorf("Error reading JSON: %v", err)
			}
			return
		}

		codeCompletionRequest.UserID = userID

		// Pass the request by value so each goroutine works on its own copy.
		go func(req llm.CodeCompletionRequest) {
			start := time.Now()

			completedCode, err := req.Send()
			if err != nil {
				logger.Errorf("Error sending code completion request: %v", err)
				return
			}

			elapsed := time.Since(start)

			writeMu.Lock()
			defer writeMu.Unlock()
			err = ws.WriteJSON(gin.H{
				"code":          completedCode,
				"request_id":    req.RequestID,
				"completion_ms": elapsed.Milliseconds(),
			})
			if err != nil {
				if helper.IsUnexpectedWebsocketError(err) {
					logger.Errorf("Error writing JSON: %v", err)
				}
				return
			}
		}(codeCompletionRequest)
	}
}
// GetCodeCompletionEnabledStatus reports whether the code completion feature
// is enabled in the OpenAI settings.
func GetCodeCompletionEnabledStatus(c *gin.Context) {
	enabled := settings.OpenAISettings.EnableCodeCompletion
	c.JSON(http.StatusOK, gin.H{
		"enabled": enabled,
	})
}

View file

@ -4,15 +4,16 @@ import (
"context"
"errors"
"fmt"
"github.com/0xJacky/Nginx-UI/internal/chatbot"
"io"
"strings"
"time"
"github.com/0xJacky/Nginx-UI/internal/llm"
"github.com/0xJacky/Nginx-UI/settings"
"github.com/gin-gonic/gin"
"github.com/sashabaranov/go-openai"
"github.com/uozi-tech/cosy"
"github.com/uozi-tech/cosy/logger"
"io"
"strings"
"time"
)
const ChatGPTInitPrompt = `You are a assistant who can help users write and optimise the configurations of Nginx,
@ -41,7 +42,7 @@ func MakeChatCompletionRequest(c *gin.Context) {
messages = append(messages, json.Messages...)
if json.Filepath != "" {
messages = chatbot.ChatCompletionWithContext(json.Filepath, messages)
messages = llm.ChatCompletionWithContext(json.Filepath, messages)
}
// SSE server
@ -50,7 +51,7 @@ func MakeChatCompletionRequest(c *gin.Context) {
c.Writer.Header().Set("Connection", "keep-alive")
c.Writer.Header().Set("Access-Control-Allow-Origin", "*")
openaiClient, err := chatbot.GetClient()
openaiClient, err := llm.GetClient()
if err != nil {
c.Stream(func(w io.Writer) bool {
c.SSEvent("message", gin.H{

View file

@ -6,4 +6,7 @@ func InitRouter(r *gin.RouterGroup) {
// ChatGPT
r.POST("chatgpt", MakeChatCompletionRequest)
r.POST("chatgpt_record", StoreChatGPTRecord)
// Code Completion
r.GET("code_completion", CodeCompletion)
r.GET("code_completion/enabled", GetCodeCompletionEnabledStatus)
}

View file

@ -70,6 +70,8 @@ Token =
Proxy =
Model = gpt-4o
APIType =
EnableCodeCompletion = false
CodeCompletionModel = gpt-4o-mini
[terminal]
StartCmd = bash

View file

@ -43,6 +43,7 @@
"sse.js": "^2.6.0",
"universal-cookie": "^8.0.1",
"unocss": "^66.0.0",
"uuid": "^11.1.0",
"vite-plugin-build-id": "0.5.0",
"vue": "^3.5.13",
"vue-dompurify-html": "^5.2.0",

9
app/pnpm-lock.yaml generated
View file

@ -98,6 +98,9 @@ importers:
unocss:
specifier: ^66.0.0
version: 66.0.0(postcss@8.5.3)(vite@6.2.6(@types/node@22.14.0)(jiti@2.4.2)(less@4.3.0)(tsx@4.19.2)(yaml@2.7.1))(vue@3.5.13(typescript@5.8.3))
uuid:
specifier: ^11.1.0
version: 11.1.0
vite-plugin-build-id:
specifier: 0.5.0
version: 0.5.0
@ -3842,6 +3845,10 @@ packages:
util-deprecate@1.0.2:
resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==}
uuid@11.1.0:
resolution: {integrity: sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==}
hasBin: true
validate-npm-package-license@3.0.4:
resolution: {integrity: sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==}
@ -8217,6 +8224,8 @@ snapshots:
util-deprecate@1.0.2: {}
uuid@11.1.0: {}
validate-npm-package-license@3.0.4:
dependencies:
spdx-correct: 3.2.0

View file

@ -1,4 +1,5 @@
import http from '@/lib/http'
import ws from '@/lib/websocket'
export interface ChatComplicationMessage {
role: string
@ -6,10 +7,31 @@ export interface ChatComplicationMessage {
name?: string
}
export interface CodeCompletionRequest {
  context: string // Full text of the file being edited (sent as completion context)
  code: string // Code before the cursor
  suffix?: string // Code after the cursor
  language?: string // Programming language of the file (e.g. nginx)
  position?: { // Cursor position within the editor
    row: number
    column: number
  }
}
export interface CodeCompletionResponse {
  code: string // Completed code to be inserted at the cursor position
}
const openai = {
  // Persist a chat session (file name + message history) on the server.
  store_record(data: { file_name?: string, messages?: ChatComplicationMessage[] }) {
    return http.post('/chatgpt_record', data)
  },
  // Open the websocket used for streaming code completion requests.
  code_completion() {
    return ws('/api/code_completion')
  },
  // Ask the server whether the code completion feature is enabled.
  get_code_completion_enabled_status() {
    return http.get<{ enabled: boolean }>('/code_completion/enabled')
  },
}

export default openai

View file

@ -81,6 +81,8 @@ export interface OpenaiSettings {
proxy: string
token: string
api_type: string
enable_code_completion: boolean
code_completion_model: string
}
export interface TerminalSettings {

View file

@ -0,0 +1,224 @@
import type { Editor } from 'ace-builds'
import type { Point } from 'ace-builds-internal/document'
import openai from '@/api/openai'
import { debounce } from 'lodash'
import { v4 as uuidv4 } from 'uuid'
// Dev-only console logger; compiled out of production builds via the DEV flag.
// eslint-disable-next-line ts/no-explicit-any
function debug(...args: any[]) {
  if (!import.meta.env.DEV)
    return
  // eslint-disable-next-line no-console
  console.debug(`[CodeEditor]`, ...args)
}
/**
 * Composable providing AI "ghost text" code completion for an Ace editor.
 *
 * Returns `init(editor)` to attach completion to an editor instance and
 * `cleanUp()` to tear the websocket down on unmount.
 */
function useCodeCompletion() {
  const editorRef = ref<Editor>()
  const currentGhostText = ref<string>('')

  // Created lazily inside init() so that no websocket connection is opened
  // when code completion is disabled server-side.
  let ws: ReturnType<typeof openai.code_completion> | undefined

  // Send a completion request over the websocket. The callback fires only for
  // the response whose request_id matches, so responses from superseded
  // requests are silently dropped.
  function getAISuggestions(code: string, context: string, position: Point, callback: (suggestion: string) => void, language: string = 'nginx', suffix: string = '', requestId: string) {
    if (!ws || ws.readyState !== WebSocket.OPEN) {
      debug('WebSocket is not open')
      return
    }

    if (!code.trim()) {
      debug('Code is empty')
      return
    }

    const message = {
      context,
      code,
      suffix,
      language,
      position,
      request_id: requestId,
    }

    debug('Sending message', message)
    ws.send(JSON.stringify(message))

    // Replacing onmessage on each request is intentional: only the latest
    // request's handler should be live.
    ws.onmessage = event => {
      const data = JSON.parse(event.data)
      debug(`Received message`, data, requestId)
      if (data.request_id === requestId) {
        callback(data.code)
      }
    }
  }

  // Compute the text before/after the cursor, request a completion, and show
  // the result as ghost text at the (possibly moved) cursor position.
  function applyGhostText() {
    if (!editorRef.value) {
      debug('Editor instance not available yet')
      return
    }

    try {
      const currentText = editorRef.value.getValue()
      const cursorPosition = editorRef.value.getCursorPosition()

      // Everything up to the cursor becomes the "code" part of the request.
      const allLines = currentText.split('\n')
      const currentLine = allLines[cursorPosition.row]
      const textUpToCursor = allLines.slice(0, cursorPosition.row).join('\n')
        + (cursorPosition.row > 0 ? '\n' : '')
        + currentLine.substring(0, cursorPosition.column)

      // Everything after the cursor becomes the suffix.
      const textAfterCursor = currentLine.substring(cursorPosition.column)
        + (cursorPosition.row < allLines.length - 1 ? '\n' : '')
        + allLines.slice(cursorPosition.row + 1).join('\n')

      // Fresh ID so stale responses can be discarded.
      const requestId = uuidv4()

      // Clear existing ghost text before making the request
      clearGhostText()

      getAISuggestions(
        textUpToCursor,
        currentText,
        cursorPosition,
        suggestion => {
          debug(`AI suggestions applied: ${suggestion}`)

          if (suggestion && typeof editorRef.value!.setGhostText === 'function') {
            clearGhostText()
            // The cursor may have moved while the request was in flight.
            const newPosition = editorRef.value!.getCursorPosition()
            editorRef.value!.setGhostText(suggestion, {
              column: newPosition.column,
              row: newPosition.row,
            })
            debug(`Ghost text set: ${suggestion}`)
            currentGhostText.value = suggestion
          }
          else if (suggestion) {
            debug('setGhostText method not available on editor instance')
          }
        },
        editorRef.value.session.getMode()?.path?.split('/').pop() || 'text',
        textAfterCursor, // Pass text after cursor as suffix
        requestId, // Pass request ID
      )
    }
    catch (error) {
      debug(`Error in applyGhostText: ${error}`)
    }
  }

  // Register a Tab command that accepts the current ghost text suggestion.
  function setupTabHandler(editor: Editor) {
    if (!editor) {
      debug('Editor not available in setupTabHandler')
      return
    }

    debug('Setting up Tab key handler')

    // Remove existing command to avoid conflicts
    const existingCommand = editor.commands.byName.acceptGhostText
    if (existingCommand) {
      editor.commands.removeCommand(existingCommand)
    }

    editor.commands.addCommand({
      name: 'acceptGhostText',
      bindKey: { win: 'Tab', mac: 'Tab' },
      exec: (editor: Editor) => {
        // Use our saved ghost text, not dependent on editor.ghostText
        if (currentGhostText.value) {
          debug(`Accepting ghost text: ${currentGhostText.value}`)

          const position = editor.getCursorPosition()
          const text = currentGhostText.value

          // Insert text through session API
          editor.session.insert(position, text)

          clearGhostText()
          debug('Ghost text inserted successfully')
          return true // Prevent event propagation
        }

        debug('No ghost text to accept, allowing default tab behavior')
        return false // Allow default Tab behavior
      },
      readOnly: false,
    })

    debug('Tab key handler set up successfully')
  }

  // Remove any displayed ghost text and reset the saved suggestion.
  function clearGhostText() {
    if (!editorRef.value)
      return

    if (typeof editorRef.value.removeGhostText === 'function') {
      editorRef.value.removeGhostText()
    }
    currentGhostText.value = ''
  }

  const debouncedApplyGhostText = debounce(applyGhostText, 1000, { leading: false, trailing: true })

  async function init(editor: Editor) {
    const { enabled } = await openai.get_code_completion_enabled_status()

    if (!enabled) {
      debug('Code completion is not enabled')
      return
    }

    // Connect only once we know the feature is enabled.
    ws = openai.code_completion()

    editorRef.value = editor
    debug('Editor initialized')

    // Set up Tab key handler
    setupTabHandler(editor)

    setTimeout(() => {
      editor.on('change', (e: { action: string }) => {
        debug(`Editor change event: ${e.action}`)
        // Any edit invalidates the currently displayed suggestion.
        clearGhostText()
        if (e.action === 'insert' || e.action === 'remove') {
          debouncedApplyGhostText()
        }
      })

      // Listen for cursor changes, using debounce
      editor.selection.on('changeCursor', () => {
        debug('Cursor changed')
        clearGhostText()
        debouncedApplyGhostText()
      })
    }, 2000)
  }

  function cleanUp() {
    if (ws) {
      ws.close()
    }

    debug('CodeCompletion unmounted')
  }

  return {
    init,
    cleanUp,
  }
}

export default useCodeCompletion

View file

@ -1,49 +1,55 @@
<script setup lang="ts">
import type { Editor } from 'ace-builds'
import ace from 'ace-builds'
import extSearchboxUrl from 'ace-builds/src-noconflict/ext-searchbox?url'
import { VAceEditor } from 'vue3-ace-editor'
import useCodeCompletion from './CodeCompletion'
import 'ace-builds/src-noconflict/mode-nginx'
import 'ace-builds/src-noconflict/theme-monokai'
const props = defineProps<{
content?: string
defaultHeight?: string
readonly?: boolean
placeholder?: string
}>()
const emit = defineEmits(['update:content'])
const value = computed({
get() {
return props.content ?? ''
},
set(v) {
emit('update:content', v)
},
})
const content = defineModel<string>('content', { default: '' })
onMounted(() => {
try {
ace.config.setModuleUrl('ace/ext/searchbox', extSearchboxUrl)
}
catch (error) {
console.error('Failed to initialize Ace editor:', error)
console.error(`Failed to initialize Ace editor: ${error}`)
}
})
const codeCompletion = useCodeCompletion()
function init(editor: Editor) {
if (props.readonly) {
return
}
codeCompletion.init(editor)
}
onUnmounted(() => {
codeCompletion.cleanUp()
})
</script>
<template>
<VAceEditor
v-model:value="value"
v-model:value="content"
lang="nginx"
theme="monokai"
:style="{
minHeight: defaultHeight || '100vh',
borderRadius: '5px',
}"
:readonly="readonly"
:placeholder="placeholder"
:readonly
:placeholder
@init="init"
/>
</template>
@ -52,4 +58,9 @@ onMounted(() => {
z-index: 1;
position: relative;
}
:deep(.ace_ghost-text) {
color: #6a737d;
opacity: 0.8;
}
</style>

View file

@ -76,6 +76,24 @@ const providers = LLM_PROVIDERS.map(provider => ({
</ASelectOption>
</ASelect>
</AFormItem>
<AFormItem
:label="$gettext('Enable Code Completion')"
>
<ASwitch v-model:checked="data.openai.enable_code_completion" />
</AFormItem>
<AFormItem
v-if="data.openai.enable_code_completion"
:label="$gettext('Code Completion Model')"
:validate-status="errors?.openai?.code_completion_model ? 'error' : ''"
:help="errors?.openai?.code_completion_model === 'safety_text'
? $gettext('The model name should only contain letters, unicode, numbers, hyphens, dashes, colons, and dots.')
: $gettext('The model used for code completion, if not set, the chat model will be used.')"
>
<AAutoComplete
v-model:value="data.openai.code_completion_model"
:options="models"
/>
</AFormItem>
</AForm>
</template>

View file

@ -90,6 +90,8 @@ const data = ref<Settings>({
proxy: '',
token: '',
api_type: 'OPEN_AI',
enable_code_completion: false,
code_completion_model: '',
},
terminal: {
start_cmd: '',

View file

@ -1,4 +1,4 @@
package chatbot
package llm
import (
"github.com/0xJacky/Nginx-UI/internal/transport"

View file

@ -0,0 +1,156 @@
package llm
import (
"context"
"regexp"
"strconv"
"strings"
"sync"
"github.com/0xJacky/Nginx-UI/settings"
"github.com/sashabaranov/go-openai"
"github.com/uozi-tech/cosy/logger"
)
const (
	// MaxTokens caps the completion length; completions are short inline snippets.
	MaxTokens = 100
	// Temperature of 1 keeps the model's default sampling behaviour.
	Temperature = 1

	// SystemPrompt is the system message sent with every completion request.
	SystemPrompt = "You are a code completion assistant. " +
		"Complete the provided code snippet based on the context and instruction." +
		"[IMPORTANT] Keep the original code indentation."
)
// Position is the cursor position within the edited file, as reported by the
// frontend editor.
type Position struct {
	Row    int `json:"row"`
	Column int `json:"column"`
}
// CodeCompletionRequest is a single completion request received over the
// code completion websocket.
type CodeCompletionRequest struct {
	// RequestID correlates the response with the request on the client side.
	RequestID string `json:"request_id"`
	// UserID is set server-side from the authenticated session, not by the client.
	UserID uint64 `json:"user_id"`
	// Context is the full text of the file being edited.
	Context string `json:"context"`
	// Code is the text before the cursor.
	Code string `json:"code"`
	// Suffix is the text after the cursor.
	Suffix string `json:"suffix"`
	// Language is the programming language of the file.
	Language string `json:"language"`
	// Position is the cursor position in the file.
	Position Position `json:"position"`
}
var (
	// requestContext maps a user ID to the cancel function of that user's
	// in-flight completion request, so a newer request can cancel it.
	requestContext = make(map[uint64]context.CancelFunc)
	// mutex guards requestContext.
	mutex sync.Mutex
)
// Send performs the code completion request against the configured LLM and
// returns the cleaned-up completion text to insert at the cursor.
//
// At most one request per user is in flight: starting a new request cancels
// the previous one for the same UserID.
func (c *CodeCompletionRequest) Send() (completedCode string, err error) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Cancel any in-flight request for this user and register ourselves.
	// The map read must happen under the lock: concurrent Send calls write
	// requestContext, so an unlocked read would be a data race.
	mutex.Lock()
	if prevCancel, ok := requestContext[c.UserID]; ok {
		logger.Infof("Code completion request cancelled for user %d", c.UserID)
		prevCancel()
	}
	requestContext[c.UserID] = cancel
	mutex.Unlock()

	defer func() {
		mutex.Lock()
		delete(requestContext, c.UserID)
		mutex.Unlock()
	}()

	openaiClient, err := GetClient()
	if err != nil {
		return
	}

	// Build the user prompt: whole-file context, cursor location, then the
	// code immediately before (and optionally after) the cursor.
	userPrompt := "Here is a file written in " + c.Language + ":\n```\n" + c.Context + "\n```\n"
	userPrompt += "I'm editing at row " + strconv.Itoa(c.Position.Row) + ", column " + strconv.Itoa(c.Position.Column) + ".\n"
	userPrompt += "Code before cursor:\n```\n" + c.Code + "\n```\n"

	if c.Suffix != "" {
		userPrompt += "Code after cursor:\n```\n" + c.Suffix + "\n```\n"
	}

	userPrompt += "Instruction: Only provide the completed code that should be inserted at the cursor position without explanations. " +
		"The code should be syntactically correct and follow best practices for " + c.Language + "."

	messages := []openai.ChatCompletionMessage{
		{
			Role:    openai.ChatMessageRoleSystem,
			Content: SystemPrompt,
		},
		{
			Role:    openai.ChatMessageRoleUser,
			Content: userPrompt,
		},
	}

	req := openai.ChatCompletionRequest{
		Model:       settings.OpenAISettings.GetCodeCompletionModel(),
		Messages:    messages,
		MaxTokens:   MaxTokens,
		Temperature: Temperature,
	}

	// Non-streaming call: completions are short, so a single response is fine.
	response, err := openaiClient.CreateChatCompletion(ctx, req)
	if err != nil {
		return
	}

	// Guard against an empty choice list to avoid an index panic.
	if len(response.Choices) == 0 {
		return "", nil
	}

	completedCode = response.Choices[0].Message.Content

	// Strip the token the user already typed so the result can be inserted verbatim.
	lastWord := extractLastWord(c.Code)
	completedCode = cleanCompletionResponse(completedCode, lastWord)
	logger.Infof("Code completion response: %s", completedCode)
	return
}
// lastWordRe matches the trailing run of word characters (letters, digits,
// underscores) at the end of a string. Compiled once at package init instead
// of on every call.
var lastWordRe = regexp.MustCompile(`[a-zA-Z0-9_]+$`)

// extractLastWord returns the trailing word of code, or "" when code is empty
// or does not end in a word character. It identifies the partially typed
// token so cleanCompletionResponse can strip it from the completion.
func extractLastWord(code string) string {
	if code == "" {
		return ""
	}
	return lastWordRe.FindString(code)
}
var (
	// thinkTagRe strips <think>...</think> reasoning blocks that some models emit.
	thinkTagRe = regexp.MustCompile(`<think>[\s\S]*?</think>`)
	// fencedCodeRe captures the body of a fenced markdown code block with an
	// optional language tag.
	fencedCodeRe = regexp.MustCompile("```(?:[a-zA-Z]+)?\n((?:.|\n)*?)\n```")
)

// cleanCompletionResponse normalizes a raw model response into insertable
// code: it removes <think></think> blocks, unwraps a fenced markdown code
// block if present, trims stray backticks, and strips the already-typed
// lastWord prefix so the result can be inserted at the cursor verbatim.
func cleanCompletionResponse(response string, lastWord string) (cleanResp string) {
	// Remove model "reasoning" blocks first.
	cleanResp = thinkTagRe.ReplaceAllString(response, "")

	// Prefer the content of a fenced code block; otherwise use the whole text.
	if matches := fencedCodeRe.FindStringSubmatch(cleanResp); len(matches) > 1 {
		cleanResp = strings.TrimSpace(matches[1])
	} else {
		cleanResp = strings.TrimSpace(cleanResp)
	}

	// Drop any leftover markdown backticks.
	cleanResp = strings.Trim(cleanResp, "`")

	// If the completion repeats the token the user already typed, strip it.
	if lastWord != "" && strings.HasPrefix(cleanResp, lastWord) {
		cleanResp = strings.TrimPrefix(cleanResp, lastWord)
	}
	return
}

View file

@ -1,4 +1,4 @@
package chatbot
package llm
import (
"github.com/0xJacky/Nginx-UI/internal/helper"

View file

@ -1,4 +1,4 @@
package chatbot
package llm
import (
"github.com/stretchr/testify/assert"

10
internal/llm/errors.go Normal file
View file

@ -0,0 +1,10 @@
package llm
import (
"github.com/uozi-tech/cosy"
)
var (
	// e is the error scope for the llm package.
	e = cosy.NewErrorScope("llm")
	// ErrCodeCompletionNotEnabled is returned when a code completion request
	// arrives while the feature is disabled in the OpenAI settings.
	ErrCodeCompletionNotEnabled = e.New(400, "code completion is not enabled")
)

View file

@ -1,4 +1,4 @@
package chatbot
package llm
import (
"github.com/sashabaranov/go-openai"

View file

@ -1,4 +1,4 @@
package chatbot
package llm
import (
"github.com/sashabaranov/go-openai"

34
package-lock.json generated Normal file
View file

@ -0,0 +1,34 @@
{
"name": "nginx-ui",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"dependencies": {
"@types/lodash-es": "^4.17.12",
"lodash-es": "^4.17.21"
}
},
"node_modules/@types/lodash": {
"version": "4.17.16",
"resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.17.16.tgz",
"integrity": "sha512-HX7Em5NYQAXKW+1T+FiuG27NGwzJfCX3s1GjOa7ujxZa52kjJLOr4FUxT+giF6Tgxv1e+/czV/iTtBw27WTU9g==",
"license": "MIT"
},
"node_modules/@types/lodash-es": {
"version": "4.17.12",
"resolved": "https://registry.npmjs.org/@types/lodash-es/-/lodash-es-4.17.12.tgz",
"integrity": "sha512-0NgftHUcV4v34VhXm8QBSftKVXtbkBG3ViCjs6+eJ5a6y6Mi/jiFGPc1sC7QK+9BFhWrURE3EOggmWaSxL9OzQ==",
"license": "MIT",
"dependencies": {
"@types/lodash": "*"
}
},
"node_modules/lodash-es": {
"version": "4.17.21",
"resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz",
"integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==",
"license": "MIT"
}
}
}

6
package.json Normal file
View file

@ -0,0 +1,6 @@
{
"dependencies": {
"@types/lodash-es": "^4.17.12",
"lodash-es": "^4.17.21"
}
}

View file

@ -8,8 +8,17 @@ type OpenAI struct {
Proxy string `json:"proxy" binding:"omitempty,url"`
Model string `json:"model" binding:"omitempty,safety_text"`
APIType string `json:"api_type" binding:"omitempty,oneof=OPEN_AI AZURE"`
EnableCodeCompletion bool `json:"enable_code_completion" binding:"omitempty"`
CodeCompletionModel string `json:"code_completion_model" binding:"omitempty,safety_text"`
}
// OpenAISettings holds the active OpenAI configuration.
// The API type defaults to the standard OpenAI API.
var OpenAISettings = &OpenAI{
	APIType: string(openai.APITypeOpenAI),
}
// GetCodeCompletionModel returns the model configured for code completion,
// falling back to the chat model when none is set.
func (o *OpenAI) GetCodeCompletionModel() string {
	if o.CodeCompletionModel != "" {
		return o.CodeCompletionModel
	}
	return o.Model
}