Merge pull request #962 from 0xJacky/feat/code-completion

feat(code-editor): LLM code completion
This commit is contained in:
Jacky 2025-04-16 16:11:00 +08:00 committed by GitHub
commit 65e5037a87
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
39 changed files with 709 additions and 175 deletions

View file

@ -33,7 +33,7 @@ jobs:
- name: Set up nodejs
uses: actions/setup-node@v4
with:
node-version: 21.x
node-version: 23.x
- name: Install dependencies
run: |
@ -52,8 +52,8 @@ jobs:
name: dist
path: docs/.vitepress/dist
- name: Deploy to server
if: github.event_name != 'pull_request'
- name: Deploy
if: github.event_name != 'pull_request' && github.ref == 'refs/heads/main'
uses: cloudflare/wrangler-action@v3
with:
apiToken: ${{ secrets.CF_API_TOKEN }}

View file

@ -134,6 +134,7 @@ Para más información: [debian/conf/nginx.conf](https://salsa.debian.org/nginx-
La UI de Nginx está disponible en las siguientes plataformas:
- macOS 11 Big Sur y posterior (amd64 / arm64)
- Windows 10 y posterior (x86 / amd64 / arm64)
- Linux 2.6.23 y posterior (x86 / amd64 / arm64 / armv5 / armv6 / armv7 / mips32 / mips64 / riscv64 / loongarch64)
- Incluyendo pero no limitado a Debian 7 / 8, Ubuntu 12.04 / 14.04 y posterior, CentOS 6 / 7, Arch Linux
- FreeBSD

View file

@ -148,6 +148,7 @@ http {
Giao diện người dùng Nginx có sẵn trên các nền tảng sau:
- macOS 11 Big Sur and later (amd64 / arm64)
- Windows 10 and later (x86 / amd64 / arm64)
- Linux 2.6.23 và sau đó (x86 / amd64 / arm64 / armv5 / armv6 / armv7 / mips32 / mips64 / riscv64 / loongarch64)
- Bao gồm nhưng không giới hạn Debian 7/8, Ubuntu 12.04/14.04 trở lên, CentOS 6/7, Arch Linux
- FreeBSD

View file

@ -78,7 +78,7 @@ Nginx 网络管理界面,由 [0xJacky](https://jackyu.cn/)、[Hintay](https://
- 导出加密的 Nginx / Nginx UI 配置,方便快速部署和恢复到新环境
- 增强版在线 ChatGPT 助手,支持多种模型,包括显示 Deepseek-R1 的思考链,帮助您更好地理解和优化配置
- 一键申请和自动续签 Let's encrypt 证书
- 在线编辑 Nginx 配置文件,编辑器支持 Nginx 配置语法高亮
- 在线编辑 Nginx 配置文件,编辑器支持**大模型代码补全**和 Nginx 配置语法高亮
- 在线查看 Nginx 日志
- 使用 Go 和 Vue 开发,发行版本为单个可执行的二进制文件
- 保存配置后自动测试配置文件并重载 Nginx
@ -130,6 +130,7 @@ http {
Nginx UI 可在以下平台中使用:
- macOS 11 Big Sur 及之后版本amd64 / arm64
- Windows 10 及之后版本x86 /amd64 / arm64
- Linux 2.6.23 及之后版本x86 / amd64 / arm64 / armv5 / armv6 / armv7 / mips32 / mips64 / riscv64 / loongarch64
- 包括但不限于 Debian 7 / 8、Ubuntu 12.04 / 14.04 及后续版本、CentOS 6 / 7、Arch Linux
- FreeBSD

View file

@ -80,7 +80,7 @@ Nginx 網路管理介面,由 [0xJacky](https://jackyu.cn/)、[Hintay](https://
- 匯出加密的 Nginx/NginxUI 設定,方便快速部署和恢復到新環境
- 增強版線上 ChatGPT 助手,支援多種模型,包括顯示 Deepseek-R1 的思考鏈,幫助您更好地理解和最佳化設定
- 一鍵申請和自動續簽 Let's encrypt 憑證
- 線上編輯 Nginx 設定檔,編輯器支援 Nginx 設定語法醒目提示
- 線上編輯 Nginx 設定檔,編輯器支援**大模型代碼補全**和 Nginx 設定語法醒目提示
- 線上檢視 Nginx 日誌
- 使用 Go 和 Vue 開發,發行版本為單個可執行檔案
- 儲存設定後自動測試設定檔並重新載入 Nginx
@ -133,6 +133,7 @@ http {
Nginx UI 可在以下作業系統中使用:
- macOS 11 Big Sur 及之後版本amd64 / arm64
- Windows 10 及之後版本x86 /amd64 / arm64
- Linux 2.6.23 及之後版本x86 / amd64 / arm64 / armv5 / armv6 / armv7 / mips32 / mips64 / riscv64 / loongarch64
- 包括但不限於 Debian 7 / 8、Ubuntu 12.04 / 14.04 及後續版本、CentOS 6 / 7、Arch Linux
- FreeBSD

View file

@ -95,7 +95,7 @@ URL[https://demo.nginxui.com](https://demo.nginxui.com)
- Export encrypted Nginx / Nginx UI configurations for quick deployment and recovery to new environments
- Enhanced online ChatGPT assistant supporting multiple models, including Deepseek-R1's chain-of-thought display to help you better understand and optimize configurations
- One-click deployment and automatic renewal Let's Encrypt certificates.
- Online editing websites configurations with our self-designed **NgxConfigEditor** which is a user-friendly block editor for nginx configurations or **Ace Code Editor** which supports highlighting nginx configuration syntax.
- Online editing websites configurations with our self-designed **NgxConfigEditor** which is a user-friendly block editor for nginx configurations or **Ace Code Editor** which supports **LLM Code Completion** and highlighting nginx configuration syntax.
- Online view Nginx logs
- Written in Go and Vue, distribution is a single executable binary.
- Automatically test configuration file and reload nginx after saving configuration.
@ -152,6 +152,7 @@ For more information: [debian/conf/nginx.conf](https://salsa.debian.org/nginx-te
Nginx UI is available on the following platforms:
- macOS 11 Big Sur and later (amd64 / arm64)
- Windows 10 and later (amd64 / arm64)
- Linux 2.6.23 and later (x86 / amd64 / arm64 / armv5 / armv6 / armv7 / mips32 / mips64 / riscv64 / loongarch64)
- Including but not limited to Debian 7 / 8, Ubuntu 12.04 / 14.04 and later, CentOS 6 / 7, Arch Linux
- FreeBSD

View file

@ -0,0 +1,82 @@
package openai
import (
"net/http"
"sync"
"time"
"github.com/0xJacky/Nginx-UI/api"
"github.com/0xJacky/Nginx-UI/internal/helper"
"github.com/0xJacky/Nginx-UI/internal/llm"
"github.com/0xJacky/Nginx-UI/settings"
"github.com/gin-gonic/gin"
"github.com/gorilla/websocket"
"github.com/uozi-tech/cosy"
"github.com/uozi-tech/cosy/logger"
)
var mutex sync.Mutex
// CodeCompletion upgrades the HTTP request to a WebSocket connection and
// serves LLM-backed code completion: every JSON message received is decoded
// into an llm.CodeCompletionRequest and answered asynchronously with the
// completed code, the originating request ID, and the elapsed time.
func CodeCompletion(c *gin.Context) {
	// Reject early when the feature is disabled in settings.
	if !settings.OpenAISettings.EnableCodeCompletion {
		cosy.ErrHandler(c, llm.ErrCodeCompletionNotEnabled)
		return
	}

	upgrader := websocket.Upgrader{
		// NOTE(review): accepting any origin exposes this endpoint to
		// cross-site WebSocket hijacking — confirm an auth middleware
		// protects the route before relying on this.
		CheckOrigin: func(r *http.Request) bool {
			return true
		},
	}

	ws, err := upgrader.Upgrade(c.Writer, c.Request, nil)
	if err != nil {
		cosy.ErrHandler(c, err)
		return
	}
	defer ws.Close()

	// Serialize concurrent writes to this connection only. The previous
	// package-level mutex needlessly serialized writes across all clients.
	var writeMu sync.Mutex

	for {
		var req llm.CodeCompletionRequest
		if err := ws.ReadJSON(&req); err != nil {
			// Normal closes are expected; only log unexpected errors.
			if helper.IsUnexpectedWebsocketError(err) {
				logger.Errorf("Error reading JSON: %v", err)
			}
			return
		}

		req.UserID = api.CurrentUser(c).ID

		// Answer asynchronously so a slow LLM call does not block reads.
		go func(req llm.CodeCompletionRequest) {
			start := time.Now()

			completedCode, err := req.Send()
			if err != nil {
				logger.Errorf("Error sending code completion request: %v", err)
				return
			}

			writeMu.Lock()
			defer writeMu.Unlock()
			err = ws.WriteJSON(gin.H{
				"code":          completedCode,
				"request_id":    req.RequestID,
				"completion_ms": time.Since(start).Milliseconds(),
			})
			if err != nil {
				if helper.IsUnexpectedWebsocketError(err) {
					logger.Errorf("Error writing JSON: %v", err)
				}
				return
			}
		}(req)
	}
}
// GetCodeCompletionEnabledStatus reports whether the code completion
// feature is enabled in the OpenAI settings.
func GetCodeCompletionEnabledStatus(c *gin.Context) {
	enabled := settings.OpenAISettings.EnableCodeCompletion
	c.JSON(http.StatusOK, gin.H{"enabled": enabled})
}

View file

@ -4,15 +4,16 @@ import (
"context"
"errors"
"fmt"
"github.com/0xJacky/Nginx-UI/internal/chatbot"
"io"
"strings"
"time"
"github.com/0xJacky/Nginx-UI/internal/llm"
"github.com/0xJacky/Nginx-UI/settings"
"github.com/gin-gonic/gin"
"github.com/sashabaranov/go-openai"
"github.com/uozi-tech/cosy"
"github.com/uozi-tech/cosy/logger"
"io"
"strings"
"time"
)
const ChatGPTInitPrompt = `You are a assistant who can help users write and optimise the configurations of Nginx,
@ -41,7 +42,7 @@ func MakeChatCompletionRequest(c *gin.Context) {
messages = append(messages, json.Messages...)
if json.Filepath != "" {
messages = chatbot.ChatCompletionWithContext(json.Filepath, messages)
messages = llm.ChatCompletionWithContext(json.Filepath, messages)
}
// SSE server
@ -50,7 +51,7 @@ func MakeChatCompletionRequest(c *gin.Context) {
c.Writer.Header().Set("Connection", "keep-alive")
c.Writer.Header().Set("Access-Control-Allow-Origin", "*")
openaiClient, err := chatbot.GetClient()
openaiClient, err := llm.GetClient()
if err != nil {
c.Stream(func(w io.Writer) bool {
c.SSEvent("message", gin.H{

View file

@ -6,4 +6,7 @@ func InitRouter(r *gin.RouterGroup) {
// ChatGPT
r.POST("chatgpt", MakeChatCompletionRequest)
r.POST("chatgpt_record", StoreChatGPTRecord)
// Code Completion
r.GET("code_completion", CodeCompletion)
r.GET("code_completion/enabled", GetCodeCompletionEnabledStatus)
}

View file

@ -70,6 +70,8 @@ Token =
Proxy =
Model = gpt-4o
APIType =
EnableCodeCompletion = false
CodeCompletionModel = gpt-4o-mini
[terminal]
StartCmd = bash

View file

@ -43,6 +43,7 @@
"sse.js": "^2.6.0",
"universal-cookie": "^8.0.1",
"unocss": "^66.0.0",
"uuid": "^11.1.0",
"vite-plugin-build-id": "0.5.0",
"vue": "^3.5.13",
"vue-dompurify-html": "^5.2.0",

9
app/pnpm-lock.yaml generated
View file

@ -98,6 +98,9 @@ importers:
unocss:
specifier: ^66.0.0
version: 66.0.0(postcss@8.5.3)(vite@6.2.6(@types/node@22.14.0)(jiti@2.4.2)(less@4.3.0)(tsx@4.19.2)(yaml@2.7.1))(vue@3.5.13(typescript@5.8.3))
uuid:
specifier: ^11.1.0
version: 11.1.0
vite-plugin-build-id:
specifier: 0.5.0
version: 0.5.0
@ -3842,6 +3845,10 @@ packages:
util-deprecate@1.0.2:
resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==}
uuid@11.1.0:
resolution: {integrity: sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==}
hasBin: true
validate-npm-package-license@3.0.4:
resolution: {integrity: sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==}
@ -8217,6 +8224,8 @@ snapshots:
util-deprecate@1.0.2: {}
uuid@11.1.0: {}
validate-npm-package-license@3.0.4:
dependencies:
spdx-correct: 3.2.0

View file

@ -1,4 +1,5 @@
import http from '@/lib/http'
import ws from '@/lib/websocket'
export interface ChatComplicationMessage {
role: string
@ -6,10 +7,31 @@ export interface ChatComplicationMessage {
name?: string
}
// Payload sent over the code-completion WebSocket.
export interface CodeCompletionRequest {
  context: string // Context of the code (typically the whole file)
  code: string // Code before the cursor
  suffix?: string // Code after the cursor
  language?: string // Programming language
  position?: { // Cursor position
    row: number
    column: number
  }
}

// Message received back from the code-completion WebSocket.
export interface CodeCompletionResponse {
  code: string // Completed code to insert at the cursor
}

const openai = {
  // Persist a chat session (file name + message history) on the server.
  store_record(data: { file_name?: string, messages?: ChatComplicationMessage[] }) {
    return http.post('/chatgpt_record', data)
  },
  // Open the long-lived code-completion WebSocket.
  code_completion() {
    return ws('/api/code_completion')
  },
  // Ask the server whether code completion is enabled in settings.
  get_code_completion_enabled_status() {
    return http.get<{ enabled: boolean }>('/code_completion/enabled')
  },
}

export default openai

View file

@ -81,6 +81,8 @@ export interface OpenaiSettings {
proxy: string
token: string
api_type: string
enable_code_completion: boolean
code_completion_model: string
}
export interface TerminalSettings {

View file

@ -0,0 +1,224 @@
import type { Editor } from 'ace-builds'
import type { Point } from 'ace-builds-internal/document'
import openai from '@/api/openai'
import { debounce } from 'lodash'
import { v4 as uuidv4 } from 'uuid'
// eslint-disable-next-line ts/no-explicit-any
function debug(...args: any[]) {
if (import.meta.env.DEV) {
// eslint-disable-next-line no-console
console.debug(`[CodeEditor]`, ...args)
}
}
/**
 * Ace editor ghost-text code completion backed by the LLM WebSocket endpoint.
 *
 * Usage: call `init(editor)` once the editor is mounted and `cleanUp()` on
 * unmount. Suggestions are requested (debounced) on edits and cursor moves,
 * rendered as ghost text, and accepted with the Tab key.
 */
function useCodeCompletion() {
  const editorRef = ref<Editor>()
  const currentGhostText = ref<string>('')

  // One long-lived socket shared by every request from this composable.
  const ws = openai.code_completion()

  // Send a completion request and deliver the response matching `requestId`
  // to `callback`; responses for superseded requests are dropped.
  function getAISuggestions(code: string, context: string, position: Point, callback: (suggestion: string) => void, language: string = 'nginx', suffix: string = '', requestId: string) {
    if (!ws || ws.readyState !== WebSocket.OPEN) {
      debug('WebSocket is not open')
      return
    }

    if (!code.trim()) {
      debug('Code is empty')
      return
    }

    const message = {
      context,
      code,
      suffix,
      language,
      position,
      request_id: requestId,
    }

    debug('Sending message', message)

    ws.send(JSON.stringify(message))

    // Re-assigning onmessage makes the latest request the only listener;
    // stale answers are filtered out by the request_id check below.
    ws.onmessage = event => {
      const data = JSON.parse(event.data)
      debug(`Received message`, data, requestId)
      if (data.request_id === requestId) {
        callback(data.code)
      }
    }
  }

  // Request a suggestion for the current cursor position and render it as
  // ghost text when it arrives.
  function applyGhostText() {
    if (!editorRef.value) {
      debug('Editor instance not available yet')
      return
    }

    try {
      const currentText = editorRef.value.getValue()
      const cursorPosition = editorRef.value.getCursorPosition()

      // Get all text before the current cursor position as the code part for the request
      const allLines = currentText.split('\n')
      const currentLine = allLines[cursorPosition.row]
      const textUpToCursor = allLines.slice(0, cursorPosition.row).join('\n')
        + (cursorPosition.row > 0 ? '\n' : '')
        + currentLine.substring(0, cursorPosition.column)

      // Get text after cursor position as suffix
      const textAfterCursor = currentLine.substring(cursorPosition.column)
        + (cursorPosition.row < allLines.length - 1 ? '\n' : '')
        + allLines.slice(cursorPosition.row + 1).join('\n')

      // Generate new request ID
      const requestId = uuidv4()

      // Clear existing ghost text before making the request
      clearGhostText()

      // Get AI suggestions
      getAISuggestions(
        textUpToCursor,
        currentText,
        cursorPosition,
        suggestion => {
          debug(`AI suggestions applied: ${suggestion}`)

          // If there's a suggestion, set ghost text
          if (suggestion && typeof editorRef.value!.setGhostText === 'function') {
            clearGhostText()

            // Get current cursor position (may have changed during async process)
            const newPosition = editorRef.value!.getCursorPosition()
            editorRef.value!.setGhostText(suggestion, {
              column: newPosition.column,
              row: newPosition.row,
            })
            debug(`Ghost text set: ${suggestion}`)
            currentGhostText.value = suggestion
          }
          else if (suggestion) {
            debug('setGhostText method not available on editor instance')
          }
        },
        editorRef.value.session.getMode()?.path?.split('/').pop() || 'text',
        textAfterCursor, // Pass text after cursor as suffix
        requestId, // Pass request ID
      )
    }
    catch (error) {
      debug(`Error in applyGhostText: ${error}`)
    }
  }

  // Accept the ghost text suggestion with Tab key
  function setupTabHandler(editor: Editor) {
    if (!editor) {
      debug('Editor not available in setupTabHandler')
      return
    }

    debug('Setting up Tab key handler')

    // Remove existing command to avoid conflicts
    const existingCommand = editor.commands.byName.acceptGhostText
    if (existingCommand) {
      editor.commands.removeCommand(existingCommand)
    }

    // Register new Tab key handler command with highest priority
    editor.commands.addCommand({
      name: 'acceptGhostText',
      bindKey: { win: 'Tab', mac: 'Tab' },
      exec: (editor: Editor) => {
        // Use our saved ghost text, not dependent on editor.ghostText
        if (currentGhostText.value) {
          debug(`Accepting ghost text: ${currentGhostText.value}`)

          const position = editor.getCursorPosition()
          const text = currentGhostText.value

          // Insert text through session API
          editor.session.insert(position, text)

          clearGhostText()
          debug('Ghost text inserted successfully')
          return true // Prevent event propagation
        }

        debug('No ghost text to accept, allowing default tab behavior')
        return false // Allow default Tab behavior
      },
      readOnly: false,
    })

    debug('Tab key handler set up successfully')
  }

  // Clear ghost text and reset state
  function clearGhostText() {
    if (!editorRef.value)
      return

    if (typeof editorRef.value.removeGhostText === 'function') {
      editorRef.value.removeGhostText()
    }

    currentGhostText.value = ''
  }

  // Trailing-only debounce: wait for a quiet second before asking the LLM.
  const debouncedApplyGhostText = debounce(applyGhostText, 1000, { leading: false, trailing: true })

  // Wire the editor up once the server confirms the feature is enabled.
  async function init(editor: Editor) {
    const { enabled } = await openai.get_code_completion_enabled_status()
    if (!enabled) {
      debug('Code completion is not enabled')
      return
    }

    editorRef.value = editor
    debug('Editor initialized')

    // Set up Tab key handler
    setupTabHandler(editor)

    setTimeout(() => {
      editor.on('change', (e: { action: string }) => {
        debug(`Editor change event: ${e.action}`)
        // If change is caused by user input, interrupt current completion
        clearGhostText()
        if (e.action === 'insert' || e.action === 'remove') {
          // Clear current ghost text
          debouncedApplyGhostText()
        }
      })

      // Listen for cursor changes, using debounce
      editor.selection.on('changeCursor', () => {
        debug('Cursor changed')
        clearGhostText()
        debouncedApplyGhostText()
      })
    }, 2000)
  }

  function cleanUp() {
    // Cancel any pending debounced request so it cannot fire after unmount.
    // The previous version leaked the timer, letting applyGhostText run
    // against a closed socket.
    debouncedApplyGhostText.cancel()

    if (ws) {
      ws.close()
    }

    debug('CodeCompletion unmounted')
  }

  return {
    init,
    cleanUp,
  }
}
export default useCodeCompletion

View file

@ -1,49 +1,55 @@
<script setup lang="ts">
import type { Editor } from 'ace-builds'
import ace from 'ace-builds'
import extSearchboxUrl from 'ace-builds/src-noconflict/ext-searchbox?url'
import { VAceEditor } from 'vue3-ace-editor'
import useCodeCompletion from './CodeCompletion'
import 'ace-builds/src-noconflict/mode-nginx'
import 'ace-builds/src-noconflict/theme-monokai'
const props = defineProps<{
content?: string
defaultHeight?: string
readonly?: boolean
placeholder?: string
}>()
const emit = defineEmits(['update:content'])
const value = computed({
get() {
return props.content ?? ''
},
set(v) {
emit('update:content', v)
},
})
const content = defineModel<string>('content', { default: '' })
onMounted(() => {
try {
ace.config.setModuleUrl('ace/ext/searchbox', extSearchboxUrl)
}
catch (error) {
console.error('Failed to initialize Ace editor:', error)
console.error(`Failed to initialize Ace editor: ${error}`)
}
})
const codeCompletion = useCodeCompletion()
function init(editor: Editor) {
if (props.readonly) {
return
}
codeCompletion.init(editor)
}
onUnmounted(() => {
codeCompletion.cleanUp()
})
</script>
<template>
<VAceEditor
v-model:value="value"
v-model:value="content"
lang="nginx"
theme="monokai"
:style="{
minHeight: defaultHeight || '100vh',
borderRadius: '5px',
}"
:readonly="readonly"
:placeholder="placeholder"
:readonly
:placeholder
@init="init"
/>
</template>
@ -52,4 +58,9 @@ onMounted(() => {
z-index: 1;
position: relative;
}
:deep(.ace_ghost-text) {
color: #6a737d;
opacity: 0.8;
}
</style>

View file

@ -76,6 +76,24 @@ const providers = LLM_PROVIDERS.map(provider => ({
</ASelectOption>
</ASelect>
</AFormItem>
<AFormItem
:label="$gettext('Enable Code Completion')"
>
<ASwitch v-model:checked="data.openai.enable_code_completion" />
</AFormItem>
<AFormItem
v-if="data.openai.enable_code_completion"
:label="$gettext('Code Completion Model')"
:validate-status="errors?.openai?.code_completion_model ? 'error' : ''"
:help="errors?.openai?.code_completion_model === 'safety_text'
? $gettext('The model name should only contain letters, unicode, numbers, hyphens, dashes, colons, and dots.')
: $gettext('The model used for code completion, if not set, the chat model will be used.')"
>
<AAutoComplete
v-model:value="data.openai.code_completion_model"
:options="models"
/>
</AFormItem>
</AForm>
</template>

View file

@ -90,6 +90,8 @@ const data = ref<Settings>({
proxy: '',
token: '',
api_type: 'OPEN_AI',
enable_code_completion: false,
code_completion_model: '',
},
terminal: {
start_cmd: '',

View file

@ -1,59 +0,0 @@
---
# https://vitepress.dev/reference/default-theme-home-page
layout: home
title: Nginx UI
titleTemplate: واجهة ويب أخرى لـ Nginx
hero:
name: "Nginx UI"
text: "Yet another Nginx Web UI"
tagline: Simple, powerful, and fast.
image:
src: /assets/icon.svg
alt: Nginx UI
actions:
- theme: brand
text: Get Started
link: /guide/about
- theme: alt
text: View on Github
link: https://github.com/0xJacky/nginx-ui
features:
- icon: 📊
title: Online Statistics for Server Indicators
details: Monitor CPU usage, memory usage, load average, and disk usage in real-time.
- icon: 💬
title: Online ChatGPT Assistant
details: Get assistance from an AI-powered ChatGPT directly within the platform.
- icon: 🖱️
title: One-Click Deployment and Automatic Renewal
details: Easily deploy and auto-renew Let's Encrypt certificates with just one click.
- icon: 🛠️
title: Online Editing Websites Configurations
details: Edit configurations using our NgxConfigEditor block editor or Ace Code Editor with nginx syntax highlighting.
- icon: 📜
title: Online View Nginx Logs
details: Access and view your Nginx logs directly online.
- icon: 💻
title: Written in Go and Vue
details: The platform is built with Go and Vue, and distributed as a single executable binary.
- icon: 🔄
title: Automatically Test and Reload Configurations
details: Test configuration files and reload nginx automatically after saving changes.
- icon: 🖥️
title: Web Terminal
details: Access a web-based terminal for easy management.
- icon: 🌙
title: Dark Mode
details: Enable dark mode for a comfortable user experience.
- icon: 📱
title: Responsive Web Design
details: Enjoy a seamless experience on any device with responsive web design.
- icon: 🔐
title: 2FA Authentication
details: Secure sensitive actions with two-factor authentication.
---

View file

@ -43,9 +43,10 @@ Just want to try it out? Skip to the [Quickstart](./getting-started).
</div>
Nginx UI is a comprehensive web-based interface designed to simplify the management and configuration of Nginx servers.
It offers real-time server statistics, AI-powered ChatGPT assistance, one-click deployment, automatic renewal of Let's
Encrypt certificates, and user-friendly editing tools for website configurations. Additionally, Nginx UI provides
Nginx UI is a comprehensive web-based interface designed to simplify the management and configuration of Nginx single-node and cluster nodes.
It offers real-time server statistics, Nginx performance monitoring, AI-powered ChatGPT assistance,
the code editor that supports LLM Code Completion,
one-click deployment, automatic renewal of Let's Encrypt certificates, and user-friendly editing tools for website configurations. Additionally, Nginx UI provides
features such as online access to Nginx logs, automatic testing and reloading of configuration files, a web terminal,
dark mode, and responsive web design. Built with Go and Vue, Nginx UI ensures a seamless and efficient experience for
managing your Nginx server.
@ -63,7 +64,7 @@ managing your Nginx server.
- Enhanced Online ChatGPT Assistant with support for multiple models, including displaying Deepseek-R1's chain of thought to help you better understand and optimize configurations.
- One-click deployment and automatic renewal Let's Encrypt certificates.
- Online editing websites configurations with our self-designed **NgxConfigEditor** which is a user-friendly block
editor for nginx configurations, or **Ace Code Editor** which supports highlighting nginx configuration syntax.
editor for nginx configurations, or **Ace Code Editor** which supports **LLM Code Completion** and highlighting nginx configuration syntax.
- Online view Nginx logs.
- Written in Go and Vue, distribution is a single executable binary.
- Automatically test configuration file and reload nginx after saving configuration.

View file

@ -28,7 +28,7 @@ region, you can use an HTTP proxy and set this option to the corresponding URL.
- Type: `string`
- Default: `gpt-3.5-turbo`
This option is used to set the ChatGPT model. If your account has the privilege to access the gpt-4 model, you can
This option is used to set the chat model. If your account has the privilege to access the gpt-4 model, you can
configure this option accordingly.
## APIType
@ -40,3 +40,18 @@ This option is used to set the type of the API.
- `OPEN_AI`: Use the OpenAI API.
- `AZURE`: Use the Azure API.
## EnableCodeCompletion
- Type: `boolean`
- Default: `false`
- Version: `>=2.0.0-rc.6`
This option is used to enable the code completion feature in the code editor.
## CodeCompletionModel
- Type: `string`
- Version: `>=2.0.0-rc.6`
This option is used to set the code completion model, leave it blank if you want to use the chat model.

View file

@ -8,7 +8,7 @@ titleTemplate: Yet another Nginx Web UI
hero:
name: "Nginx UI"
text: "Yet another Nginx Web UI"
tagline: Simple, powerful, and fast.
tagline: Intelligent, powerful, and fast.
image:
src: /assets/icon.svg
alt: Nginx UI
@ -36,6 +36,9 @@ features:
- icon: 💬
title: Enhanced Online ChatGPT Assistant
details: Support for multiple models, including displaying Deepseek-R1's chain of thought to help you better understand and optimize configurations.
- icon: 🔍
title: Code Completion
details: Code editor supports code completion, helping you write configurations faster.
- icon: 🖱️
title: One-Click Deployment and Automatic Renewal
details: Easily deploy and auto-renew Let's Encrypt certificates with just one click.

View file

@ -1,59 +0,0 @@
---
# https://vitepress.dev/reference/default-theme-home-page
layout: home
title: Nginx UI
titleTemplate: Yet another Nginx Web UI
hero:
name: "Nginx UI"
text: "Yet another Nginx Web UI"
tagline: Simple, powerful, and fast.
image:
src: /assets/icon.svg
alt: Nginx UI
actions:
- theme: brand
text: Get Started
link: /guide/about
- theme: alt
text: View on Github
link: https://github.com/0xJacky/nginx-ui
features:
- icon: 📊
title: Online Statistics for Server Indicators
details: Monitor CPU usage, memory usage, load average, and disk usage in real-time.
- icon: 💬
title: Online ChatGPT Assistant
details: Get assistance from an AI-powered ChatGPT directly within the platform.
- icon: 🖱️
title: One-Click Deployment and Automatic Renewal
details: Easily deploy and auto-renew Let's Encrypt certificates with just one click.
- icon: 🛠️
title: Online Editing Websites Configurations
details: Edit configurations using our NgxConfigEditor block editor or Ace Code Editor with nginx syntax highlighting.
- icon: 📜
title: Online View Nginx Logs
details: Access and view your Nginx logs directly online.
- icon: 💻
title: Written in Go and Vue
details: The platform is built with Go and Vue, and distributed as a single executable binary.
- icon: 🔄
title: Automatically Test and Reload Configurations
details: Test configuration files and reload nginx automatically after saving changes.
- icon: 🖥️
title: Web Terminal
details: Access a web-based terminal for easy management.
- icon: 🌙
title: Dark Mode
details: Enable dark mode for a comfortable user experience.
- icon: 📱
title: Responsive Web Design
details: Enjoy a seamless experience on any device with responsive web design.
- icon: 🔐
title: 2FA Authentication
details: Secure sensitive actions with two-factor authentication.
---

View file

@ -43,10 +43,11 @@ const members = [
</div>
Nginx UI 是一个全新的 Nginx 网络管理界面,旨在简化 Nginx 服务器的管理和配置。它提供实时服务器统计数据、ChatGPT
助手、一键部署、Let's Encrypt 证书的自动续签以及用户友好的网站配置编辑工具。此外Nginx UI 还提供了在线访问 Nginx
日志、配置文件的自动测试和重载、网络终端、深色模式和自适应网页设计等功能。Nginx UI 采用 Go 和 Vue 构建,确保在管理 Nginx
服务器时提供无缝高效的体验。
Nginx UI 是一个全新的 Nginx 网络管理界面,旨在简化 Nginx 单机和集群节点的管理和配置。
它提供实时服务器运行数据、Nginx 性能监控、ChatGPT 助手、支持大模型代码补全的代码编辑器、
一键部署 Let's Encrypt 证书的自动续签以及用户友好的网站配置编辑工具。此外Nginx UI 还提供了在线访问 Nginx
日志、配置文件的自动测试和重载、网络终端、深色模式和自适应网页设计等功能。
Nginx UI 采用 Go 和 Vue 构建,确保在管理 Nginx 服务器时提供无缝高效的体验。
## 我们的团队
@ -60,7 +61,7 @@ Nginx UI 是一个全新的 Nginx 网络管理界面,旨在简化 Nginx 服务
- 导出加密的 Nginx / Nginx UI 配置,方便快速部署和恢复到新环境
- 增强版在线 ChatGPT 助手,支持多种模型,包括显示 Deepseek-R1 的思考链,帮助您更好地理解和优化配置
- 一键申请和自动续签 Let's encrypt 证书
- 在线编辑 Nginx 配置文件,编辑器支持 Nginx 配置语法高亮
- 在线编辑 Nginx 配置文件,编辑器支持 **大模型代码补全** Nginx 配置语法高亮
- 在线查看 Nginx 日志
- 使用 Go 和 Vue 开发,发行版本为单个可执行的二进制文件
- 保存配置后自动测试配置文件并重载 Nginx

View file

@ -27,7 +27,7 @@ URL。
- 类型:`string`
- 默认值:`gpt-3.5-turbo`
此选项用于设置 ChatGPT 模型。如果您的帐户有权限访问 `gpt-4` 模型,可以相应地配置此选项。
此选项用于设置对话模型。如果您的帐户有权限访问 `gpt-4` 模型,可以相应地配置此选项。
## APIType
@ -38,3 +38,17 @@ URL。
- `OPEN_AI`: 使用 OpenAI API。
- `AZURE`: 使用 Azure API。
## EnableCodeCompletion
- 类型:`boolean`
- 默认值:`false`
- 版本:`>=2.0.0-rc.6`
此选项用于启用编辑器代码补全功能。
## CodeCompletionModel
- 类型:`string`
- 版本:`>=2.0.0-rc.6`
此选项用于设置代码补全的模型,留空则使用对话模型。

View file

@ -8,7 +8,7 @@ titleTemplate: Yet another Nginx Web UI
hero:
name: "Nginx UI"
text: "Nginx 网络管理界面的新选择"
tagline: 简单、强大、高速
tagline: 智能、强大、高速
image:
src: /assets/icon.svg
alt: Nginx UI
@ -36,6 +36,9 @@ features:
- icon: 💬
title: 增强版在线 ChatGPT 助手
details: 支持多种模型,包括显示 Deepseek-R1 的思考链,帮助您更好地理解和优化配置。
- icon: 🔍
title: 代码补全
details: 代码编辑器支持代码补全,帮助您更快地编写配置。
- icon: 🖱️
title: 一键部署和自动续期
details: 只需一键即可轻松部署和自动续期 Let's Encrypt 证书。

View file

@ -60,7 +60,7 @@ Nginx UI 是一個全新的 Nginx 網路管理介面,目的是簡化 Nginx 伺
- 匯出加密的 Nginx/NginxUI 設定,方便快速部署和恢復到新環境
- 增強版線上 ChatGPT 助手,支援多種模型,包括顯示 Deepseek-R1 的思考鏈,幫助您更好地理解和最佳化設定
- 一鍵申請和自動續簽 Let's encrypt 憑證
- 線上編輯 Nginx 設定檔案,編輯器支援 Nginx 設定語法突顯
- 線上編輯 Nginx 設定檔案,編輯器支援 **大模型代碼補全** 和 Nginx 設定語法突顯
- 線上檢視 Nginx 日誌
- 使用 Go 和 Vue 開發,發行版本為單個可執行檔案
- 儲存設定後自動測試設定檔案並過載 Nginx

View file

@ -27,7 +27,7 @@ URL。
- 型別:`string`
- 預設值:`gpt-3.5-turbo`
此選項用於設定 ChatGPT 模型。如果您的帳戶有許可權存取 `gpt-4` 模型,可以相應地設定此選項。
此選項用於設定對話模型。如果您的帳戶有許可權訪問 `gpt-4` 模型,可以相應地配置此選項。
## APIType
@ -38,3 +38,18 @@ URL。
- `OPEN_AI`: 使用 OpenAI API。
- `AZURE`: 使用 Azure API。
## EnableCodeCompletion
- 型別:`boolean`
- 預設值:`false`
- 版本:`>=2.0.0-rc.6`
此選項用於啟用編輯器代碼補全功能。
## CodeCompletionModel
- 型別:`string`
- 版本:`>=2.0.0-rc.6`
此選項用於設定代碼補全的模型,留空則使用對話模型。

View file

@ -8,7 +8,7 @@ titleTemplate: Yet another Nginx Web UI
hero:
name: "Nginx UI"
text: "Nginx 管理介面新選擇"
tagline: 簡單、強大、高速
tagline: 智能、強大、高速
image:
src: /assets/icon.svg
alt: Nginx UI
@ -36,6 +36,9 @@ features:
- icon: 💬
title: 線上 ChatGPT 助手
details: 支援多種模型,包括顯示 Deepseek-R1 的思考鏈,幫助您更好地理解和最佳化設定。
- icon: 🔍
title: 代碼補全
details: 代碼編輯器支援代碼補全,幫助您更快地編寫設定。
- icon: 🖱️
title: 一鍵部署和自動續期
details: 只需一鍵即可輕鬆部署和自動續期 Let's Encrypt 證書。

View file

@ -1,4 +1,4 @@
package chatbot
package llm
import (
"github.com/0xJacky/Nginx-UI/internal/transport"

View file

@ -0,0 +1,156 @@
package llm
import (
"context"
"regexp"
"strconv"
"strings"
"sync"
"github.com/0xJacky/Nginx-UI/settings"
"github.com/sashabaranov/go-openai"
"github.com/uozi-tech/cosy/logger"
)
const (
	// MaxTokens caps the length of a single completion response.
	MaxTokens = 100
	// Temperature of 1 keeps the provider's default sampling behavior.
	Temperature = 1

	// SystemPrompt instructs the model to act as a code completion
	// assistant and to preserve the snippet's original indentation.
	// Note: each sentence ends with a space before the next fragment so
	// the concatenated prompt reads "instruction. [IMPORTANT]" — the
	// previous version ran the sentences together ("instruction.[IMPORTANT]").
	SystemPrompt = "You are a code completion assistant. " +
		"Complete the provided code snippet based on the context and instruction. " +
		"[IMPORTANT] Keep the original code indentation."
)
// Position the cursor position
type Position struct {
Row int `json:"row"`
Column int `json:"column"`
}
// CodeCompletionRequest the code completion request
type CodeCompletionRequest struct {
RequestID string `json:"request_id"`
UserID uint64 `json:"user_id"`
Context string `json:"context"`
Code string `json:"code"`
Suffix string `json:"suffix"`
Language string `json:"language"`
Position Position `json:"position"`
}
var (
requestContext = make(map[uint64]context.CancelFunc)
mutex sync.Mutex
)
// Send cancels any in-flight completion for the same user, then performs a
// single (non-streaming) chat completion call and returns the cleaned-up
// code to insert at the cursor. The returned code has chain-of-thought
// markup, markdown fences, and the already-typed prefix stripped.
func (c *CodeCompletionRequest) Send() (completedCode string, err error) {
	// Look up, cancel, and replace the user's active request while holding
	// the lock. The previous version read the map before locking, which
	// raced with concurrent Send calls for the same user.
	mutex.Lock()
	if cancel, ok := requestContext[c.UserID]; ok {
		logger.Infof("Code completion request cancelled for user %d", c.UserID)
		cancel()
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	requestContext[c.UserID] = cancel
	mutex.Unlock()

	defer func() {
		mutex.Lock()
		// NOTE(review): if a newer request has already replaced this entry,
		// the delete drops the newer cancel func too (funcs are not
		// comparable, so ownership cannot be verified) — confirm acceptable.
		delete(requestContext, c.UserID)
		mutex.Unlock()
	}()

	openaiClient, err := GetClient()
	if err != nil {
		return
	}

	// Build the user prompt: whole-file context, cursor position, and the
	// code on either side of the cursor.
	userPrompt := "Here is a file written in " + c.Language + ":\n```\n" + c.Context + "\n```\n"
	userPrompt += "I'm editing at row " + strconv.Itoa(c.Position.Row) + ", column " + strconv.Itoa(c.Position.Column) + ".\n"
	userPrompt += "Code before cursor:\n```\n" + c.Code + "\n```\n"

	if c.Suffix != "" {
		userPrompt += "Code after cursor:\n```\n" + c.Suffix + "\n```\n"
	}

	userPrompt += "Instruction: Only provide the completed code that should be inserted at the cursor position without explanations. " +
		"The code should be syntactically correct and follow best practices for " + c.Language + "."

	messages := []openai.ChatCompletionMessage{
		{
			Role:    openai.ChatMessageRoleSystem,
			Content: SystemPrompt,
		},
		{
			Role:    openai.ChatMessageRoleUser,
			Content: userPrompt,
		},
	}

	req := openai.ChatCompletionRequest{
		Model:       settings.OpenAISettings.GetCodeCompletionModel(),
		Messages:    messages,
		MaxTokens:   MaxTokens,
		Temperature: Temperature,
	}

	// Make a direct (non-streaming) call to the API.
	response, err := openaiClient.CreateChatCompletion(ctx, req)
	if err != nil {
		return
	}

	// Guard against an empty choice list; indexing blindly would panic.
	if len(response.Choices) == 0 {
		return "", nil
	}

	completedCode = response.Choices[0].Message.Content

	// Strip the part of the completion the user has already typed.
	lastWord := extractLastWord(c.Code)
	completedCode = cleanCompletionResponse(completedCode, lastWord)

	logger.Infof("Code completion response: %s", completedCode)
	return
}
// extractLastWord returns the trailing run of identifier characters
// (ASCII letters, digits, underscore) at the end of code, or "" when the
// code is empty or ends with a non-word character.
func extractLastWord(code string) string {
	if code == "" {
		return ""
	}
	// Match the word characters anchored to the end of the string.
	wordTail := regexp.MustCompile(`[a-zA-Z0-9_]+$`)
	return wordTail.FindString(code)
}
// cleanCompletionResponse normalizes a raw model reply: it drops any
// <think>...</think> reasoning sections, unwraps a fenced markdown code
// block when one is present, trims whitespace and stray backticks, and
// removes the prefix the user has already typed (lastWord) so the insertion
// does not duplicate it.
func cleanCompletionResponse(response string, lastWord string) (cleanResp string) {
	// Drop chain-of-thought sections emitted by reasoning models.
	thinkRe := regexp.MustCompile(`<think>[\s\S]*?</think>`)
	cleanResp = thinkRe.ReplaceAllString(response, "")

	// Prefer the content of a fenced ```lang ... ``` block when one exists;
	// otherwise keep the whole reply.
	fenceRe := regexp.MustCompile("```(?:[a-zA-Z]+)?\n((?:.|\n)*?)\n```")
	if m := fenceRe.FindStringSubmatch(cleanResp); len(m) > 1 {
		cleanResp = m[1]
	}
	cleanResp = strings.TrimSpace(cleanResp)

	// Remove leftover backticks around inline code.
	cleanResp = strings.Trim(cleanResp, "`")

	// Avoid re-inserting what the user already typed.
	if lastWord != "" && strings.HasPrefix(cleanResp, lastWord) {
		cleanResp = strings.TrimPrefix(cleanResp, lastWord)
	}
	return
}

View file

@ -1,4 +1,4 @@
package chatbot
package llm
import (
"github.com/0xJacky/Nginx-UI/internal/helper"

View file

@ -1,4 +1,4 @@
package chatbot
package llm
import (
"github.com/stretchr/testify/assert"

10
internal/llm/errors.go Normal file
View file

@ -0,0 +1,10 @@
package llm
import (
"github.com/uozi-tech/cosy"
)
var (
	// e is the error scope shared by errors declared in the llm package.
	e = cosy.NewErrorScope("llm")
	// ErrCodeCompletionNotEnabled is returned when a client requests code
	// completion while the feature is disabled in settings.
	ErrCodeCompletionNotEnabled = e.New(400, "code completion is not enabled")
)

View file

@ -1,4 +1,4 @@
package chatbot
package llm
import (
"github.com/sashabaranov/go-openai"

View file

@ -1,4 +1,4 @@
package chatbot
package llm
import (
"github.com/sashabaranov/go-openai"

34
package-lock.json generated Normal file
View file

@ -0,0 +1,34 @@
{
"name": "nginx-ui",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"dependencies": {
"@types/lodash-es": "^4.17.12",
"lodash-es": "^4.17.21"
}
},
"node_modules/@types/lodash": {
"version": "4.17.16",
"resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.17.16.tgz",
"integrity": "sha512-HX7Em5NYQAXKW+1T+FiuG27NGwzJfCX3s1GjOa7ujxZa52kjJLOr4FUxT+giF6Tgxv1e+/czV/iTtBw27WTU9g==",
"license": "MIT"
},
"node_modules/@types/lodash-es": {
"version": "4.17.12",
"resolved": "https://registry.npmjs.org/@types/lodash-es/-/lodash-es-4.17.12.tgz",
"integrity": "sha512-0NgftHUcV4v34VhXm8QBSftKVXtbkBG3ViCjs6+eJ5a6y6Mi/jiFGPc1sC7QK+9BFhWrURE3EOggmWaSxL9OzQ==",
"license": "MIT",
"dependencies": {
"@types/lodash": "*"
}
},
"node_modules/lodash-es": {
"version": "4.17.21",
"resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz",
"integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==",
"license": "MIT"
}
}
}

6
package.json Normal file
View file

@ -0,0 +1,6 @@
{
"dependencies": {
"@types/lodash-es": "^4.17.12",
"lodash-es": "^4.17.21"
}
}

View file

@ -8,8 +8,17 @@ type OpenAI struct {
Proxy string `json:"proxy" binding:"omitempty,url"`
Model string `json:"model" binding:"omitempty,safety_text"`
APIType string `json:"api_type" binding:"omitempty,oneof=OPEN_AI AZURE"`
EnableCodeCompletion bool `json:"enable_code_completion" binding:"omitempty"`
CodeCompletionModel string `json:"code_completion_model" binding:"omitempty,safety_text"`
}
var OpenAISettings = &OpenAI{
APIType: string(openai.APITypeOpenAI),
}
// GetCodeCompletionModel returns the model configured for code completion,
// falling back to the chat model when none is set.
func (o *OpenAI) GetCodeCompletionModel() string {
	if model := o.CodeCompletionModel; model != "" {
		return model
	}
	return o.Model
}