Mirror of https://github.com/0xJacky/nginx-ui.git (synced 2025-05-11 02:15:48 +02:00)

Commit: refactor: cache index

This commit is contained in:
parent 5d8d96fd4f
commit 269397e114

20 changed files with 532 additions and 364 deletions
api/index/router.go (new file, 8 additions)

@@ -0,0 +1,8 @@
+package index
+
+import "github.com/gin-gonic/gin"
+
+// InitRouter registers all the index related routes
+func InitRouter(r *gin.RouterGroup) {
+    r.GET("index/status", GetIndexStatus)
+}
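For context, the sketch below shows how a router group like this is typically mounted in a Gin application. It is not part of this commit; the /api prefix, the engine setup, and the listen address are assumptions for illustration only.

package main

import (
    "github.com/gin-gonic/gin"

    "github.com/0xJacky/Nginx-UI/api/index" // the new package introduced above
)

func main() {
    r := gin.Default()

    // Mount all index-related routes under an assumed /api prefix,
    // which exposes GET /api/index/status via InitRouter above.
    apiGroup := r.Group("/api")
    index.InitRouter(apiGroup)

    _ = r.Run(":9000") // assumed listen address
}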
api/index/sse.go (new file, 50 additions)

@@ -0,0 +1,50 @@
+package index
+
+import (
+    "io"
+    "time"
+
+    "github.com/0xJacky/Nginx-UI/api"
+    "github.com/0xJacky/Nginx-UI/internal/cache"
+    "github.com/gin-gonic/gin"
+)
+
+// GetIndexStatus is an SSE endpoint that sends real-time index status updates
+func GetIndexStatus(c *gin.Context) {
+    api.SetSSEHeaders(c)
+    notify := c.Writer.CloseNotify()
+
+    // Subscribe to scanner status changes
+    statusChan := cache.SubscribeScanningStatus()
+
+    // Ensure we unsubscribe when the handler exits
+    defer cache.UnsubscribeScanningStatus(statusChan)
+
+    // Main event loop
+    for {
+        select {
+        case status, ok := <-statusChan:
+            // If channel closed, exit
+            if !ok {
+                return
+            }
+
+            // Send status update
+            c.Stream(func(w io.Writer) bool {
+                c.SSEvent("message", gin.H{
+                    "scanning": status,
+                })
+                return false
+            })
+        case <-time.After(30 * time.Second):
+            // Send heartbeat to keep connection alive
+            c.Stream(func(w io.Writer) bool {
+                c.SSEvent("heartbeat", "")
+                return false
+            })
+        case <-notify:
+            // Client disconnected
+            return
+        }
+    }
+}
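As a rough illustration of how the new endpoint behaves on the wire, here is a minimal standalone Go client that reads the SSE stream and prints each data frame. It is not part of this commit; the base URL, port, and Authorization header format are assumptions for a locally running Nginx UI instance.

package main

import (
    "bufio"
    "fmt"
    "log"
    "net/http"
    "strings"
)

func main() {
    req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:9000/api/index/status", nil)
    if err != nil {
        log.Fatal(err)
    }
    req.Header.Set("Authorization", "<token>") // assumed auth scheme

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()

    // SSE frames arrive as "event: ..." / "data: ..." line pairs separated by blank lines.
    scanner := bufio.NewScanner(resp.Body)
    for scanner.Scan() {
        line := scanner.Text()
        if strings.HasPrefix(line, "data:") {
            fmt.Println("received:", strings.TrimSpace(strings.TrimPrefix(line, "data:")))
        }
    }
}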
@@ -6,7 +6,6 @@ import (
     "os"
     "strings"
 
-    "github.com/0xJacky/Nginx-UI/internal/cache"
     "github.com/0xJacky/Nginx-UI/internal/nginx_log"
     "github.com/gin-gonic/gin"
     "github.com/pkg/errors"
@@ -16,20 +15,24 @@ import (
 )
 
 const (
+    // PageSize defines the size of log chunks returned by the API
     PageSize = 128 * 1024
 )
 
+// controlStruct represents the request parameters for getting log content
 type controlStruct struct {
-    Type    string `json:"type"`
-    LogPath string `json:"log_path"`
+    Type    string `json:"type"`     // Type of log: "access" or "error"
+    LogPath string `json:"log_path"` // Path to the log file
 }
 
+// nginxLogPageResp represents the response format for log content
 type nginxLogPageResp struct {
-    Content string `json:"content"`
-    Page    int64  `json:"page"`
-    Error   string `json:"error,omitempty"`
+    Content string `json:"content"`         // Log content
+    Page    int64  `json:"page"`            // Current page number
+    Error   string `json:"error,omitempty"` // Error message if any
 }
 
+// GetNginxLogPage handles retrieving a page of log content from a log file
 func GetNginxLogPage(c *gin.Context) {
     page := cast.ToInt64(c.Query("page"))
     if page < 0 {
@@ -84,6 +87,7 @@ func GetNginxLogPage(c *gin.Context) {
         logger.Error(err)
         return
     }
+    defer f.Close()
 
     totalPage := logFileStat.Size() / PageSize
 
@@ -100,7 +104,7 @@ func GetNginxLogPage(c *gin.Context) {
         buf = make([]byte, PageSize)
         offset = (page - 1) * PageSize
 
-        // seek
+        // seek to the correct position in the file
         _, err = f.Seek(offset, io.SeekStart)
         if err != nil && err != io.EOF {
             c.JSON(http.StatusInternalServerError, nginxLogPageResp{
@@ -125,28 +129,29 @@ func GetNginxLogPage(c *gin.Context) {
     })
 }
 
+// GetLogList returns a list of Nginx log files
 func GetLogList(c *gin.Context) {
-    filters := []func(*cache.NginxLogCache) bool{}
+    filters := []func(*nginx_log.NginxLogCache) bool{}
 
     if c.Query("type") != "" {
-        filters = append(filters, func(entry *cache.NginxLogCache) bool {
+        filters = append(filters, func(entry *nginx_log.NginxLogCache) bool {
             return entry.Type == c.Query("type")
         })
     }
 
     if c.Query("name") != "" {
-        filters = append(filters, func(entry *cache.NginxLogCache) bool {
+        filters = append(filters, func(entry *nginx_log.NginxLogCache) bool {
             return strings.Contains(entry.Name, c.Query("name"))
         })
     }
 
     if c.Query("path") != "" {
-        filters = append(filters, func(entry *cache.NginxLogCache) bool {
+        filters = append(filters, func(entry *nginx_log.NginxLogCache) bool {
             return strings.Contains(entry.Path, c.Query("path"))
         })
     }
 
-    data := cache.GetAllLogPaths(filters...)
+    data := nginx_log.GetAllLogs(filters...)
 
     orderBy := c.DefaultQuery("sort_by", "name")
     sort := c.DefaultQuery("order", "desc")
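GetLogList above relies on a variadic predicate-filter pattern: callers pass any number of func(*NginxLogCache) bool filters and only entries matching all of them are returned. The self-contained sketch below demonstrates that pattern; Entry and the sample data are illustrative stand-ins, not the real nginx_log.NginxLogCache type.

package main

import (
    "fmt"
    "strings"
)

type Entry struct {
    Name string
    Type string
}

// allLogs keeps only the entries for which every supplied filter returns true.
func allLogs(entries []*Entry, filters ...func(*Entry) bool) []*Entry {
    result := make([]*Entry, 0, len(entries))
    for _, e := range entries {
        keep := true
        for _, f := range filters {
            if !f(e) {
                keep = false
                break
            }
        }
        if keep {
            result = append(result, e)
        }
    }
    return result
}

func main() {
    entries := []*Entry{
        {Name: "access.log", Type: "access"},
        {Name: "error.log", Type: "error"},
    }
    matched := allLogs(entries,
        func(e *Entry) bool { return e.Type == "access" },
        func(e *Entry) bool { return strings.Contains(e.Name, "log") },
    )
    fmt.Println(len(matched)) // 1
}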
@@ -2,8 +2,8 @@ package nginx_log
 
 import "github.com/gin-gonic/gin"
 
+// InitRouter registers all the nginx log related routes
 func InitRouter(r *gin.RouterGroup) {
     r.GET("nginx_log", Log)
     r.GET("nginx_logs", GetLogList)
-    r.GET("nginx_logs/index_status", GetNginxLogsLive)
 }
@@ -15,10 +15,10 @@ func GetNginxLogsLive(c *gin.Context) {
     notify := c.Writer.CloseNotify()
 
     // Subscribe to scanner status changes
-    statusChan := cache.SubscribeStatusChanges()
+    statusChan := cache.SubscribeScanningStatus()
 
     // Ensure we unsubscribe when the handler exits
-    defer cache.UnsubscribeStatusChanges(statusChan)
+    defer cache.UnsubscribeScanningStatus(statusChan)
 
     // Main event loop
     for {
@@ -16,6 +16,8 @@ import (
     "github.com/uozi-tech/cosy/logger"
 )
 
+// getLogPath resolves the log file path based on the provided control parameters
+// It checks if the path is under the whitelist directories
 func getLogPath(control *controlStruct) (logPath string, err error) {
     // If direct log path is provided, use it
     if control.LogPath != "" {
@@ -58,6 +60,7 @@ func getLogPath(control *controlStruct) (logPath string, err error) {
     return
 }
 
+// tailNginxLog tails the specified log file and sends each line to the websocket
 func tailNginxLog(ws *websocket.Conn, controlChan chan controlStruct, errChan chan error) {
     defer func() {
         if err := recover(); err != nil {
@@ -130,6 +133,7 @@ func tailNginxLog(ws *websocket.Conn, controlChan chan controlStruct, errChan ch
         }
     }
 
+// handleLogControl processes websocket control messages
 func handleLogControl(ws *websocket.Conn, controlChan chan controlStruct, errChan chan error) {
     defer func() {
         if err := recover(); err != nil {
@@ -160,6 +164,7 @@ func handleLogControl(ws *websocket.Conn, controlChan chan controlStruct, errCha
         }
     }
 
+// Log handles websocket connection for real-time log viewing
 func Log(c *gin.Context) {
     var upGrader = websocket.Upgrader{
         CheckOrigin: func(r *http.Request) bool {
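The tailNginxLog handler documented above streams newly appended log lines to a websocket client. As a rough illustration of the underlying idea only (the project uses a dedicated tail library and a websocket connection, neither of which is shown here), the following simplified stand-in polls a file for appended data and pushes new chunks onto a channel.

package main

import (
    "fmt"
    "io"
    "os"
    "time"
)

// tailFile polls path once per interval and sends any newly appended bytes
// to out. This is a simplified stand-in for a real tail implementation.
func tailFile(path string, interval time.Duration, out chan<- string) error {
    f, err := os.Open(path)
    if err != nil {
        return err
    }
    defer f.Close()

    // Start reading from the current end of the file.
    offset, err := f.Seek(0, io.SeekEnd)
    if err != nil {
        return err
    }

    buf := make([]byte, 4096)
    for {
        time.Sleep(interval)
        n, err := f.ReadAt(buf, offset)
        if n > 0 {
            offset += int64(n)
            out <- string(buf[:n])
        }
        if err != nil && err != io.EOF {
            return err
        }
    }
}

func main() {
    out := make(chan string)
    go func() {
        if err := tailFile("/var/log/nginx/access.log", time.Second, out); err != nil {
            fmt.Println("tail error:", err)
        }
    }()
    for chunk := range out {
        fmt.Print(chunk) // a websocket handler would forward this to the client instead
    }
}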
app/src/api/cache_index.ts (new file, 17 additions)

@@ -0,0 +1,17 @@
+import { useUserStore } from '@/pinia'
+import { SSE } from 'sse.js'
+
+const cache_index = {
+  index_status() {
+    const { token } = useUserStore()
+    const url = `/api/index/status`
+
+    return new SSE(url, {
+      headers: {
+        Authorization: token,
+      },
+    })
+  },
+}
+
+export default cache_index
@@ -1,6 +1,4 @@
 import http from '@/lib/http'
-import { useUserStore } from '@/pinia'
-import { SSE } from 'sse.js'
 
 export interface INginxLogData {
   type: string
@@ -19,17 +17,6 @@ const nginx_log = {
   }) {
     return http.get(`/nginx_logs`, { params })
   },
-
-  logs_live() {
-    const { token } = useUserStore()
-    const url = `/api/nginx_logs/index_status`
-
-    return new SSE(url, {
-      headers: {
-        Authorization: token,
-      },
-    })
-  },
 }
 
 export default nginx_log
@@ -4,34 +4,6 @@
 
 const notifications: Record<string, { title: () => string, content: (args: any) => string }> = {
-
-  // cert module notifications
-  'Sync Certificate Error': {
-    title: () => $gettext('Sync Certificate Error'),
-    content: (args: any) => $gettext('Sync Certificate %{cert_name} to %{env_name} failed', args),
-  },
-  'Sync Certificate Success': {
-    title: () => $gettext('Sync Certificate Success'),
-    content: (args: any) => $gettext('Sync Certificate %{cert_name} to %{env_name} successfully', args),
-  },
-
-  // config module notifications
-  'Sync Config Error': {
-    title: () => $gettext('Sync Config Error'),
-    content: (args: any) => $gettext('Sync config %{config_name} to %{env_name} failed', args),
-  },
-  'Sync Config Success': {
-    title: () => $gettext('Sync Config Success'),
-    content: (args: any) => $gettext('Sync config %{config_name} to %{env_name} successfully', args),
-  },
-  'Rename Remote Config Error': {
-    title: () => $gettext('Rename Remote Config Error'),
-    content: (args: any) => $gettext('Rename %{orig_path} to %{new_path} on %{env_name} failed', args),
-  },
-  'Rename Remote Config Success': {
-    title: () => $gettext('Rename Remote Config Success'),
-    content: (args: any) => $gettext('Rename %{orig_path} to %{new_path} on %{env_name} successfully', args),
-  },
-
   // site module notifications
   'Delete Remote Site Error': {
     title: () => $gettext('Delete Remote Site Error'),
@@ -121,6 +93,34 @@ const notifications: Record<string, { title: () => string, content: (args: any)
     title: () => $gettext('All Recovery Codes Have Been Used'),
     content: (args: any) => $gettext('Please generate new recovery codes in the preferences immediately to prevent lockout.', args),
   },
+
+  // cert module notifications
+  'Sync Certificate Error': {
+    title: () => $gettext('Sync Certificate Error'),
+    content: (args: any) => $gettext('Sync Certificate %{cert_name} to %{env_name} failed', args),
+  },
+  'Sync Certificate Success': {
+    title: () => $gettext('Sync Certificate Success'),
+    content: (args: any) => $gettext('Sync Certificate %{cert_name} to %{env_name} successfully', args),
+  },
+
+  // config module notifications
+  'Sync Config Error': {
+    title: () => $gettext('Sync Config Error'),
+    content: (args: any) => $gettext('Sync config %{config_name} to %{env_name} failed', args),
+  },
+  'Sync Config Success': {
+    title: () => $gettext('Sync Config Success'),
+    content: (args: any) => $gettext('Sync config %{config_name} to %{env_name} successfully', args),
+  },
+  'Rename Remote Config Error': {
+    title: () => $gettext('Rename Remote Config Error'),
+    content: (args: any) => $gettext('Rename %{orig_path} to %{new_path} on %{env_name} failed', args),
+  },
+  'Rename Remote Config Success': {
+    title: () => $gettext('Rename Remote Config Success'),
+    content: (args: any) => $gettext('Rename %{orig_path} to %{new_path} on %{env_name} successfully', args),
+  },
 }
 
 export default notifications
@@ -18,13 +18,6 @@ export const systemRoutes: RouteRecordRaw[] = [
     meta: {
       name: () => $gettext('Self Check'),
     },
-  }, {
-    path: 'about',
-    name: 'About',
-    component: () => import('@/views/system/About.vue'),
-    meta: {
-      name: () => $gettext('About'),
-    },
   }, {
     path: 'backup',
     name: 'Backup',
@@ -44,6 +37,13 @@ export const systemRoutes: RouteRecordRaw[] = [
         return settings.is_remote
       },
     },
+  }, {
+    path: 'about',
+    name: 'About',
+    component: () => import('@/views/system/About.vue'),
+    meta: {
+      name: () => $gettext('About'),
+    },
   }],
 },
 ]
@@ -2,6 +2,7 @@
 import type { CustomRender } from '@/components/StdDesign/StdDataDisplay/StdTableTransformer'
 import type { Column } from '@/components/StdDesign/types'
 import type { SSE, SSEvent } from 'sse.js'
+import cacheIndex from '@/api/cache_index'
 import nginxLog from '@/api/nginx_log'
 import StdCurd from '@/components/StdDesign/StdDataDisplay/StdCurd.vue'
 import { input, select } from '@/components/StdDesign/StdDataEntry'
@@ -64,12 +65,12 @@ function viewLog(record: { type: string, path: string }) {
 }
 
 // Connect to SSE endpoint and setup handlers
-function setupSSE() {
+async function setupSSE() {
   if (sse.value) {
     sse.value.close()
   }
 
-  sse.value = nginxLog.logs_live()
+  sse.value = cacheIndex.index_status()
 
   // Handle incoming messages
   if (sse.value) {
go.mod (2 deletions)

@@ -23,7 +23,6 @@ require (
     github.com/golang-jwt/jwt/v5 v5.2.2
     github.com/google/uuid v1.6.0
     github.com/gorilla/websocket v1.5.3
-    github.com/hpcloud/tail v1.0.0
     github.com/jpillora/overseer v1.1.6
     github.com/lib/pq v1.10.9
     github.com/minio/selfupdate v0.6.0
@@ -268,7 +267,6 @@ require (
     google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 // indirect
     google.golang.org/grpc v1.71.0 // indirect
     google.golang.org/protobuf v1.36.6 // indirect
-    gopkg.in/fsnotify.v1 v1.4.7 // indirect
     gopkg.in/inf.v0 v0.9.1 // indirect
     gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
     gopkg.in/ns1/ns1-go.v2 v2.13.0 // indirect
go.sum (22 changes)

@@ -782,7 +782,6 @@ github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
-github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -902,8 +901,6 @@ github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5
 github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
 github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
 github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
-github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
-github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
 github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU=
 github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
 github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM=
@@ -981,12 +978,8 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v
 github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
 github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
 github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
-github.com/go-webauthn/webauthn v0.12.2 h1:yLaNPgBUEXDQtWnOjhsGhMMCEWbXwjg/aNkC8riJQI8=
-github.com/go-webauthn/webauthn v0.12.2/go.mod h1:Q8SZPPj4sZ469fNTcQXxRpzJOdb30jQrn/36FX8jilA=
 github.com/go-webauthn/webauthn v0.12.3 h1:hHQl1xkUuabUU9uS+ISNCMLs9z50p9mDUZI/FmkayNE=
 github.com/go-webauthn/webauthn v0.12.3/go.mod h1:4JRe8Z3W7HIw8NGEWn2fnUwecoDzkkeach/NnvhkqGY=
-github.com/go-webauthn/x v0.1.19 h1:IUfdHiBRoTdujpBA/14qbrMXQ3LGzYe/PRGWdZcmudg=
-github.com/go-webauthn/x v0.1.19/go.mod h1:C5arLuTQ3pVHKPw89v7CDGnqAZSZJj+4Jnr40dsn7tk=
 github.com/go-webauthn/x v0.1.20 h1:brEBDqfiPtNNCdS/peu8gARtq8fIPsHz0VzpPjGvgiw=
 github.com/go-webauthn/x v0.1.20/go.mod h1:n/gAc8ssZJGATM0qThE+W+vfgXiMedsWi3wf/C4lld0=
 github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
@@ -1221,7 +1214,6 @@ github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4
 github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
 github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
 github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
-github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/huaweicloud/huaweicloud-sdk-go-v3 v0.1.142 h1:9iOJ8tfNLw8uSiR5yx7VcHEYSOajJq5hb9SXF0BCUdA=
 github.com/huaweicloud/huaweicloud-sdk-go-v3 v0.1.142/go.mod h1:Y/+YLCFCJtS29i2MbYPTUlNNfwXvkzEsZKR0imY/2aY=
@@ -1662,8 +1654,6 @@ github.com/selectel/domains-go v1.1.0 h1:futG50J43ALLKQAnZk9H9yOtLGnSUh7c5hSvuC5
 github.com/selectel/domains-go v1.1.0/go.mod h1:SugRKfq4sTpnOHquslCpzda72wV8u0cMBHx0C0l+bzA=
 github.com/selectel/go-selvpcclient/v3 v3.2.1 h1:ny6WIAMiHzKxOgOEnwcWE79wIQij1AHHylzPA41MXCw=
 github.com/selectel/go-selvpcclient/v3 v3.2.1/go.mod h1:3EfSf8aEWyhspOGbvZ6mvnFg7JN5uckxNyBFPGWsXNQ=
-github.com/shirou/gopsutil/v4 v4.25.2 h1:NMscG3l2CqtWFS86kj3vP7soOczqrQYIEhO/pMvvQkk=
-github.com/shirou/gopsutil/v4 v4.25.2/go.mod h1:34gBYJzyqCDT11b6bMHP0XCvWeU3J61XRT7a2EmCRTA=
 github.com/shirou/gopsutil/v4 v4.25.3 h1:SeA68lsu8gLggyMbmCn8cmp97V1TI9ld9sVzAUcKcKE=
 github.com/shirou/gopsutil/v4 v4.25.3/go.mod h1:xbuxyoZj+UsgnZrENu3lQivsngRR5BdjbJwf2fv4szA=
 github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
@@ -1755,13 +1745,8 @@ github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69
 github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
 github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
 github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
-github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.1128/go.mod h1:r5r4xbfxSaeR04b166HGsBa/R4U3SueirEUpXGuw+Q0=
-github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.1133 h1:S+ZHcAfI8+ii4MfsCr41R3CdhlTsc5OddGsCfeYJdl8=
-github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.1133/go.mod h1:r5r4xbfxSaeR04b166HGsBa/R4U3SueirEUpXGuw+Q0=
 github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.1134 h1:NDCzSm7r8OZeWQje1FJNHM73Ku4QRrCP1GymfgZYLSM=
 github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.1134/go.mod h1:r5r4xbfxSaeR04b166HGsBa/R4U3SueirEUpXGuw+Q0=
-github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/dnspod v1.0.1128 h1:mrJ5Fbkd7sZIJ5F6oRfh5zebPQaudPH9Y0+GUmFytYU=
-github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/dnspod v1.0.1128/go.mod h1:zbsYIBT+VTX4z4ocjTAdLBIWyNYj3z0BRqd0iPdnjsk=
 github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/dnspod v1.0.1134 h1:Iel1hDW0eQt6p8YDRH2EbjiK5mqC4KEzabSKV0ZQ6FY=
 github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/dnspod v1.0.1134/go.mod h1:8R/Xhu0hKGRFT30uwoN44bisb3cOoNjV8iwH65DjqUc=
 github.com/tjfoc/gmsm v1.4.1 h1:aMe1GlZb+0bLjn+cKTPEvvn9oUEBlJitaZiiBwsbgho=
@@ -1786,8 +1771,6 @@ github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65E
 github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
 github.com/ultradns/ultradns-go-sdk v1.8.0-20241010134910-243eeec h1:2s/ghQ8wKE+UzD/hf3P4Gd1j0JI9ncbxv+nsypPoUYI=
 github.com/ultradns/ultradns-go-sdk v1.8.0-20241010134910-243eeec/go.mod h1:BZr7Qs3ku1ckpqed8tCRSqTlp8NAeZfAVpfx4OzXMss=
-github.com/uozi-tech/cosy v1.17.0 h1:qrdBhbylsHGIOUcUsZKUdVzq8fLvePIclHVSGdszyxk=
-github.com/uozi-tech/cosy v1.17.0/go.mod h1:jEyznv+lmbb0YO0gU//yn4PnyqncTlyV2H5BpDa5aEw=
 github.com/uozi-tech/cosy v1.18.0 h1:L0o1yQ6hTRdzUjWwcT/cJX0AcNaDaaL30gF8pJHUEzM=
 github.com/uozi-tech/cosy v1.18.0/go.mod h1:8s8oQENTTGcmOGas/hkLvE+pZPyNG6AIblRbFgPRCwg=
 github.com/uozi-tech/cosy-driver-mysql v0.2.2 h1:22S/XNIvuaKGqxQPsYPXN8TZ8hHjCQdcJKVQ83Vzxoo=
@@ -1797,10 +1780,6 @@ github.com/uozi-tech/cosy-driver-postgres v0.2.1/go.mod h1:eAy1A89yHbAEfjkhNAifa
 github.com/uozi-tech/cosy-driver-sqlite v0.2.1 h1:W+Z4pY25PSJCeReqroG7LIBeffsqotbpHzgqSMqZDIM=
 github.com/uozi-tech/cosy-driver-sqlite v0.2.1/go.mod h1:2ya7Z5P3HzFi1ktfL8gvwaAGx0DDV0bmWxNSNpaLlwo=
 github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
-github.com/urfave/cli/v3 v3.0.0-beta1 h1:6DTaaUarcM0wX7qj5Hcvs+5Dm3dyUTBbEwIWAjcw9Zg=
-github.com/urfave/cli/v3 v3.0.0-beta1/go.mod h1:FnIeEMYu+ko8zP1F9Ypr3xkZMIDqW3DR92yUtY39q1Y=
-github.com/urfave/cli/v3 v3.1.0 h1:kQR+oiqpJkBAONxBjM4RWivD4AfPHL/f4vqe/gjYU8M=
-github.com/urfave/cli/v3 v3.1.0/go.mod h1:FJSKtM/9AiiTOJL4fJ6TbMUkxBXn7GO9guZqoZtpYpo=
 github.com/urfave/cli/v3 v3.1.1 h1:bNnl8pFI5dxPOjeONvFCDFoECLQsceDG4ejahs4Jtxk=
 github.com/urfave/cli/v3 v3.1.1/go.mod h1:FJSKtM/9AiiTOJL4fJ6TbMUkxBXn7GO9guZqoZtpYpo=
 github.com/vinyldns/go-vinyldns v0.9.16 h1:GZJStDkcCk1F1AcRc64LuuMh+ENL8pHA0CVd4ulRMcQ=
@@ -2679,7 +2658,6 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
 gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
 gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
 gopkg.in/h2non/gock.v1 v1.0.15 h1:SzLqcIlb/fDfg7UvukMpNcWsu7sI5tWwL+KCATZqks0=
internal/cache/cache.go (vendored, 4 changes)

@@ -21,8 +21,8 @@ func Init() {
         logger.Fatal("initializing local cache err", err)
     }
 
-    // Initialize the nginx log scanner
-    InitNginxLogScanner()
+    // Initialize the config scanner
+    InitScanner()
 }
 
 func Set(key string, value interface{}, ttl time.Duration) {
@ -8,76 +8,82 @@ import (
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/0xJacky/Nginx-UI/internal/helper"
|
|
||||||
"github.com/0xJacky/Nginx-UI/internal/nginx"
|
"github.com/0xJacky/Nginx-UI/internal/nginx"
|
||||||
"github.com/0xJacky/Nginx-UI/settings"
|
|
||||||
"github.com/fsnotify/fsnotify"
|
"github.com/fsnotify/fsnotify"
|
||||||
"github.com/uozi-tech/cosy/logger"
|
"github.com/uozi-tech/cosy/logger"
|
||||||
)
|
)
|
||||||
|
|
||||||
// NginxLogCache represents a cached log entry from nginx configuration
|
// ScanCallback is a function that gets called during config scanning
|
||||||
type NginxLogCache struct {
|
// It receives the config file path and contents
|
||||||
Path string `json:"path"` // Path to the log file
|
type ScanCallback func(configPath string, content []byte) error
|
||||||
Type string `json:"type"` // Type of log: "access" or "error"
|
|
||||||
Name string `json:"name"` // Name of the log file
|
// Scanner is responsible for scanning and watching nginx config files
|
||||||
|
type Scanner struct {
|
||||||
|
watcher *fsnotify.Watcher // File system watcher
|
||||||
|
scanTicker *time.Ticker // Ticker for periodic scanning
|
||||||
|
initialized bool // Whether the scanner has been initialized
|
||||||
|
scanning bool // Whether a scan is currently in progress
|
||||||
|
scanMutex sync.RWMutex // Mutex for protecting the scanning state
|
||||||
|
statusChan chan bool // Channel to broadcast scanning status changes
|
||||||
|
subscribers map[chan bool]struct{} // Set of subscribers
|
||||||
|
subscriberMux sync.RWMutex // Mutex for protecting the subscribers map
|
||||||
}
|
}
|
||||||
|
|
||||||
// NginxLogScanner is responsible for scanning and watching nginx config files for log directives
|
// Global variables
|
||||||
type NginxLogScanner struct {
|
var (
|
||||||
logCache map[string]*NginxLogCache // Map of log path to cache entry
|
// scanner is the singleton instance of Scanner
|
||||||
cacheMutex sync.RWMutex // Mutex for protecting the cache
|
scanner *Scanner
|
||||||
watcher *fsnotify.Watcher // File system watcher
|
configScannerInitMux sync.Mutex
|
||||||
scanTicker *time.Ticker // Ticker for periodic scanning
|
|
||||||
initialized bool // Whether the scanner has been initialized
|
// This regex matches: include directives in nginx config files
|
||||||
scanning bool // Whether a scan is currently in progress
|
includeRegex = regexp.MustCompile(`include\s+([^;]+);`)
|
||||||
scanMutex sync.RWMutex // Mutex for protecting the scanning state
|
|
||||||
statusChan chan bool // Channel to broadcast scanning status changes
|
// Global callbacks that will be executed during config file scanning
|
||||||
subscribers map[chan bool]struct{} // Set of subscribers
|
scanCallbacks []ScanCallback
|
||||||
subscriberMux sync.RWMutex // Mutex for protecting the subscribers map
|
scanCallbacksMutex sync.RWMutex
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// Initialize the callbacks slice
|
||||||
|
scanCallbacks = make([]ScanCallback, 0)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add regex constants at package level
|
// InitScanner initializes the config scanner
|
||||||
var (
|
func InitScanner() {
|
||||||
// logScanner is the singleton instance of NginxLogScanner
|
s := GetScanner()
|
||||||
logScanner *NginxLogScanner
|
err := s.Initialize()
|
||||||
scannerInitMux sync.Mutex
|
|
||||||
)
|
|
||||||
|
|
||||||
// Compile the regular expressions for matching log directives
|
|
||||||
var (
|
|
||||||
// This regex matches: access_log or error_log, followed by a path, and optional parameters ending with semicolon
|
|
||||||
logDirectiveRegex = regexp.MustCompile(`(?m)(access_log|error_log)\s+([^\s;]+)(?:\s+[^;]+)?;`)
|
|
||||||
)
|
|
||||||
|
|
||||||
// InitNginxLogScanner initializes the nginx log scanner
|
|
||||||
func InitNginxLogScanner() {
|
|
||||||
scanner := GetNginxLogScanner()
|
|
||||||
err := scanner.Initialize()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Error("Failed to initialize nginx log scanner:", err)
|
logger.Error("Failed to initialize config scanner:", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetNginxLogScanner returns the singleton instance of NginxLogScanner
|
// GetScanner returns the singleton instance of Scanner
|
||||||
func GetNginxLogScanner() *NginxLogScanner {
|
func GetScanner() *Scanner {
|
||||||
scannerInitMux.Lock()
|
configScannerInitMux.Lock()
|
||||||
defer scannerInitMux.Unlock()
|
defer configScannerInitMux.Unlock()
|
||||||
|
|
||||||
if logScanner == nil {
|
if scanner == nil {
|
||||||
logScanner = &NginxLogScanner{
|
scanner = &Scanner{
|
||||||
logCache: make(map[string]*NginxLogCache),
|
|
||||||
statusChan: make(chan bool, 10), // Buffer to prevent blocking
|
statusChan: make(chan bool, 10), // Buffer to prevent blocking
|
||||||
subscribers: make(map[chan bool]struct{}),
|
subscribers: make(map[chan bool]struct{}),
|
||||||
}
|
}
|
||||||
|
|
||||||
// Start broadcaster goroutine
|
// Start broadcaster goroutine
|
||||||
go logScanner.broadcastStatus()
|
go scanner.broadcastStatus()
|
||||||
}
|
}
|
||||||
return logScanner
|
return scanner
|
||||||
|
}
|
||||||
|
|
||||||
|
// RegisterCallback adds a callback function to be executed during scans
|
||||||
|
// This function can be called before Scanner is initialized
|
||||||
|
func RegisterCallback(callback ScanCallback) {
|
||||||
|
scanCallbacksMutex.Lock()
|
||||||
|
defer scanCallbacksMutex.Unlock()
|
||||||
|
scanCallbacks = append(scanCallbacks, callback)
|
||||||
}
|
}
|
||||||
|
|
||||||
// broadcastStatus listens for status changes and broadcasts to all subscribers
|
// broadcastStatus listens for status changes and broadcasts to all subscribers
|
||||||
func (s *NginxLogScanner) broadcastStatus() {
|
func (s *Scanner) broadcastStatus() {
|
||||||
for status := range s.statusChan {
|
for status := range s.statusChan {
|
||||||
s.subscriberMux.RLock()
|
s.subscriberMux.RLock()
|
||||||
for ch := range s.subscribers {
|
for ch := range s.subscribers {
|
||||||
|
@ -92,9 +98,9 @@ func (s *NginxLogScanner) broadcastStatus() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// SubscribeStatusChanges allows a client to subscribe to scanning status changes
|
// SubscribeScanningStatus allows a client to subscribe to scanning status changes
|
||||||
func SubscribeStatusChanges() chan bool {
|
func SubscribeScanningStatus() chan bool {
|
||||||
s := GetNginxLogScanner()
|
s := GetScanner()
|
||||||
ch := make(chan bool, 5) // Buffer to prevent blocking
|
ch := make(chan bool, 5) // Buffer to prevent blocking
|
||||||
|
|
||||||
// Add to subscribers
|
// Add to subscribers
|
||||||
|
@ -116,9 +122,9 @@ func SubscribeStatusChanges() chan bool {
|
||||||
return ch
|
return ch
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnsubscribeStatusChanges removes a subscriber from receiving status updates
|
// UnsubscribeScanningStatus removes a subscriber from receiving status updates
|
||||||
func UnsubscribeStatusChanges(ch chan bool) {
|
func UnsubscribeScanningStatus(ch chan bool) {
|
||||||
s := GetNginxLogScanner()
|
s := GetScanner()
|
||||||
|
|
||||||
s.subscriberMux.Lock()
|
s.subscriberMux.Lock()
|
||||||
delete(s.subscribers, ch)
|
delete(s.subscribers, ch)
|
||||||
|
@ -128,8 +134,8 @@ func UnsubscribeStatusChanges(ch chan bool) {
|
||||||
close(ch)
|
close(ch)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Initialize sets up the log scanner and starts watching for file changes
|
// Initialize sets up the scanner and starts watching for file changes
|
||||||
func (s *NginxLogScanner) Initialize() error {
|
func (s *Scanner) Initialize() error {
|
||||||
if s.initialized {
|
if s.initialized {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -209,7 +215,7 @@ func (s *NginxLogScanner) Initialize() error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// watchForChanges handles the fsnotify events and triggers rescans when necessary
|
// watchForChanges handles the fsnotify events and triggers rescans when necessary
|
||||||
func (s *NginxLogScanner) watchForChanges() {
|
func (s *Scanner) watchForChanges() {
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case event, ok := <-s.watcher.Events:
|
case event, ok := <-s.watcher.Events:
|
||||||
|
@ -228,7 +234,7 @@ func (s *NginxLogScanner) watchForChanges() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Process file changes - no .conf restriction anymore
|
// Process file changes
|
||||||
if !event.Has(fsnotify.Remove) {
|
if !event.Has(fsnotify.Remove) {
|
||||||
logger.Debug("Config file changed:", event.Name)
|
logger.Debug("Config file changed:", event.Name)
|
||||||
// Give the system a moment to finish writing the file
|
// Give the system a moment to finish writing the file
|
||||||
|
@ -239,9 +245,7 @@ func (s *NginxLogScanner) watchForChanges() {
|
||||||
logger.Error("Failed to scan changed file:", err)
|
logger.Error("Failed to scan changed file:", err)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// For removed files, we need to clean up any log entries that came from this file
|
// For removed files, we need a full rescan
|
||||||
// This would require tracking which logs came from which config files
|
|
||||||
// For now, we'll do a full rescan which is simpler but less efficient
|
|
||||||
err := s.ScanAllConfigs()
|
err := s.ScanAllConfigs()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Error("Failed to rescan configs after file removal:", err)
|
logger.Error("Failed to rescan configs after file removal:", err)
|
||||||
|
@ -257,8 +261,8 @@ func (s *NginxLogScanner) watchForChanges() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// scanSingleFile scans a single file and updates the log cache accordingly
|
// scanSingleFile scans a single file and executes all registered callbacks
|
||||||
func (s *NginxLogScanner) scanSingleFile(filePath string) error {
|
func (s *Scanner) scanSingleFile(filePath string) error {
|
||||||
// Set scanning state to true
|
// Set scanning state to true
|
||||||
s.scanMutex.Lock()
|
s.scanMutex.Lock()
|
||||||
wasScanning := s.scanning
|
wasScanning := s.scanning
|
||||||
|
@ -278,134 +282,30 @@ func (s *NginxLogScanner) scanSingleFile(filePath string) error {
|
||||||
s.scanMutex.Unlock()
|
s.scanMutex.Unlock()
|
||||||
}()
|
}()
|
||||||
|
|
||||||
// Create a temporary cache for new entries from this file
|
|
||||||
newEntries := make(map[string]*NginxLogCache)
|
|
||||||
|
|
||||||
// Scan the file
|
|
||||||
err := s.scanConfigFile(filePath, newEntries)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update the main cache with new entries
|
|
||||||
s.cacheMutex.Lock()
|
|
||||||
for path, entry := range newEntries {
|
|
||||||
s.logCache[path] = entry
|
|
||||||
}
|
|
||||||
s.cacheMutex.Unlock()
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ScanAllConfigs scans all nginx config files for log directives
|
|
||||||
func (s *NginxLogScanner) ScanAllConfigs() error {
|
|
||||||
// Set scanning state to true
|
|
||||||
s.scanMutex.Lock()
|
|
||||||
wasScanning := s.scanning
|
|
||||||
s.scanning = true
|
|
||||||
if !wasScanning {
|
|
||||||
// Only broadcast if status changed from not scanning to scanning
|
|
||||||
s.statusChan <- true
|
|
||||||
}
|
|
||||||
s.scanMutex.Unlock()
|
|
||||||
|
|
||||||
// Ensure we reset scanning state when done
|
|
||||||
defer func() {
|
|
||||||
s.scanMutex.Lock()
|
|
||||||
s.scanning = false
|
|
||||||
// Broadcast the completion
|
|
||||||
s.statusChan <- false
|
|
||||||
s.scanMutex.Unlock()
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Initialize a new cache to replace the old one
|
|
||||||
newCache := make(map[string]*NginxLogCache)
|
|
||||||
|
|
||||||
// Get the main config file
|
|
||||||
mainConfigPath := nginx.GetConfPath("", "nginx.conf")
|
|
||||||
err := s.scanConfigFile(mainConfigPath, newCache)
|
|
||||||
if err != nil {
|
|
||||||
logger.Error("Failed to scan main config:", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Scan sites-available directory - no .conf restriction anymore
|
|
||||||
sitesAvailablePath := nginx.GetConfPath("sites-available", "")
|
|
||||||
sitesAvailableFiles, err := os.ReadDir(sitesAvailablePath)
|
|
||||||
if err == nil {
|
|
||||||
for _, file := range sitesAvailableFiles {
|
|
||||||
if !file.IsDir() {
|
|
||||||
configPath := filepath.Join(sitesAvailablePath, file.Name())
|
|
||||||
err := s.scanConfigFile(configPath, newCache)
|
|
||||||
if err != nil {
|
|
||||||
logger.Error("Failed to scan config:", configPath, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Scan stream-available directory if it exists
|
|
||||||
streamAvailablePath := nginx.GetConfPath("stream-available", "")
|
|
||||||
streamAvailableFiles, err := os.ReadDir(streamAvailablePath)
|
|
||||||
if err == nil {
|
|
||||||
for _, file := range streamAvailableFiles {
|
|
||||||
if !file.IsDir() {
|
|
||||||
configPath := filepath.Join(streamAvailablePath, file.Name())
|
|
||||||
err := s.scanConfigFile(configPath, newCache)
|
|
||||||
if err != nil {
|
|
||||||
logger.Error("Failed to scan stream config:", configPath, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Replace the old cache with the new one
|
|
||||||
s.cacheMutex.Lock()
|
|
||||||
s.logCache = newCache
|
|
||||||
s.cacheMutex.Unlock()
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// scanConfigFile scans a single config file for log directives using regex
|
|
||||||
func (s *NginxLogScanner) scanConfigFile(configPath string, cache map[string]*NginxLogCache) error {
|
|
||||||
// Open the file
|
// Open the file
|
||||||
file, err := os.Open(configPath)
|
file, err := os.Open(filePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer file.Close()
|
defer file.Close()
|
||||||
|
|
||||||
// Read the entire file content
|
// Read the entire file content
|
||||||
content, err := os.ReadFile(configPath)
|
content, err := os.ReadFile(filePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Find all matches of log directives
|
// Execute all registered callbacks
|
||||||
matches := logDirectiveRegex.FindAllSubmatch(content, -1)
|
scanCallbacksMutex.RLock()
|
||||||
for _, match := range matches {
|
for _, callback := range scanCallbacks {
|
||||||
if len(match) >= 3 {
|
err := callback(filePath, content)
|
||||||
directiveType := string(match[1]) // "access_log" or "error_log"
|
if err != nil {
|
||||||
logPath := string(match[2]) // The log file path
|
logger.Error("Callback error for file", filePath, ":", err)
|
||||||
|
|
||||||
// Validate the log path
|
|
||||||
if isValidLogPath(logPath) {
|
|
||||||
logType := "access"
|
|
||||||
if directiveType == "error_log" {
|
|
||||||
logType = "error"
|
|
||||||
}
|
|
||||||
|
|
||||||
cache[logPath] = &NginxLogCache{
|
|
||||||
Path: logPath,
|
|
||||||
Type: logType,
|
|
||||||
Name: filepath.Base(logPath),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
scanCallbacksMutex.RUnlock()
|
||||||
|
|
||||||
// Look for include directives to process included files
|
// Look for include directives to process included files
|
||||||
includeRegex := regexp.MustCompile(`include\s+([^;]+);`)
|
|
||||||
includeMatches := includeRegex.FindAllSubmatch(content, -1)
|
includeMatches := includeRegex.FindAllSubmatch(content, -1)
|
||||||
|
|
||||||
for _, match := range includeMatches {
|
for _, match := range includeMatches {
|
||||||
|
@ -430,7 +330,7 @@ func (s *NginxLogScanner) scanConfigFile(configPath string, cache map[string]*Ng
|
||||||
for _, matchedFile := range matchedFiles {
|
for _, matchedFile := range matchedFiles {
|
||||||
fileInfo, err := os.Stat(matchedFile)
|
fileInfo, err := os.Stat(matchedFile)
|
||||||
if err == nil && !fileInfo.IsDir() {
|
if err == nil && !fileInfo.IsDir() {
|
||||||
err = s.scanConfigFile(matchedFile, cache)
|
err = s.scanSingleFile(matchedFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Error("Failed to scan included file:", matchedFile, err)
|
logger.Error("Failed to scan included file:", matchedFile, err)
|
||||||
}
|
}
|
||||||
|
@ -446,7 +346,7 @@ func (s *NginxLogScanner) scanConfigFile(configPath string, cache map[string]*Ng
|
||||||
|
|
||||||
fileInfo, err := os.Stat(includePath)
|
fileInfo, err := os.Stat(includePath)
|
||||||
if err == nil && !fileInfo.IsDir() {
|
if err == nil && !fileInfo.IsDir() {
|
||||||
err = s.scanConfigFile(includePath, cache)
|
err = s.scanSingleFile(includePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Error("Failed to scan included file:", includePath, err)
|
logger.Error("Failed to scan included file:", includePath, err)
|
||||||
}
|
}
|
||||||
|
@ -458,77 +358,69 @@ func (s *NginxLogScanner) scanConfigFile(configPath string, cache map[string]*Ng
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// isLogPathUnderWhiteList checks if the log path is under one of the paths in LogDirWhiteList
|
// ScanAllConfigs scans all nginx config files and executes all registered callbacks
|
||||||
// This is a duplicate of the function in nginx_log package to avoid import cycle
|
func (s *Scanner) ScanAllConfigs() error {
|
||||||
func isLogPathUnderWhiteList(path string) bool {
|
// Set scanning state to true
|
||||||
// deep copy
|
s.scanMutex.Lock()
|
||||||
logDirWhiteList := append([]string{}, settings.NginxSettings.LogDirWhiteList...)
|
wasScanning := s.scanning
|
||||||
|
s.scanning = true
|
||||||
accessLogPath := nginx.GetAccessLogPath()
|
if !wasScanning {
|
||||||
errorLogPath := nginx.GetErrorLogPath()
|
// Only broadcast if status changed from not scanning to scanning
|
||||||
|
s.statusChan <- true
|
||||||
if accessLogPath != "" {
|
|
||||||
logDirWhiteList = append(logDirWhiteList, filepath.Dir(accessLogPath))
|
|
||||||
}
|
|
||||||
if errorLogPath != "" {
|
|
||||||
logDirWhiteList = append(logDirWhiteList, filepath.Dir(errorLogPath))
|
|
||||||
}
|
}
|
||||||
|
s.scanMutex.Unlock()
|
||||||
|
|
||||||
for _, whitePath := range logDirWhiteList {
|
// Ensure we reset scanning state when done
|
||||||
if helper.IsUnderDirectory(path, whitePath) {
|
defer func() {
|
||||||
return true
|
s.scanMutex.Lock()
|
||||||
}
|
s.scanning = false
|
||||||
}
|
// Broadcast the completion
|
||||||
return false
|
s.statusChan <- false
|
||||||
}
|
 		s.scanMutex.Unlock()
 	}()

-// isValidLogPath checks if a log path is valid:
-// 1. It must be a regular file or a symlink to a regular file
-// 2. It must not point to a console or special device
-// 3. It must be under the whitelist directories
-func isValidLogPath(logPath string) bool {
-	// First check if the path is under the whitelist
-	if !isLogPathUnderWhiteList(logPath) {
-		logger.Warn("Log path is not under whitelist:", logPath)
-		return false
-	}
-
-	// Check if the path exists
-	fileInfo, err := os.Lstat(logPath)
-	if err != nil {
-		// If file doesn't exist, it might be created later
-		// We'll assume it's valid for now
-		return true
-	}
-
-	// If it's a symlink, follow it
-	if fileInfo.Mode()&os.ModeSymlink != 0 {
-		linkTarget, err := os.Readlink(logPath)
-		if err != nil {
-			return false
-		}
-
-		// Make absolute path if the link target is relative
-		if !filepath.IsAbs(linkTarget) {
-			linkTarget = filepath.Join(filepath.Dir(logPath), linkTarget)
-		}
-
-		// Check the target file
-		targetInfo, err := os.Stat(linkTarget)
-		if err != nil {
-			return false
-		}
-
-		// Only accept regular files as targets
-		return targetInfo.Mode().IsRegular()
-	}
-
-	// For non-symlinks, just check if it's a regular file
-	return fileInfo.Mode().IsRegular()
-}
+	// Get the main config file
+	mainConfigPath := nginx.GetConfPath("", "nginx.conf")
+	err := s.scanSingleFile(mainConfigPath)
+	if err != nil {
+		logger.Error("Failed to scan main config:", err)
+	}
+
+	// Scan sites-available directory
+	sitesAvailablePath := nginx.GetConfPath("sites-available", "")
+	sitesAvailableFiles, err := os.ReadDir(sitesAvailablePath)
+	if err == nil {
+		for _, file := range sitesAvailableFiles {
+			if !file.IsDir() {
+				configPath := filepath.Join(sitesAvailablePath, file.Name())
+				err := s.scanSingleFile(configPath)
+				if err != nil {
+					logger.Error("Failed to scan config:", configPath, err)
+				}
+			}
+		}
+	}
+
+	// Scan stream-available directory if it exists
+	streamAvailablePath := nginx.GetConfPath("stream-available", "")
+	streamAvailableFiles, err := os.ReadDir(streamAvailablePath)
+	if err == nil {
+		for _, file := range streamAvailableFiles {
+			if !file.IsDir() {
+				configPath := filepath.Join(streamAvailablePath, file.Name())
+				err := s.scanSingleFile(configPath)
+				if err != nil {
+					logger.Error("Failed to scan stream config:", configPath, err)
+				}
+			}
+		}
+	}
+
+	return nil
+}

 // Shutdown cleans up resources used by the scanner
-func (s *NginxLogScanner) Shutdown() {
+func (s *Scanner) Shutdown() {
 	if s.watcher != nil {
 		s.watcher.Close()
 	}

@@ -551,34 +443,9 @@ func (s *NginxLogScanner) Shutdown() {
 	close(s.statusChan)
 }

-// GetAllLogPaths returns all cached log paths
-func GetAllLogPaths(filters ...func(*NginxLogCache) bool) []*NginxLogCache {
-	s := GetNginxLogScanner()
-	s.cacheMutex.RLock()
-	defer s.cacheMutex.RUnlock()
-
-	result := make([]*NginxLogCache, 0, len(s.logCache))
-	for _, cache := range s.logCache {
-		flag := true
-		if len(filters) > 0 {
-			for _, filter := range filters {
-				if !filter(cache) {
-					flag = false
-					break
-				}
-			}
-		}
-		if flag {
-			result = append(result, cache)
-		}
-	}
-
-	return result
-}
-
-// IsScanning returns whether a scan is currently in progress
-func IsScanning() bool {
-	s := GetNginxLogScanner()
+// IsScanningInProgress returns whether a scan is currently in progress
+func IsScanningInProgress() bool {
+	s := GetScanner()
 	s.scanMutex.RLock()
 	defer s.scanMutex.RUnlock()
 	return s.scanning
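The refactored scanner hands each config file it reads to callbacks registered by other packages (see cache.RegisterCallback(scanForLogDirectives) further down in this commit). The following is a minimal, hypothetical sketch of that pattern only; the registry internals and the helper names scanOneConfig/RegisterCallback shown here are assumptions for illustration, not the project's actual implementation.

package main

import (
	"fmt"
	"os"
	"sync"
)

// ScanCallback receives the path and raw content of every scanned config file.
type ScanCallback func(configPath string, content []byte) error

var (
	callbacks     []ScanCallback
	callbackMutex sync.RWMutex
)

// RegisterCallback adds a callback to the global registry.
func RegisterCallback(cb ScanCallback) {
	callbackMutex.Lock()
	defer callbackMutex.Unlock()
	callbacks = append(callbacks, cb)
}

// scanOneConfig plays the role of the scanner's per-file scan: read the file
// and fan its content out to every registered callback.
func scanOneConfig(configPath string) error {
	content, err := os.ReadFile(configPath)
	if err != nil {
		return err
	}

	callbackMutex.RLock()
	defer callbackMutex.RUnlock()
	for _, cb := range callbacks {
		if err := cb(configPath, content); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	RegisterCallback(func(configPath string, content []byte) error {
		fmt.Printf("scanned %s (%d bytes)\n", configPath, len(content))
		return nil
	})
	_ = scanOneConfig("/etc/nginx/nginx.conf") // path is illustrative
}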
@@ -161,6 +161,11 @@
       "https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_type"
     ]
   },
+  "auth_oidc": {
+    "links": [
+      "https://nginx.org/en/docs/http/ngx_http_oidc_module.html#auth_oidc"
+    ]
+  },
   "auth_request": {
     "links": [
       "https://nginx.org/en/docs/http/ngx_http_auth_request_module.html#auth_request"

@@ -251,16 +256,36 @@
       "https://nginx.org/en/docs/http/ngx_http_core_module.html#client_header_timeout"
     ]
   },
+  "client_id": {
+    "links": [
+      "https://nginx.org/en/docs/http/ngx_http_oidc_module.html#client_id"
+    ]
+  },
   "client_max_body_size": {
     "links": [
       "https://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size"
     ]
   },
+  "client_secret": {
+    "links": [
+      "https://nginx.org/en/docs/http/ngx_http_oidc_module.html#client_secret"
+    ]
+  },
+  "config_url": {
+    "links": [
+      "https://nginx.org/en/docs/http/ngx_http_oidc_module.html#config_url"
+    ]
+  },
   "connection_pool_size": {
     "links": [
       "https://nginx.org/en/docs/http/ngx_http_core_module.html#connection_pool_size"
     ]
   },
+  "cookie_name": {
+    "links": [
+      "https://nginx.org/en/docs/http/ngx_http_oidc_module.html#cookie_name"
+    ]
+  },
   "create_full_put_path": {
     "links": [
       "https://nginx.org/en/docs/http/ngx_http_dav_module.html#create_full_put_path"

@@ -357,6 +382,11 @@
       "https://nginx.org/en/docs/http/ngx_http_headers_module.html#expires"
     ]
   },
+  "extra_auth_args": {
+    "links": [
+      "https://nginx.org/en/docs/http/ngx_http_oidc_module.html#extra_auth_args"
+    ]
+  },
   "f4f": {
     "links": [
       "https://nginx.org/en/docs/http/ngx_http_f4f_module.html#f4f"

@@ -1098,6 +1128,11 @@
       "https://nginx.org/en/docs/http/ngx_http_upstream_module.html#ip_hash"
     ]
   },
+  "issuer": {
+    "links": [
+      "https://nginx.org/en/docs/http/ngx_http_oidc_module.html#issuer"
+    ]
+  },
   "js_access": {
     "links": [
       "https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_access"

@@ -1639,6 +1674,11 @@
       "https://nginx.org/en/docs/http/ngx_http_upstream_module.html#ntlm"
     ]
   },
+  "oidc_provider": {
+    "links": [
+      "https://nginx.org/en/docs/http/ngx_http_oidc_module.html#oidc_provider"
+    ]
+  },
   "open_file_cache": {
     "links": [
       "https://nginx.org/en/docs/http/ngx_http_core_module.html#open_file_cache"

@@ -1670,6 +1710,11 @@
       "https://nginx.org/en/docs/ngx_otel_module.html#otel_exporter"
     ]
   },
+  "otel_resource_attr": {
+    "links": [
+      "https://nginx.org/en/docs/ngx_otel_module.html#otel_resource_attr"
+    ]
+  },
   "otel_service_name": {
     "links": [
       "https://nginx.org/en/docs/ngx_otel_module.html#otel_service_name"

@@ -1775,6 +1820,11 @@
       "https://nginx.org/en/docs/mail/ngx_mail_core_module.html#protocol"
     ]
   },
+  "proxy": {
+    "links": [
+      "https://nginx.org/en/docs/ngx_mgmt_module.html#proxy"
+    ]
+  },
   "proxy_bind": {
     "links": [
       "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_bind",

@@ -2027,6 +2077,11 @@
       "https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass_trailers"
     ]
   },
+  "proxy_password": {
+    "links": [
+      "https://nginx.org/en/docs/ngx_mgmt_module.html#proxy_password"
+    ]
+  },
   "proxy_protocol": {
     "links": [
       "https://nginx.org/en/docs/mail/ngx_mail_proxy_module.html#proxy_protocol",

@@ -2225,6 +2280,11 @@
       "https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_upload_rate"
     ]
   },
+  "proxy_username": {
+    "links": [
+      "https://nginx.org/en/docs/ngx_mgmt_module.html#proxy_username"
+    ]
+  },
   "queue": {
     "links": [
       "https://nginx.org/en/docs/http/ngx_http_upstream_module.html#queue"

@@ -2286,6 +2346,11 @@
       "https://nginx.org/en/docs/http/ngx_http_core_module.html#recursive_error_pages"
     ]
   },
+  "redirect_uri": {
+    "links": [
+      "https://nginx.org/en/docs/http/ngx_http_oidc_module.html#redirect_uri"
+    ]
+  },
   "referer_hash_bucket_size": {
     "links": [
       "https://nginx.org/en/docs/http/ngx_http_referer_module.html#referer_hash_bucket_size"

@@ -2576,6 +2641,11 @@
       "https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_temp_path"
     ]
   },
+  "scope": {
+    "links": [
+      "https://nginx.org/en/docs/http/ngx_http_oidc_module.html#scope"
+    ]
+  },
   "secure_link": {
     "links": [
       "https://nginx.org/en/docs/http/ngx_http_secure_link_module.html#secure_link"

@@ -2664,6 +2734,16 @@
       "https://nginx.org/en/docs/http/ngx_http_session_log_module.html#session_log_zone"
     ]
   },
+  "session_store": {
+    "links": [
+      "https://nginx.org/en/docs/http/ngx_http_oidc_module.html#session_store"
+    ]
+  },
+  "session_timeout": {
+    "links": [
+      "https://nginx.org/en/docs/http/ngx_http_oidc_module.html#session_timeout"
+    ]
+  },
   "set": {
     "links": [
       "https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#set",

@@ -2802,6 +2882,7 @@
   },
   "ssl_crl": {
     "links": [
+      "https://nginx.org/en/docs/http/ngx_http_oidc_module.html#ssl_crl",
       "https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_crl",
       "https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_crl",
       "https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_crl",

@@ -2952,6 +3033,7 @@
   },
   "ssl_trusted_certificate": {
     "links": [
+      "https://nginx.org/en/docs/http/ngx_http_oidc_module.html#ssl_trusted_certificate",
       "https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_trusted_certificate",
       "https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_trusted_certificate",
       "https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_trusted_certificate",
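The directive index above maps each nginx directive name to its documentation links. A hedged Go sketch of how such a structure could be decoded and queried; the type, variable names, and the inline JSON fragment are illustrative only, not the project's actual API or file.

package main

import (
	"encoding/json"
	"fmt"
)

// directiveDoc mirrors the shape of one entry in the directive index.
type directiveDoc struct {
	Links []string `json:"links"`
}

// fragment is a hand-copied excerpt of the index shown above.
const fragment = `{
  "auth_oidc": {
    "links": [
      "https://nginx.org/en/docs/http/ngx_http_oidc_module.html#auth_oidc"
    ]
  }
}`

func main() {
	var index map[string]directiveDoc
	if err := json.Unmarshal([]byte(fragment), &index); err != nil {
		panic(err)
	}
	// Look up the documentation links for a single directive.
	fmt.Println(index["auth_oidc"].Links)
}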
68
internal/nginx_log/log_cache.go
Normal file

@@ -0,0 +1,68 @@
package nginx_log

import (
	"sync"
)

// NginxLogCache represents a cached log entry from nginx configuration
type NginxLogCache struct {
	Path string `json:"path"` // Path to the log file
	Type string `json:"type"` // Type of log: "access" or "error"
	Name string `json:"name"` // Name of the log file
}

var (
	// logCache is the map to store all found log files
	logCache   map[string]*NginxLogCache
	cacheMutex sync.RWMutex
)

func init() {
	// Initialize the cache
	logCache = make(map[string]*NginxLogCache)
}

// AddLogPath adds a log path to the log cache
func AddLogPath(path, logType, name string) {
	cacheMutex.Lock()
	defer cacheMutex.Unlock()

	logCache[path] = &NginxLogCache{
		Path: path,
		Type: logType,
		Name: name,
	}
}

// GetAllLogPaths returns all cached log paths
func GetAllLogPaths(filters ...func(*NginxLogCache) bool) []*NginxLogCache {
	cacheMutex.RLock()
	defer cacheMutex.RUnlock()

	result := make([]*NginxLogCache, 0, len(logCache))
	for _, cache := range logCache {
		flag := true
		if len(filters) > 0 {
			for _, filter := range filters {
				if !filter(cache) {
					flag = false
					break
				}
			}
		}
		if flag {
			result = append(result, cache)
		}
	}

	return result
}

// ClearLogCache clears all entries in the log cache
func ClearLogCache() {
	cacheMutex.Lock()
	defer cacheMutex.Unlock()

	// Clear the cache
	logCache = make(map[string]*NginxLogCache)
}
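A brief, hypothetical illustration of how the cache above could be used from elsewhere in the package: register two paths, then list only the access logs through a filter. The function name and the log paths are made up for the example.

package nginx_log

// exampleLogCacheUsage registers two entries and filters for access logs.
func exampleLogCacheUsage() []*NginxLogCache {
	AddLogPath("/var/log/nginx/access.log", "access", "access.log")
	AddLogPath("/var/log/nginx/error.log", "error", "error.log")

	// Keep only entries whose type is "access".
	return GetAllLogPaths(func(c *NginxLogCache) bool {
		return c.Type == "access"
	})
}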
@@ -2,10 +2,10 @@ package nginx_log

 import (
 	"slices"
-
-	"github.com/0xJacky/Nginx-UI/internal/cache"
 )

+// typeToInt converts log type string to a sortable integer
+// "access" = 0, "error" = 1
 func typeToInt(t string) int {
 	if t == "access" {
 		return 0

@@ -13,7 +13,9 @@ func typeToInt(t string) int {
 	return 1
 }

-func sortCompare(i, j *cache.NginxLogCache, key string, order string) bool {
+// sortCompare compares two log entries based on the specified key and order
+// Returns true if i should come after j in the sorted list
+func sortCompare(i, j *NginxLogCache, key string, order string) bool {
 	flag := false

 	switch key {

@@ -32,8 +34,11 @@ func sortCompare(i, j *cache.NginxLogCache, key string, order string) bool {
 	return flag
 }

-func Sort(key string, order string, configs []*cache.NginxLogCache) []*cache.NginxLogCache {
-	slices.SortStableFunc(configs, func(i, j *cache.NginxLogCache) int {
+// Sort sorts a list of NginxLogCache entries by the specified key and order
+// Supported keys: "type", "name"
+// Supported orders: "asc", "desc"
+func Sort(key string, order string, configs []*NginxLogCache) []*NginxLogCache {
+	slices.SortStableFunc(configs, func(i, j *NginxLogCache) int {
 		if sortCompare(i, j, key, order) {
 			return 1
 		}
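A hypothetical call site for the Sort function shown in this hunk, using the supported key "name" and order "asc" noted in the new comments; the wrapper function name is illustrative.

package nginx_log

// exampleSortedLogs returns the cached log entries ordered by name, ascending.
func exampleSortedLogs() []*NginxLogCache {
	return Sort("name", "asc", GetAllLogPaths())
}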
@@ -2,19 +2,114 @@ package nginx_log

 import (
 	"fmt"
+	"os"
+	"path/filepath"
+	"regexp"
+
 	"github.com/0xJacky/Nginx-UI/internal/cache"
 	"github.com/0xJacky/Nginx-UI/internal/helper"
 	"github.com/0xJacky/Nginx-UI/internal/nginx"
 	"github.com/0xJacky/Nginx-UI/settings"
-	"path/filepath"
+	"github.com/uozi-tech/cosy/logger"
 )

-// IsLogPathUnderWhiteList checks if the log path is under one of the paths in LogDirWhiteList
+// Regular expression for log directives - matches access_log or error_log
+var logDirectiveRegex = regexp.MustCompile(`(?m)(access_log|error_log)\s+([^\s;]+)(?:\s+[^;]+)?;`)
+
+// Use init function to automatically register callback
+func init() {
+	// Register the callback directly with the global registry
+	cache.RegisterCallback(scanForLogDirectives)
+}
+
+// scanForLogDirectives scans and parses configuration files for log directives
+func scanForLogDirectives(configPath string, content []byte) error {
+	// Clear previous scan results when scanning the main config
+	if configPath == nginx.GetConfPath("", "nginx.conf") {
+		ClearLogCache()
+	}
+
+	// Find log directives using regex
+	matches := logDirectiveRegex.FindAllSubmatch(content, -1)
+
+	// Parse log paths
+	for _, match := range matches {
+		if len(match) >= 3 {
+			directiveType := string(match[1]) // "access_log" or "error_log"
+			logPath := string(match[2])       // Path to log file
+
+			// Validate log path
+			if IsLogPathUnderWhiteList(logPath) && isValidLogPath(logPath) {
+				logType := "access"
+				if directiveType == "error_log" {
+					logType = "error"
+				}
+
+				// Add to cache
+				AddLogPath(logPath, logType, filepath.Base(logPath))
+			}
+		}
+	}
+
+	return nil
+}
+
+// GetAllLogs returns all log paths
+func GetAllLogs(filters ...func(*NginxLogCache) bool) []*NginxLogCache {
+	return GetAllLogPaths(filters...)
+}
+
+// isValidLogPath checks if a log path is valid:
+// 1. It must be a regular file or a symlink to a regular file
+// 2. It must not point to a console or special device
+// 3. It must be under the whitelist directories
+func isValidLogPath(logPath string) bool {
+	// First check if the path is in the whitelist
+	if !IsLogPathUnderWhiteList(logPath) {
+		logger.Warn("Log path is not under whitelist:", logPath)
+		return false
+	}
+
+	// Check if the path exists
+	fileInfo, err := os.Lstat(logPath)
+	if err != nil {
+		// If the file doesn't exist, it might be created later
+		// We'll assume it's valid for now
+		return true
+	}
+
+	// If it's a symlink, follow it
+	if fileInfo.Mode()&os.ModeSymlink != 0 {
+		linkTarget, err := os.Readlink(logPath)
+		if err != nil {
+			return false
+		}
+
+		// Make the link target path absolute if it's relative
+		if !filepath.IsAbs(linkTarget) {
+			linkTarget = filepath.Join(filepath.Dir(logPath), linkTarget)
+		}
+
+		// Check the target file
+		targetInfo, err := os.Stat(linkTarget)
+		if err != nil {
+			return false
+		}
+
+		// Only accept regular files as targets
+		return targetInfo.Mode().IsRegular()
+	}
+
+	// For non-symlinks, just check if it's a regular file
+	return fileInfo.Mode().IsRegular()
+}
+
+// IsLogPathUnderWhiteList checks if a log path is under one of the paths in LogDirWhiteList
 func IsLogPathUnderWhiteList(path string) bool {
 	cacheKey := fmt.Sprintf("isLogPathUnderWhiteList:%s", path)
 	res, ok := cache.Get(cacheKey)

-	// deep copy
+	// Deep copy the whitelist
 	logDirWhiteList := append([]string{}, settings.NginxSettings.LogDirWhiteList...)

 	accessLogPath := nginx.GetAccessLogPath()

@@ -27,7 +122,7 @@ func IsLogPathUnderWhiteList(path string) bool {
 		logDirWhiteList = append(logDirWhiteList, filepath.Dir(errorLogPath))
 	}

-	// no cache, check it
+	// No cache, check it
 	if !ok {
 		for _, whitePath := range logDirWhiteList {
 			if helper.IsUnderDirectory(path, whitePath) {
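A standalone sketch of what logDirectiveRegex (copied verbatim from the hunk above) extracts from a configuration fragment; the sample config text and output are illustrative.

package main

import (
	"fmt"
	"regexp"
)

// Same pattern as logDirectiveRegex in the diff above.
var logDirectiveRegex = regexp.MustCompile(`(?m)(access_log|error_log)\s+([^\s;]+)(?:\s+[^;]+)?;`)

const sample = `
access_log /var/log/nginx/access.log main;
error_log  /var/log/nginx/error.log warn;
`

func main() {
	// Group 1 is the directive name, group 2 is the log path.
	for _, m := range logDirectiveRegex.FindAllStringSubmatch(sample, -1) {
		fmt.Printf("directive=%s path=%s\n", m[1], m[2])
	}
	// directive=access_log path=/var/log/nginx/access.log
	// directive=error_log path=/var/log/nginx/error.log
}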
@@ -10,6 +10,7 @@ import (
 	"github.com/0xJacky/Nginx-UI/api/cluster"
 	"github.com/0xJacky/Nginx-UI/api/config"
 	"github.com/0xJacky/Nginx-UI/api/crypto"
+	"github.com/0xJacky/Nginx-UI/api/index"
 	"github.com/0xJacky/Nginx-UI/api/nginx"
 	nginxLog "github.com/0xJacky/Nginx-UI/api/nginx_log"
 	"github.com/0xJacky/Nginx-UI/api/notification"

@@ -82,6 +83,7 @@ func InitRouter() {
 	{
 		terminal.InitRouter(o)
 	}
+	index.InitRouter(w)
 	nginxLog.InitRouter(w)
 	upstream.InitRouter(w)
 	system.InitWebSocketRouter(w)