Mirror of https://github.com/0xJacky/nginx-ui.git (synced 2025-05-11 18:35:51 +02:00)
refactor: project directory structure
This commit is contained in:
parent c1193a5b8c
commit e5a5889931
367 changed files with 710 additions and 756 deletions
api/analytic.go (new file, 217 lines)
@@ -0,0 +1,217 @@
package api

import (
    "fmt"
    "net/http"
    "runtime"
    "time"

    analytic2 "github.com/0xJacky/Nginx-UI/internal/analytic"
    "github.com/0xJacky/Nginx-UI/internal/logger"
    "github.com/gin-gonic/gin"
    "github.com/gorilla/websocket"
    "github.com/shirou/gopsutil/v3/cpu"
    "github.com/shirou/gopsutil/v3/host"
    "github.com/shirou/gopsutil/v3/load"
    "github.com/shirou/gopsutil/v3/net"
    "github.com/spf13/cast"
)

type CPUStat struct {
    User   float64 `json:"user"`
    System float64 `json:"system"`
    Idle   float64 `json:"idle"`
    Total  float64 `json:"total"`
}

type Stat struct {
    Uptime  uint64             `json:"uptime"`
    LoadAvg *load.AvgStat      `json:"loadavg"`
    CPU     CPUStat            `json:"cpu"`
    Memory  analytic2.MemStat  `json:"memory"`
    Disk    analytic2.DiskStat `json:"disk"`
    Network net.IOCountersStat `json:"network"`
}

func Analytic(c *gin.Context) {
    var upGrader = websocket.Upgrader{
        CheckOrigin: func(r *http.Request) bool {
            return true
        },
    }
    // upgrade http to websocket
    ws, err := upGrader.Upgrade(c.Writer, c.Request, nil)
    if err != nil {
        logger.Error(err)
        return
    }

    defer ws.Close()

    var stat Stat

    for {
        stat.Memory, err = analytic2.GetMemoryStat()
        if err != nil {
            logger.Error(err)
            return
        }

        // sample cumulative CPU times one second apart and take the delta
        cpuTimesBefore, _ := cpu.Times(false)
        time.Sleep(1000 * time.Millisecond)
        cpuTimesAfter, _ := cpu.Times(false)
        // normalize the one-second delta by the number of logical CPUs
        threadNum := runtime.GOMAXPROCS(0)
        cpuUserUsage := (cpuTimesAfter[0].User - cpuTimesBefore[0].User) / (float64(1000*threadNum) / 1000)
        cpuSystemUsage := (cpuTimesAfter[0].System - cpuTimesBefore[0].System) / (float64(1000*threadNum) / 1000)

        stat.CPU = CPUStat{
            User:   cast.ToFloat64(fmt.Sprintf("%.2f", cpuUserUsage*100)),
            System: cast.ToFloat64(fmt.Sprintf("%.2f", cpuSystemUsage*100)),
            Idle:   cast.ToFloat64(fmt.Sprintf("%.2f", (1-cpuUserUsage-cpuSystemUsage)*100)),
            Total:  cast.ToFloat64(fmt.Sprintf("%.2f", (cpuUserUsage+cpuSystemUsage)*100)),
        }

        stat.Uptime, _ = host.Uptime()
        stat.LoadAvg, _ = load.Avg()

        stat.Disk, err = analytic2.GetDiskStat()
        if err != nil {
            logger.Error(err)
            return
        }

        network, _ := net.IOCounters(false)
        if len(network) > 0 {
            stat.Network = network[0]
        }

        // write
        err = ws.WriteJSON(stat)
        if err != nil || websocket.IsUnexpectedCloseError(err,
            websocket.CloseGoingAway,
            websocket.CloseNoStatusReceived,
            websocket.CloseNormalClosure) {
            logger.Error(err)
            break
        }
        // pause before the next push; together with the 1s CPU sampling
        // above, clients receive an update roughly every 1.8 seconds
        time.Sleep(800 * time.Millisecond)
    }
}

func GetAnalyticInit(c *gin.Context) {
    cpuInfo, _ := cpu.Info()
    network, _ := net.IOCounters(false)
    memory, err := analytic2.GetMemoryStat()
    if err != nil {
        logger.Error(err)
        return
    }

    diskStat, err := analytic2.GetDiskStat()
    if err != nil {
        logger.Error(err)
        return
    }

    var _net net.IOCountersStat
    if len(network) > 0 {
        _net = network[0]
    }

    hostInfo, _ := host.Info()
    // normalize platform names for display
    switch hostInfo.Platform {
    case "ubuntu":
        hostInfo.Platform = "Ubuntu"
    case "centos":
        hostInfo.Platform = "CentOS"
    }

    loadAvg, _ := load.Avg()

    c.JSON(http.StatusOK, gin.H{
        "host": hostInfo,
        "cpu": gin.H{
            "info":  cpuInfo,
            "user":  analytic2.CpuUserRecord,
            "total": analytic2.CpuTotalRecord,
        },
        "network": gin.H{
            "init":      _net,
            "bytesRecv": analytic2.NetRecvRecord,
            "bytesSent": analytic2.NetSentRecord,
        },
        "disk_io": gin.H{
            "writes": analytic2.DiskWriteRecord,
            "reads":  analytic2.DiskReadRecord,
        },
        "memory":  memory,
        "disk":    diskStat,
        "loadavg": loadAvg,
    })
}

func GetNodeStat(c *gin.Context) {
    var upGrader = websocket.Upgrader{
        CheckOrigin: func(r *http.Request) bool {
            return true
        },
    }
    // upgrade http to websocket
    ws, err := upGrader.Upgrade(c.Writer, c.Request, nil)
    if err != nil {
        logger.Error(err)
        return
    }

    defer ws.Close()

    for {
        // write
        err = ws.WriteJSON(analytic2.GetNodeStat())
        if err != nil || websocket.IsUnexpectedCloseError(err,
            websocket.CloseGoingAway,
            websocket.CloseNoStatusReceived,
            websocket.CloseNormalClosure) {
            logger.Error(err)
            break
        }

        time.Sleep(10 * time.Second)
    }
}

func GetNodesAnalytic(c *gin.Context) {
    var upGrader = websocket.Upgrader{
        CheckOrigin: func(r *http.Request) bool {
            return true
        },
    }
    // upgrade http to websocket
    ws, err := upGrader.Upgrade(c.Writer, c.Request, nil)
    if err != nil {
        logger.Error(err)
        return
    }

    defer ws.Close()

    for {
        // write
        err = ws.WriteJSON(analytic2.NodeMap)
        if err != nil || websocket.IsUnexpectedCloseError(err,
            websocket.CloseGoingAway,
            websocket.CloseNoStatusReceived,
            websocket.CloseNormalClosure) {
            logger.Error(err)
            break
        }

        time.Sleep(10 * time.Second)
    }
}
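
For context, the handlers above push JSON over a websocket rather than answering one-shot HTTP requests. A minimal client sketch for the Analytic stream is below; the URL (ws://localhost:9000/api/analytic) is an assumption for illustration, not part of this commit, and authentication is omitted. Only a subset of the Stat payload is decoded.

    package main

    import (
        "fmt"
        "log"

        "github.com/gorilla/websocket"
    )

    // Stat mirrors part of the JSON pushed by the Analytic handler;
    // fields not listed here are simply ignored when decoding.
    type Stat struct {
        Uptime uint64 `json:"uptime"`
        CPU    struct {
            User   float64 `json:"user"`
            System float64 `json:"system"`
        } `json:"cpu"`
    }

    func main() {
        // hypothetical endpoint; adjust to wherever the route is mounted
        ws, _, err := websocket.DefaultDialer.Dial("ws://localhost:9000/api/analytic", nil)
        if err != nil {
            log.Fatal(err)
        }
        defer ws.Close()

        // each ReadJSON call blocks until the server pushes the next Stat
        for {
            var stat Stat
            if err := ws.ReadJSON(&stat); err != nil {
                log.Fatal(err)
            }
            fmt.Printf("uptime=%ds cpu user=%.2f%% system=%.2f%%\n",
                stat.Uptime, stat.CPU.User, stat.CPU.System)
        }
    }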