Mirror of https://github.com/0xJacky/nginx-ui.git (synced 2025-05-11 18:35:51 +02:00)

feat(nginx): performance optimization #850
parent b59da3e7e8 · commit 9d4070a211
29 changed files with 4250 additions and 2031 deletions
@@ -1,7 +1,7 @@
 package nginx
 
 import (
-	"os/exec"
+	"os"
 	"regexp"
 	"runtime"
 	"strconv"
@@ -10,29 +10,51 @@ import (
 )
 
 type NginxConfigInfo struct {
-	WorkerProcesses   int    `json:"worker_processes"`
-	WorkerConnections int    `json:"worker_connections"`
-	ProcessMode       string `json:"process_mode"`
+	WorkerProcesses           int    `json:"worker_processes"`
+	WorkerConnections         int    `json:"worker_connections"`
+	ProcessMode               string `json:"process_mode"`
+	KeepaliveTimeout          int    `json:"keepalive_timeout"`
+	Gzip                      string `json:"gzip"`
+	GzipMinLength             int    `json:"gzip_min_length"`
+	GzipCompLevel             int    `json:"gzip_comp_level"`
+	ClientMaxBodySize         string `json:"client_max_body_size"` // with unit
+	ServerNamesHashBucketSize int    `json:"server_names_hash_bucket_size"`
+	ClientHeaderBufferSize    string `json:"client_header_buffer_size"` // with unit
+	ClientBodyBufferSize      string `json:"client_body_buffer_size"` // with unit
 }
 
 // GetNginxWorkerConfigInfo Get Nginx config info of worker_processes and worker_connections
 func GetNginxWorkerConfigInfo() (*NginxConfigInfo, error) {
 	result := &NginxConfigInfo{
-		WorkerProcesses:   1,
-		WorkerConnections: 1024,
-		ProcessMode:       "manual",
+		WorkerProcesses:           1,
+		WorkerConnections:         1024,
+		ProcessMode:               "manual",
+		KeepaliveTimeout:          65,
+		Gzip:                      "off",
+		GzipMinLength:             1,
+		GzipCompLevel:             1,
+		ClientMaxBodySize:         "1m",
+		ServerNamesHashBucketSize: 32,
+		ClientHeaderBufferSize:    "1k",
+		ClientBodyBufferSize:      "8k",
 	}
 
-	// Get worker_processes config
-	cmd := exec.Command("nginx", "-T")
-	output, err := cmd.CombinedOutput()
-	if err != nil {
-		return result, errors.Wrap(err, "failed to get nginx config")
+	confPath := GetConfPath("nginx.conf")
+	if confPath == "" {
+		return nil, errors.New("failed to get nginx.conf path")
 	}
 
+	// Read the current configuration
+	content, err := os.ReadFile(confPath)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to read nginx.conf")
+	}
+
+	outputStr := string(content)
+
 	// Parse worker_processes
 	wpRe := regexp.MustCompile(`worker_processes\s+(\d+|auto);`)
-	if matches := wpRe.FindStringSubmatch(string(output)); len(matches) > 1 {
+	if matches := wpRe.FindStringSubmatch(outputStr); len(matches) > 1 {
 		if matches[1] == "auto" {
 			result.WorkerProcesses = runtime.NumCPU()
 			result.ProcessMode = "auto"
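Each directive above is recovered the same way: a regular expression with a single capture group is run over the raw nginx.conf text, and the first submatch is converted to the field's type. A standalone sketch of that pattern, using a hypothetical config fragment rather than a real nginx.conf:

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

func main() {
	// Hypothetical nginx.conf fragment, for illustration only.
	conf := "worker_processes auto;\nevents {\n\tworker_connections 2048;\n}\n"

	// Same capture-group style as the diff above.
	wcRe := regexp.MustCompile(`worker_connections\s+(\d+);`)
	if m := wcRe.FindStringSubmatch(conf); len(m) > 1 {
		n, _ := strconv.Atoi(m[1])
		fmt.Println("worker_connections =", n) // worker_connections = 2048
	}
}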
@@ -44,9 +66,57 @@ func GetNginxWorkerConfigInfo() (*NginxConfigInfo, error) {
 
 	// Parse worker_connections
 	wcRe := regexp.MustCompile(`worker_connections\s+(\d+);`)
-	if matches := wcRe.FindStringSubmatch(string(output)); len(matches) > 1 {
+	if matches := wcRe.FindStringSubmatch(outputStr); len(matches) > 1 {
 		result.WorkerConnections, _ = strconv.Atoi(matches[1])
 	}
 
+	// Parse keepalive_timeout
+	ktRe := regexp.MustCompile(`keepalive_timeout\s+(\d+);`)
+	if matches := ktRe.FindStringSubmatch(outputStr); len(matches) > 1 {
+		result.KeepaliveTimeout, _ = strconv.Atoi(matches[1])
+	}
+
+	// Parse gzip
+	gzipRe := regexp.MustCompile(`gzip\s+(on|off);`)
+	if matches := gzipRe.FindStringSubmatch(outputStr); len(matches) > 1 {
+		result.Gzip = matches[1]
+	}
+
+	// Parse gzip_min_length
+	gzipMinRe := regexp.MustCompile(`gzip_min_length\s+(\d+);`)
+	if matches := gzipMinRe.FindStringSubmatch(outputStr); len(matches) > 1 {
+		result.GzipMinLength, _ = strconv.Atoi(matches[1])
+	}
+
+	// Parse gzip_comp_level
+	gzipCompRe := regexp.MustCompile(`gzip_comp_level\s+(\d+);`)
+	if matches := gzipCompRe.FindStringSubmatch(outputStr); len(matches) > 1 {
+		result.GzipCompLevel, _ = strconv.Atoi(matches[1])
+	}
+
+	// Parse client_max_body_size with any unit (k, m, g)
+	cmaxRe := regexp.MustCompile(`client_max_body_size\s+(\d+[kmg]?);`)
+	if matches := cmaxRe.FindStringSubmatch(outputStr); len(matches) > 1 {
+		result.ClientMaxBodySize = matches[1]
+	}
+
+	// Parse server_names_hash_bucket_size
+	hashRe := regexp.MustCompile(`server_names_hash_bucket_size\s+(\d+);`)
+	if matches := hashRe.FindStringSubmatch(outputStr); len(matches) > 1 {
+		result.ServerNamesHashBucketSize, _ = strconv.Atoi(matches[1])
+	}
+
+	// Parse client_header_buffer_size with any unit (k, m, g)
+	headerRe := regexp.MustCompile(`client_header_buffer_size\s+(\d+[kmg]?);`)
+	if matches := headerRe.FindStringSubmatch(outputStr); len(matches) > 1 {
+		result.ClientHeaderBufferSize = matches[1]
+	}
+
+	// Parse client_body_buffer_size with any unit (k, m, g)
+	bodyRe := regexp.MustCompile(`client_body_buffer_size\s+(\d+[kmg]?);`)
+	if matches := bodyRe.FindStringSubmatch(outputStr); len(matches) > 1 {
+		result.ClientBodyBufferSize = matches[1]
+	}
+
 	return result, nil
 }
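With this change, GetNginxWorkerConfigInfo no longer shells out to `nginx -T`; it reads nginx.conf from GetConfPath and parses the additional tuning directives with regular expressions. A minimal usage sketch follows; the wrapping program and the module import path are assumptions for illustration, not part of this commit:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/0xJacky/Nginx-UI/internal/nginx" // import path assumed from the repository layout
)

func main() {
	info, err := nginx.GetNginxWorkerConfigInfo()
	if err != nil {
		log.Fatalln(err)
	}

	// Print the parsed settings as JSON, matching the struct's json tags.
	out, _ := json.MarshalIndent(info, "", "  ")
	fmt.Println(string(out))
}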
internal/nginx/perf_opt.go (new file, 148 lines)

@@ -0,0 +1,148 @@
package nginx

import (
	"fmt"
	"os"
	"sort"
	"time"

	"github.com/pkg/errors"
	"github.com/tufanbarisyildirim/gonginx/config"
	"github.com/tufanbarisyildirim/gonginx/dumper"
	"github.com/tufanbarisyildirim/gonginx/parser"
)

// PerfOpt represents Nginx performance optimization settings
type PerfOpt struct {
	WorkerProcesses           string `json:"worker_processes"`              // auto or number
	WorkerConnections         string `json:"worker_connections"`            // max connections
	KeepaliveTimeout          string `json:"keepalive_timeout"`             // timeout in seconds
	Gzip                      string `json:"gzip"`                          // on or off
	GzipMinLength             string `json:"gzip_min_length"`               // min length to compress
	GzipCompLevel             string `json:"gzip_comp_level"`               // compression level
	ClientMaxBodySize         string `json:"client_max_body_size"`          // max body size (with unit: k, m, g)
	ServerNamesHashBucketSize string `json:"server_names_hash_bucket_size"` // hash bucket size
	ClientHeaderBufferSize    string `json:"client_header_buffer_size"`     // header buffer size (with unit: k, m, g)
	ClientBodyBufferSize      string `json:"client_body_buffer_size"`       // body buffer size (with unit: k, m, g)
}

// UpdatePerfOpt updates the Nginx performance optimization settings
func UpdatePerfOpt(opt *PerfOpt) error {
	confPath := GetConfPath("nginx.conf")
	if confPath == "" {
		return errors.New("failed to get nginx.conf path")
	}

	// Read the current configuration
	content, err := os.ReadFile(confPath)
	if err != nil {
		return errors.Wrap(err, "failed to read nginx.conf")
	}

	// Create a backup file
	backupPath := fmt.Sprintf("%s.backup.%d", confPath, time.Now().Unix())
	err = os.WriteFile(backupPath, content, 0644)
	if err != nil {
		return errors.Wrap(err, "failed to create backup file")
	}

	// Parse the configuration
	p := parser.NewStringParser(string(content), parser.WithSkipValidDirectivesErr())
	conf, err := p.Parse()
	if err != nil {
		return errors.Wrap(err, "failed to parse nginx.conf")
	}

	// Process the configuration and update performance settings
	updateNginxConfig(conf.Block, opt)

	// Dump the updated configuration
	updatedConf := dumper.DumpBlock(conf.Block, dumper.IndentedStyle)

	// Write the updated configuration
	err = os.WriteFile(confPath, []byte(updatedConf), 0644)
	if err != nil {
		return errors.Wrap(err, "failed to write updated nginx.conf")
	}

	return nil
}

// updateNginxConfig updates the performance settings in the Nginx configuration
func updateNginxConfig(block config.IBlock, opt *PerfOpt) {
	if block == nil {
		return
	}

	directives := block.GetDirectives()
	// Update main context directives
	updateOrAddDirective(block, directives, "worker_processes", opt.WorkerProcesses)

	// Look for events, http, and other blocks
	for _, directive := range directives {
		if directive.GetName() == "events" && directive.GetBlock() != nil {
			// Update events block directives
			eventsBlock := directive.GetBlock()
			eventsDirectives := eventsBlock.GetDirectives()
			updateOrAddDirective(eventsBlock, eventsDirectives, "worker_connections", opt.WorkerConnections)
		} else if directive.GetName() == "http" && directive.GetBlock() != nil {
			// Update http block directives
			httpBlock := directive.GetBlock()
			httpDirectives := httpBlock.GetDirectives()
			updateOrAddDirective(httpBlock, httpDirectives, "keepalive_timeout", opt.KeepaliveTimeout)
			updateOrAddDirective(httpBlock, httpDirectives, "gzip", opt.Gzip)
			updateOrAddDirective(httpBlock, httpDirectives, "gzip_min_length", opt.GzipMinLength)
			updateOrAddDirective(httpBlock, httpDirectives, "gzip_comp_level", opt.GzipCompLevel)
			updateOrAddDirective(httpBlock, httpDirectives, "client_max_body_size", opt.ClientMaxBodySize)
			updateOrAddDirective(httpBlock, httpDirectives, "server_names_hash_bucket_size", opt.ServerNamesHashBucketSize)
			updateOrAddDirective(httpBlock, httpDirectives, "client_header_buffer_size", opt.ClientHeaderBufferSize)
			updateOrAddDirective(httpBlock, httpDirectives, "client_body_buffer_size", opt.ClientBodyBufferSize)
		}
	}
}

// updateOrAddDirective updates a directive if it exists, or adds it to the block if it doesn't
func updateOrAddDirective(block config.IBlock, directives []config.IDirective, name string, value string) {
	if value == "" {
		return
	}

	// Search for existing directive
	for _, directive := range directives {
		if directive.GetName() == name {
			// Update existing directive
			if len(directive.GetParameters()) > 0 {
				directive.GetParameters()[0].Value = value
			}
			return
		}
	}

	// If we get here, we need to add a new directive
	// Create a new directive and add it to the block
	// This requires knowledge of the underlying implementation
	// For now, we'll use the Directive type from gonginx/config
	newDirective := &config.Directive{
		Name:       name,
		Parameters: []config.Parameter{{Value: value}},
	}

	// Add the new directive to the block
	// This is specific to the gonginx library implementation
	switch block := block.(type) {
	case *config.Config:
		block.Block.Directives = append(block.Block.Directives, newDirective)
	case *config.Block:
		block.Directives = append(block.Directives, newDirective)
	case *config.HTTP:
		block.Directives = append(block.Directives, newDirective)
	}
}

// sortDirectives sorts directives alphabetically by name
func sortDirectives(directives []config.IDirective) {
	sort.SliceStable(directives, func(i, j int) bool {
		// Ensure both i and j can return valid names
		return directives[i].GetName() < directives[j].GetName()
	})
}
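UpdatePerfOpt is the write-side counterpart of GetNginxWorkerConfigInfo: it backs up nginx.conf, rewrites the relevant directives through the gonginx parser and dumper, and leaves testing and reloading nginx to the caller. A hedged usage sketch; the wrapping program and the module import path are assumptions, not part of this commit:

package main

import (
	"log"

	"github.com/0xJacky/Nginx-UI/internal/nginx" // import path assumed from the repository layout
)

func main() {
	// Empty fields are skipped by updateOrAddDirective, so only the
	// directives set here are touched in nginx.conf.
	opt := &nginx.PerfOpt{
		WorkerProcesses:   "auto",
		WorkerConnections: "2048",
		KeepaliveTimeout:  "65",
		Gzip:              "on",
		GzipCompLevel:     "5",
		ClientMaxBodySize: "64m",
	}

	if err := nginx.UpdatePerfOpt(opt); err != nil {
		log.Fatalln(err)
	}

	// A backup of the previous nginx.conf is written next to it as
	// nginx.conf.backup.<unix-timestamp> before the file is overwritten.
	log.Println("nginx.conf updated; reload nginx to apply the new settings")
}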