loop performance optimizations / 1 (#3313)

* rangeValCopy: each iteration copies 248 bytes
* rangeValCopy: each iteration copies 576 bytes
* rangeValCopy: each iteration copies 376 bytes
* rangeValCopy: each iteration copies 312 bytes
* enable linter: gocritic/rangeValCopy
This commit is contained in:
mmetc 2024-12-05 18:04:26 +01:00 committed by GitHub
parent 7a1ad8376a
commit 411bb48a81
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
6 changed files with 89 additions and 37 deletions

View file

@@ -183,7 +183,6 @@ linters-settings:
       - ifElseChain
       - importShadow
       - hugeParam
-      - rangeValCopy
      - commentedOutCode
       - commentedOutImport
       - unnamedResult
@@ -465,3 +464,23 @@ issues:
         - recvcheck
       path: "pkg/cwhub/item.go"
       text: 'the methods of "Item" use pointer receiver and non-pointer receiver.'
+    - linters:
+        - gocritic
+      path: "cmd/crowdsec-cli"
+      text: "rangeValCopy: .*"
+    - linters:
+        - gocritic
+      path: "pkg/(cticlient|hubtest)"
+      text: "rangeValCopy: .*"
+    - linters:
+        - gocritic
+      path: "(.+)_test.go"
+      text: "rangeValCopy: .*"
+    - linters:
+        - gocritic
+      path: "pkg/(appsec|acquisition|dumps|alertcontext|leakybucket|exprhelpers)"
+      text: "rangeValCopy: .*"

View file

@@ -66,6 +66,7 @@ func (ka *KubernetesAuditSource) GetAggregMetrics() []prometheus.Collector {
 func (ka *KubernetesAuditSource) UnmarshalConfig(yamlConfig []byte) error {
 	k8sConfig := KubernetesAuditConfiguration{}
+
 	err := yaml.UnmarshalStrict(yamlConfig, &k8sConfig)
 	if err != nil {
 		return fmt.Errorf("cannot parse k8s-audit configuration: %w", err)
@@ -92,6 +93,7 @@ func (ka *KubernetesAuditSource) UnmarshalConfig(yamlConfig []byte) error {
 	if ka.config.Mode == "" {
 		ka.config.Mode = configuration.TAIL_MODE
 	}
+
 	return nil
 }
@@ -116,6 +118,7 @@ func (ka *KubernetesAuditSource) Configure(config []byte, logger *log.Entry, Met
 	}
 	ka.mux.HandleFunc(ka.config.WebhookPath, ka.webhookHandler)
+
 	return nil
 }
@@ -137,6 +140,7 @@ func (ka *KubernetesAuditSource) OneShotAcquisition(_ context.Context, _ chan ty
 func (ka *KubernetesAuditSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error {
 	ka.outChan = out
+
 	t.Go(func() error {
 		defer trace.CatchPanic("crowdsec/acquis/k8s-audit/live")
 		ka.logger.Infof("Starting k8s-audit server on %s:%d%s", ka.config.ListenAddr, ka.config.ListenPort, ka.config.WebhookPath)
@@ -145,13 +149,16 @@ func (ka *KubernetesAuditSource) StreamingAcquisition(ctx context.Context, out c
 		if err != nil && err != http.ErrServerClosed {
 			return fmt.Errorf("k8s-audit server failed: %w", err)
 		}
+
 		return nil
 	})
+
 	<-t.Dying()
 	ka.logger.Infof("Stopping k8s-audit server on %s:%d%s", ka.config.ListenAddr, ka.config.ListenPort, ka.config.WebhookPath)
 	ka.server.Shutdown(ctx)
+
 	return nil
 	})
 	return nil
 }
@@ -167,42 +174,52 @@ func (ka *KubernetesAuditSource) webhookHandler(w http.ResponseWriter, r *http.R
 	if ka.metricsLevel != configuration.METRICS_NONE {
 		requestCount.WithLabelValues(ka.addr).Inc()
 	}
+
 	if r.Method != http.MethodPost {
 		w.WriteHeader(http.StatusMethodNotAllowed)
 		return
 	}
+
 	ka.logger.Tracef("webhookHandler called")
+
 	var auditEvents audit.EventList

 	jsonBody, err := io.ReadAll(r.Body)
 	if err != nil {
 		ka.logger.Errorf("Error reading request body: %v", err)
 		w.WriteHeader(http.StatusInternalServerError)
 		return
 	}
+
 	ka.logger.Tracef("webhookHandler receveid: %s", string(jsonBody))
+
 	err = json.Unmarshal(jsonBody, &auditEvents)
 	if err != nil {
 		ka.logger.Errorf("Error decoding audit events: %s", err)
 		w.WriteHeader(http.StatusInternalServerError)
 		return
 	}
+
 	remoteIP := strings.Split(r.RemoteAddr, ":")[0]
-	for _, auditEvent := range auditEvents.Items {
+
+	for idx := range auditEvents.Items {
 		if ka.metricsLevel != configuration.METRICS_NONE {
 			eventCount.WithLabelValues(ka.addr).Inc()
 		}
-		bytesEvent, err := json.Marshal(auditEvent)
+
+		bytesEvent, err := json.Marshal(auditEvents.Items[idx])
 		if err != nil {
 			ka.logger.Errorf("Error serializing audit event: %s", err)
 			continue
 		}
+
 		ka.logger.Tracef("Got audit event: %s", string(bytesEvent))
+
 		l := types.Line{
 			Raw:     string(bytesEvent),
 			Labels:  ka.config.Labels,
-			Time:    auditEvent.StageTimestamp.Time,
+			Time:    auditEvents.Items[idx].StageTimestamp.Time,
 			Src:     remoteIP,
 			Process: true,
 			Module:  ka.GetName(),

View file

@@ -198,22 +198,24 @@ func eventSources(evt types.Event, leaky *Leaky) (map[string]models.Source, erro
 func EventsFromQueue(queue *types.Queue) []*models.Event {
 	events := []*models.Event{}
-	for _, evt := range queue.Queue {
-		if evt.Meta == nil {
+	qEvents := queue.GetQueue()
+
+	for idx := range qEvents {
+		if qEvents[idx].Meta == nil {
 			continue
 		}
 		meta := models.Meta{}
 		// we want consistence
-		skeys := make([]string, 0, len(evt.Meta))
-		for k := range evt.Meta {
+		skeys := make([]string, 0, len(qEvents[idx].Meta))
+		for k := range qEvents[idx].Meta {
 			skeys = append(skeys, k)
 		}
 		sort.Strings(skeys)
 		for _, k := range skeys {
-			v := evt.Meta[k]
+			v := qEvents[idx].Meta[k]
 			subMeta := models.MetaItems0{Key: k, Value: v}
 			meta = append(meta, &subMeta)
 		}
@@ -223,15 +225,15 @@ func EventsFromQueue(queue *types.Queue) []*models.Event {
 			Meta: meta,
 		}
 		// either MarshaledTime is present and is extracted from log
-		if evt.MarshaledTime != "" {
-			tmpTimeStamp := evt.MarshaledTime
+		if qEvents[idx].MarshaledTime != "" {
+			tmpTimeStamp := qEvents[idx].MarshaledTime
 			ovflwEvent.Timestamp = &tmpTimeStamp
-		} else if !evt.Time.IsZero() { // or .Time has been set during parse as time.Now().UTC()
+		} else if !qEvents[idx].Time.IsZero() { // or .Time has been set during parse as time.Now().UTC()
 			ovflwEvent.Timestamp = new(string)
-			raw, err := evt.Time.MarshalText()
+			raw, err := qEvents[idx].Time.MarshalText()
 			if err != nil {
-				log.Warningf("while serializing time '%s' : %s", evt.Time.String(), err)
+				log.Warningf("while serializing time '%s' : %s", qEvents[idx].Time.String(), err)
 			} else {
 				*ovflwEvent.Timestamp = string(raw)
 			}
@@ -253,8 +255,9 @@ func alertFormatSource(leaky *Leaky, queue *types.Queue) (map[string]models.Sour
 	log.Debugf("Formatting (%s) - scope Info : scope_type:%s / scope_filter:%s", leaky.Name, leaky.scopeType.Scope, leaky.scopeType.Filter)
-	for _, evt := range queue.Queue {
-		srcs, err := SourceFromEvent(evt, leaky)
+	qEvents := queue.GetQueue()
+	for idx := range qEvents {
+		srcs, err := SourceFromEvent(qEvents[idx], leaky)
 		if err != nil {
 			return nil, "", fmt.Errorf("while extracting scope from bucket %s: %w", leaky.Name, err)
 		}

View file

@@ -3,6 +3,7 @@ package parser
 import (
 	"errors"
 	"fmt"
+	"strconv"
 	"strings"
 	"time"
@@ -236,7 +237,7 @@ func (n *Node) processGrok(p *types.Event, cachedExprEnv map[string]any) (bool,
 		case string:
 			gstr = out
 		case int:
-			gstr = fmt.Sprintf("%d", out)
+			gstr = strconv.Itoa(out)
 		case float64, float32:
 			gstr = fmt.Sprintf("%f", out)
 		default:
@@ -357,16 +358,17 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri
 	}
 	// Iterate on leafs
-	for _, leaf := range n.LeavesNodes {
-		ret, err := leaf.process(p, ctx, cachedExprEnv)
+	leaves := n.LeavesNodes
+	for idx := range leaves {
+		ret, err := leaves[idx].process(p, ctx, cachedExprEnv)
 		if err != nil {
-			clog.Tracef("\tNode (%s) failed : %v", leaf.rn, err)
+			clog.Tracef("\tNode (%s) failed : %v", leaves[idx].rn, err)
 			clog.Debugf("Event leaving node : ko")
 			return false, err
 		}
-		clog.Tracef("\tsub-node (%s) ret : %v (strategy:%s)", leaf.rn, ret, n.OnSuccess)
+		clog.Tracef("\tsub-node (%s) ret : %v (strategy:%s)", leaves[idx].rn, ret, n.OnSuccess)
 		if ret {
 			NodeState = true
@@ -593,7 +595,7 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
 	/* compile leafs if present */
 	for idx := range n.LeavesNodes {
 		if n.LeavesNodes[idx].Name == "" {
-			n.LeavesNodes[idx].Name = fmt.Sprintf("child-%s", n.Name)
+			n.LeavesNodes[idx].Name = "child-" + n.Name
 		}
 		/*propagate debug/stats to child nodes*/
 		if !n.LeavesNodes[idx].Debug && n.Debug {

View file

@@ -29,10 +29,11 @@ func SetTargetByName(target string, value string, evt *types.Event) bool {
 		return false
 	}
-	//it's a hack, we do it for the user
+	// it's a hack, we do it for the user
 	target = strings.TrimPrefix(target, "evt.")
 	log.Debugf("setting target %s to %s", target, value)
+
 	defer func() {
 		if r := recover(); r != nil {
 			log.Errorf("Runtime error while trying to set '%s': %+v", target, r)
@@ -46,6 +47,7 @@ func SetTargetByName(target string, value string, evt *types.Event) bool {
 		//event is nil
 		return false
 	}
+
 	for _, f := range strings.Split(target, ".") {
 		/*
 		** According to current Event layout we only have to handle struct and map
@@ -57,7 +59,9 @@ func SetTargetByName(target string, value string, evt *types.Event) bool {
 			if (tmp == reflect.Value{}) || tmp.IsZero() {
 				log.Debugf("map entry is zero in '%s'", target)
 			}
+
 			iter.SetMapIndex(reflect.ValueOf(f), reflect.ValueOf(value))
+
 			return true
 		case reflect.Struct:
 			tmp := iter.FieldByName(f)
@@ -65,9 +69,11 @@ func SetTargetByName(target string, value string, evt *types.Event) bool {
 				log.Debugf("'%s' is not a valid target because '%s' is not valid", target, f)
 				return false
 			}
+
 			if tmp.Kind() == reflect.Ptr {
 				tmp = reflect.Indirect(tmp)
 			}
+
 			iter = tmp
 		case reflect.Ptr:
 			tmp := iter.Elem()
@@ -82,11 +88,14 @@ func SetTargetByName(target string, value string, evt *types.Event) bool {
 		log.Errorf("'%s' can't be set", target)
 		return false
 	}
+
 	if iter.Kind() != reflect.String {
 		log.Errorf("Expected string, got %v when handling '%s'", iter.Kind(), target)
 		return false
 	}
+
 	iter.Set(reflect.ValueOf(value))
+
 	return true
 }
@@ -321,46 +330,46 @@ func Parse(ctx UnixParserCtx, xp types.Event, nodes []Node) (types.Event, error)
 		}
 		isStageOK := false
-		for idx, node := range nodes {
+		for idx := range nodes {
 			//Only process current stage's nodes
-			if event.Stage != node.Stage {
+			if event.Stage != nodes[idx].Stage {
 				continue
 			}
 			clog := log.WithFields(log.Fields{
-				"node-name": node.rn,
+				"node-name": nodes[idx].rn,
 				"stage":     event.Stage,
 			})
-			clog.Tracef("Processing node %d/%d -> %s", idx, len(nodes), node.rn)
+			clog.Tracef("Processing node %d/%d -> %s", idx, len(nodes), nodes[idx].rn)
 			if ctx.Profiling {
-				node.Profiling = true
+				nodes[idx].Profiling = true
 			}
-			ret, err := node.process(&event, ctx, map[string]interface{}{"evt": &event})
+			ret, err := nodes[idx].process(&event, ctx, map[string]interface{}{"evt": &event})
 			if err != nil {
 				clog.Errorf("Error while processing node : %v", err)
 				return event, err
 			}
-			clog.Tracef("node (%s) ret : %v", node.rn, ret)
+			clog.Tracef("node (%s) ret : %v", nodes[idx].rn, ret)
 			if ParseDump {
 				var parserIdxInStage int
 				StageParseMutex.Lock()
-				if len(StageParseCache[stage][node.Name]) == 0 {
-					StageParseCache[stage][node.Name] = make([]dumps.ParserResult, 0)
+				if len(StageParseCache[stage][nodes[idx].Name]) == 0 {
+					StageParseCache[stage][nodes[idx].Name] = make([]dumps.ParserResult, 0)
 					parserIdxInStage = len(StageParseCache[stage])
 				} else {
-					parserIdxInStage = StageParseCache[stage][node.Name][0].Idx
+					parserIdxInStage = StageParseCache[stage][nodes[idx].Name][0].Idx
 				}
 				StageParseMutex.Unlock()
 				evtcopy := deepcopy.Copy(event)
 				parserInfo := dumps.ParserResult{Evt: evtcopy.(types.Event), Success: ret, Idx: parserIdxInStage}
 				StageParseMutex.Lock()
-				StageParseCache[stage][node.Name] = append(StageParseCache[stage][node.Name], parserInfo)
+				StageParseCache[stage][nodes[idx].Name] = append(StageParseCache[stage][nodes[idx].Name], parserInfo)
 				StageParseMutex.Unlock()
 			}
 			if ret {
 				isStageOK = true
 			}
-			if ret && node.OnSuccess == "next_stage" {
+			if ret && nodes[idx].OnSuccess == "next_stage" {
 				clog.Debugf("node successful, stop end stage %s", stage)
 				break
 			}

View file

@@ -60,6 +60,7 @@ func MakeEvent(timeMachine bool, evtType int, process bool) Event {
 	if timeMachine {
 		evt.ExpectMode = TIMEMACHINE
 	}
+
 	return evt
 }
@@ -97,8 +98,9 @@ func (e *Event) GetType() string {
 func (e *Event) GetMeta(key string) string {
 	if e.Type == OVFLW {
-		for _, alert := range e.Overflow.APIAlerts {
-			for _, event := range alert.Events {
+		alerts := e.Overflow.APIAlerts
+		for idx := range alerts {
+			for _, event := range alerts[idx].Events {
 				if event.GetMeta(key) != "" {
 					return event.GetMeta(key)
 				}