lint/gocritic: enable importShadow, typeUnparen, unnecessaryDefer (#3583)

Author: mmetc
Date:   2025-04-24 11:12:38 +02:00 (committed by GitHub)
parent 34e306505c
commit 418a27596e
17 changed files with 62 additions and 65 deletions

@@ -158,21 +158,18 @@ linters:
       disabled-checks:
         - paramTypeCombine
         - ifElseChain
-        - importShadow
         - hugeParam
         - commentedOutCode
         - commentedOutImport
         - unnamedResult
         - sloppyReassign
         - appendCombine
-        - typeUnparen
         - commentFormatting
         - deferInLoop #
         - whyNoLint
         - equalFold #
         - unnecessaryBlock #
         - tooManyResultsChecker
-        - unnecessaryDefer
         - docStub
         - preferFprint

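Removing an entry from the disabled-checks list turns that check on. Of the three newly enabled checks, importShadow is the only one with no corresponding fix in the hunks below, which suggests the code base already passed it. For reference, a minimal sketch (not from this repository) of the pattern importShadow flags:

    package example

    import "strings"

    // importShadow would flag this assignment: the local variable
    // "strings" shadows the imported "strings" package for the rest
    // of normalize, making the package inaccessible there.
    func normalize(parts []string) string {
    	strings := parts // flagged by importShadow
    	return joinDots(strings)
    }

    func joinDots(s []string) string {
    	return strings.Join(s, ".")
    }
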
@@ -312,7 +312,7 @@ func (cli *cliConsole) newStatusCmd() *cobra.Command {
 		case "human":
 			cmdConsoleStatusTable(color.Output, cfg.Cscli.Color, *consoleCfg)
 		case "json":
-			out := map[string](*bool){
+			out := map[string]*bool{
 				csconfig.SEND_MANUAL_SCENARIOS:  consoleCfg.ShareManualDecisions,
 				csconfig.SEND_CUSTOM_SCENARIOS:  consoleCfg.ShareCustomScenarios,
 				csconfig.SEND_TAINTED_SCENARIOS: consoleCfg.ShareTaintedScenarios,

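The hunk above is a typeUnparen fix: the parentheses around *bool in the map value type are redundant. The same check accounts for the ([]byte)(tc.config) and chan (bool) cleanups further down. A minimal sketch of the three flavors it flags:

    package example

    // Parenthesized types flagged by typeUnparen, with the
    // canonical spelling in the trailing comment.
    var (
    	m map[string](*bool) // want: map[string]*bool
    	c chan (bool)        // want: chan bool
    )

    func toBytes(s string) []byte {
    	return ([]byte)(s) // want: []byte(s)
    }
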
@@ -99,12 +99,12 @@ func AppsecEventGeneration(inEvt types.Event, request *http.Request) (*types.Eve
 	alert.EventsCount = ptr.Of(int32(len(alert.Events)))
 	alert.Leakspeed = ptr.Of("")
-	alert.Scenario = ptr.Of(inEvt.Appsec.MatchedRules.GetName())
-	alert.ScenarioHash = ptr.Of(inEvt.Appsec.MatchedRules.GetHash())
-	alert.ScenarioVersion = ptr.Of(inEvt.Appsec.MatchedRules.GetVersion())
+	alert.Scenario = ptr.Of(inEvt.Appsec.GetName())
+	alert.ScenarioHash = ptr.Of(inEvt.Appsec.GetHash())
+	alert.ScenarioVersion = ptr.Of(inEvt.Appsec.GetVersion())
 	alert.Simulated = ptr.Of(false)
 	alert.Source = &source
-	msg := fmt.Sprintf("AppSec block: %s from %s (%s)", inEvt.Appsec.MatchedRules.GetName(),
+	msg := fmt.Sprintf("AppSec block: %s from %s (%s)", inEvt.Appsec.GetName(),
 		alert.Source.IP, inEvt.Parsed["remediation_cmpt_ip"])
 	alert.Message = &msg
 	alert.StartAt = ptr.Of(time.Now().UTC().Format(time.RFC3339))
@@ -278,7 +278,7 @@ func (r *AppsecRunner) AccumulateTxToEvent(evt *types.Event, req *appsec.ParsedR
 			matchedZones = append(matchedZones, zone)
 		}
 
-		corazaRule := map[string]interface{}{
+		corazaRule := map[string]any{
 			"id":        rule.Rule().ID(),
 			"uri":       evt.Parsed["target_uri"],
 			"rule_type": kind,

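The map[string]interface{} to map[string]any rewrite recurs throughout the rest of this commit. It is purely cosmetic: since Go 1.18, any is a built-in alias for interface{} (type any = interface{}), so the two spellings denote the identical type and the change cannot affect behavior. A quick demonstration:

    package main

    import "fmt"

    func main() {
    	// Identical types: any is an alias, not a new type,
    	// so the assignment needs no conversion.
    	oldStyle := map[string]interface{}{"id": 42}
    	var newStyle map[string]any = oldStyle

    	fmt.Println(newStyle["id"]) // 42
    }
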
@@ -57,7 +57,7 @@ type CloudwatchSuite struct {
 	suite.Suite
 }
 
-func (s *CloudwatchSuite) SetupSuite() {
+func (*CloudwatchSuite) SetupSuite() {
 	def_PollNewStreamInterval = 1 * time.Second
 	def_PollStreamInterval = 1 * time.Second
 	def_StreamReadTimeout = 10 * time.Second
@@ -308,7 +308,7 @@ stream_name: test_stream`,
 			})
 			require.NoError(t, err)
 		},
-		run: func(t *testing.T, cw *CloudwatchSource) {
+		run: func(_ *testing.T, _ *CloudwatchSource) {
 			// wait for new stream pickup + stream poll interval
 			time.Sleep(def_PollNewStreamInterval + (1 * time.Second))
 			time.Sleep(def_PollStreamInterval + (1 * time.Second))
@@ -325,7 +325,7 @@ stream_name: test_stream`,
 			dbgLogger.Infof("starting test")
 			cw := CloudwatchSource{}
-			err := cw.Configure(([]byte)(tc.config), dbgLogger, configuration.METRICS_NONE)
+			err := cw.Configure([]byte(tc.config), dbgLogger, configuration.METRICS_NONE)
 			cstest.RequireErrorContains(s.T(), err, tc.expectedCfgErr)
 
 			if tc.expectedCfgErr != "" {
@@ -442,7 +442,7 @@ stream_name: test_stream`,
 			dbgLogger.Logger.SetLevel(logrus.DebugLevel)
 			cw := CloudwatchSource{}
-			err := cw.Configure(([]byte)(tc.config), dbgLogger, configuration.METRICS_NONE)
+			err := cw.Configure([]byte(tc.config), dbgLogger, configuration.METRICS_NONE)
 			cstest.RequireErrorContains(s.T(), err, tc.expectedCfgErr)
 
 			if tc.expectedCfgErr != "" {

@@ -115,7 +115,7 @@ func (k *KafkaSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLev
 	return nil
 }
 
-func (k *KafkaSource) ConfigureByDSN(string, map[string]string, *log.Entry, string) error {
+func (*KafkaSource) ConfigureByDSN(string, map[string]string, *log.Entry, string) error {
 	return fmt.Errorf("%s datasource does not support command-line acquisition", dataSourceName)
 }
 
@@ -123,27 +123,27 @@ func (k *KafkaSource) GetMode() string {
 	return k.Config.Mode
 }
 
-func (k *KafkaSource) GetName() string {
+func (*KafkaSource) GetName() string {
 	return dataSourceName
 }
 
-func (k *KafkaSource) OneShotAcquisition(_ context.Context, _ chan types.Event, _ *tomb.Tomb) error {
+func (*KafkaSource) OneShotAcquisition(_ context.Context, _ chan types.Event, _ *tomb.Tomb) error {
 	return fmt.Errorf("%s datasource does not support one-shot acquisition", dataSourceName)
 }
 
-func (k *KafkaSource) CanRun() error {
+func (*KafkaSource) CanRun() error {
 	return nil
 }
 
-func (k *KafkaSource) GetMetrics() []prometheus.Collector {
+func (*KafkaSource) GetMetrics() []prometheus.Collector {
 	return []prometheus.Collector{linesRead}
 }
 
-func (k *KafkaSource) GetAggregMetrics() []prometheus.Collector {
+func (*KafkaSource) GetAggregMetrics() []prometheus.Collector {
 	return []prometheus.Collector{linesRead}
 }
 
-func (k *KafkaSource) Dump() interface{} {
+func (k *KafkaSource) Dump() any {
 	return k
 }

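Dropping the receiver name where the body never uses it (every method here except Dump) mirrors the blank-identifier parameters in the cloudwatch tests above. This is presumably a companion style cleanup driven by an unused-receiver rule rather than by the three gocritic checks named in the title. The idiom:

    package example

    type Source struct{ name string }

    // Receiver unused in the body: omit its name entirely.
    func (*Source) CanRun() error { return nil }

    // Receiver used: keep the name.
    func (s *Source) Dump() any { return s }
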
@@ -399,6 +399,8 @@ func TestDeleteDecisions(t *testing.T) {
 	ctx := t.Context()
 
 	mux, urlx, teardown := setup()
+	defer teardown()
+
 	mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) {
 		w.WriteHeader(http.StatusOK)
 		_, err := w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`))
@@ -433,8 +435,6 @@ func TestDeleteDecisions(t *testing.T) {
 	deleted, _, err := client.Decisions.Delete(ctx, filters)
 	require.NoError(t, err)
 	assert.Equal(t, "1", deleted.NbDeleted)
-
-	defer teardown()
 }
 
 func TestDecisionsStreamOpts_addQueryParamsToURL(t *testing.T) {

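This is the unnecessaryDefer fix: the check flags a deferred call that sits immediately before the end of the function, where deferring adds nothing. Instead of dropping the defer, the commit moves defer teardown() up next to setup(), which is also the more robust placement, since teardown now runs even if an assertion panics mid-test. The pattern, sketched:

    package example

    func setup() (teardown func()) {
    	return func() {}
    }

    // Before: flagged by unnecessaryDefer, because the deferred
    // call is the last statement before returning.
    func before() {
    	teardown := setup()
    	// ... body ...
    	defer teardown() // flagged: defer buys nothing here
    }

    // After: deferred right after acquisition, so cleanup also
    // runs if the body panics.
    func after() {
    	teardown := setup()
    	defer teardown()
    	// ... body ...
    }
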
@@ -396,11 +396,7 @@ func (a *apic) Send(ctx context.Context, cacheOrig *models.AddSignalsRequest) {
 	batchSize := 50
 
 	for start := 0; start < len(cache); start += batchSize {
-		end := start + batchSize
-		if end > len(cache) {
-			end = len(cache)
-		}
+		end := min(start+batchSize, len(cache))
 
 		if err := a.sendBatch(ctx, cache[start:end]); err != nil {
 			log.Errorf("sending signal to central API: %s", err)

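The hand-rolled clamp becomes a call to the min built-in, available for all ordered types since Go 1.21. A self-contained sketch of the batching idiom:

    package main

    import "fmt"

    func main() {
    	items := make([]int, 103)
    	const batchSize = 50

    	for start := 0; start < len(items); start += batchSize {
    		// min clamps the final batch to the slice length,
    		// replacing the explicit "if end > len(items)" fix-up.
    		end := min(start+batchSize, len(items))
    		fmt.Printf("batch [%d:%d)\n", start, end)
    	}
    }
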
@@ -251,7 +251,7 @@ func (a *apic) fetchMachineIDs(ctx context.Context) ([]string, error) {
 // Metrics are sent at start, then at the randomized metricsIntervalFirst,
 // then at regular metricsInterval. If a change is detected in the list
 // of machines, the next metrics are sent immediately.
-func (a *apic) SendMetrics(ctx context.Context, stop chan (bool)) {
+func (a *apic) SendMetrics(ctx context.Context, stop chan bool) {
 	defer trace.CatchPanic("lapi/metricsToAPIC")
 
 	// verify the list of machines every <checkInt> interval

@@ -29,7 +29,7 @@ type JWT struct {
 	TlsAuth *TLSAuth
 }
 
-func PayloadFunc(data interface{}) jwt.MapClaims {
+func PayloadFunc(data any) jwt.MapClaims {
 	if value, ok := data.(*models.WatcherAuthRequest); ok {
 		return jwt.MapClaims{
 			MachineIDKey: &value.MachineID,
@@ -39,7 +39,7 @@ func PayloadFunc(data interface{}) jwt.MapClaims {
 	return jwt.MapClaims{}
 }
 
-func IdentityHandler(c *gin.Context) interface{} {
+func IdentityHandler(c *gin.Context) any {
 	claims := jwt.ExtractClaims(c)
 	machineID := claims[MachineIDKey].(string)
 
@@ -172,7 +172,7 @@ func (j *JWT) authPlain(c *gin.Context) (*authInput, error) {
 	return &ret, nil
 }
 
-func (j *JWT) Authenticator(c *gin.Context) (interface{}, error) {
+func (j *JWT) Authenticator(c *gin.Context) (any, error) {
 	var (
 		err  error
 		auth *authInput
@@ -248,7 +248,7 @@ func (j *JWT) Authenticator(c *gin.Context) (interface{}, error) {
 	}, nil
 }
 
-func Authorizator(data interface{}, c *gin.Context) bool {
+func Authorizator(data any, c *gin.Context) bool {
 	return true
 }

@@ -92,17 +92,18 @@ func (pw *PluginWatcher) watchPluginTicker(pluginName string) {
 	threshold := pw.PluginConfigByName[pluginName].GroupThreshold
 
 	//only size is set
-	if threshold > 0 && interval == 0 {
+	switch {
+	case threshold > 0 && interval == 0:
 		watchCount = threshold
 		watchTime = DefaultEmptyTicker
-	} else if interval != 0 && threshold == 0 {
+	case interval != 0 && threshold == 0:
 		//only time is set
 		watchTime = interval
-	} else if interval != 0 && threshold != 0 {
+	case interval != 0 && threshold != 0:
 		//both are set
 		watchTime = DefaultEmptyTicker
 		watchCount = threshold
-	} else {
+	default:
 		//none are set, we sent every event we receive
 		watchTime = DefaultEmptyTicker
 		watchCount = 1

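This and the remaining switch hunks flatten if/else-if chains into tagless switches. Notably, ifElseChain is still listed under disabled-checks above, so these read as opportunistic cleanups made while touching the files rather than fixes demanded by the newly enabled checks. The shape of the rewrite:

    package example

    func describe(threshold, interval int) string {
    	// A tagless switch presents mutually exclusive conditions
    	// as a flat list instead of a nested if/else chain.
    	switch {
    	case threshold > 0 && interval == 0:
    		return "size only"
    	case interval != 0 && threshold == 0:
    		return "time only"
    	case interval != 0 && threshold != 0:
    		return "both"
    	default:
    		return "neither"
    	}
    }
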
@@ -133,11 +133,12 @@ func handleAlertIPv6Predicates(ip_sz int, contains bool, start_ip, start_sfx, en
 }
 
 func handleAlertIPPredicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) error {
-	if ip_sz == 4 {
+	switch {
+	case ip_sz == 4:
 		handleAlertIPv4Predicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, predicates)
-	} else if ip_sz == 16 {
+	case ip_sz == 16:
 		handleAlertIPv6Predicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, predicates)
-	} else if ip_sz != 0 {
+	case ip_sz != 0:
 		return errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz)
 	}

@@ -117,7 +117,7 @@ func (o *OpOutput) String() string {
 	return ret + ""
 }
 
-func (erp ExprRuntimeDebug) extractCode(ip int, program *vm.Program) string {
+func (ExprRuntimeDebug) extractCode(ip int, program *vm.Program) string {
 	locations := program.Locations()
 	src := string(program.Source())
 
@@ -356,7 +356,7 @@ func (erp ExprRuntimeDebug) ipSeek(ip int) []string {
 	return nil
 }
 
-func Run(program *vm.Program, env interface{}, logger *log.Entry, debug bool) (any, error) {
+func Run(program *vm.Program, env any, logger *log.Entry, debug bool) (any, error) {
 	if debug {
 		dbgInfo, ret, err := RunWithDebug(program, env, logger)
 		DisplayExprDebug(program, dbgInfo, logger, ret)
@@ -383,7 +383,7 @@ func DisplayExprDebug(program *vm.Program, outputs []OpOutput, logger *log.Entry
 }
 
 // TBD: Based on the level of the logger (ie. trace vs debug) we could decide to add more low level instructions (pop, push, etc.)
-func RunWithDebug(program *vm.Program, env interface{}, logger *log.Entry) ([]OpOutput, any, error) {
+func RunWithDebug(program *vm.Program, env any, logger *log.Entry) ([]OpOutput, any, error) {
 	outputs := []OpOutput{}
 	erp := ExprRuntimeDebug{
 		Logger: logger,

@@ -875,11 +875,12 @@ func ParseKV(params ...any) (any, error) {
 		value := ""
 
 		for i, name := range keyValuePattern.SubexpNames() {
-			if name == "key" {
+			switch {
+			case name == "key":
 				key = match[i]
-			} else if name == "quoted_value" && match[i] != "" {
+			case name == "quoted_value" && match[i] != "":
 				value = match[i]
-			} else if name == "value" && match[i] != "" {
+			case name == "value" && match[i] != "":
 				value = match[i]
 			}
 		}

@@ -43,7 +43,7 @@ type BucketFactory struct {
 	GroupBy   string `yaml:"groupby,omitempty"` // groupy is an expr that allows to determine the partitions of the bucket. A common example is the source_ip
 	Distinct  string `yaml:"distinct"` // Distinct, when present, adds a `Pour()` processor that will only pour uniq items (based on distinct expr result)
 	Debug     bool   `yaml:"debug"` // Debug, when set to true, will enable debugging for _this_ scenario specifically
-	Labels    map[string]interface{} `yaml:"labels"` // Labels is K:V list aiming at providing context the overflow
+	Labels    map[string]any `yaml:"labels"` // Labels is K:V list aiming at providing context the overflow
 	Blackhole string `yaml:"blackhole,omitempty"` // Blackhole is a duration that, if present, will prevent same bucket partition to overflow more often than $duration
 	logger    *log.Entry // logger is bucket-specific logger (used by Debug as well)
 	Reprocess bool   `yaml:"reprocess"` // Reprocess, if true, will for the bucket to be re-injected into processing chain
@@ -225,7 +225,7 @@ func compileScopeFilter(bucketFactory *BucketFactory) error {
 		return errors.New("filter is mandatory for non-IP, non-Range scope")
 	}
 
-	runTimeFilter, err := expr.Compile(bucketFactory.ScopeType.Filter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
+	runTimeFilter, err := expr.Compile(bucketFactory.ScopeType.Filter, exprhelpers.GetExprOptions(map[string]any{"evt": &types.Event{}})...)
 	if err != nil {
 		return fmt.Errorf("error compiling the scope filter: %w", err)
 	}
@@ -381,13 +381,13 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error {
 		return errors.New("bucket without filter directive")
 	}
 
-	bucketFactory.RunTimeFilter, err = expr.Compile(bucketFactory.Filter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
+	bucketFactory.RunTimeFilter, err = expr.Compile(bucketFactory.Filter, exprhelpers.GetExprOptions(map[string]any{"evt": &types.Event{}})...)
 	if err != nil {
 		return fmt.Errorf("invalid filter '%s' in %s: %w", bucketFactory.Filter, bucketFactory.Filename, err)
 	}
 
 	if bucketFactory.GroupBy != "" {
-		bucketFactory.RunTimeGroupBy, err = expr.Compile(bucketFactory.GroupBy, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
+		bucketFactory.RunTimeGroupBy, err = expr.Compile(bucketFactory.GroupBy, exprhelpers.GetExprOptions(map[string]any{"evt": &types.Event{}})...)
 		if err != nil {
 			return fmt.Errorf("invalid groupby '%s' in %s: %w", bucketFactory.GroupBy, bucketFactory.Filename, err)
 		}
@@ -415,7 +415,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error {
 		bucketFactory.logger.Tracef("Adding a non duplicate filter")
 		bucketFactory.processors = append(bucketFactory.processors, &Uniq{})
 		// we're compiling and discarding the expression to be able to detect it during loading
-		_, err = expr.Compile(bucketFactory.Distinct, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
+		_, err = expr.Compile(bucketFactory.Distinct, exprhelpers.GetExprOptions(map[string]any{"evt": &types.Event{}})...)
 		if err != nil {
 			return fmt.Errorf("invalid distinct '%s' in %s: %w", bucketFactory.Distinct, bucketFactory.Filename, err)
 		}
@@ -425,7 +425,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error {
 		bucketFactory.logger.Tracef("Adding a cancel_on filter")
 		bucketFactory.processors = append(bucketFactory.processors, &CancelOnFilter{})
 		// we're compiling and discarding the expression to be able to detect it during loading
-		_, err = expr.Compile(bucketFactory.CancelOnFilter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
+		_, err = expr.Compile(bucketFactory.CancelOnFilter, exprhelpers.GetExprOptions(map[string]any{"evt": &types.Event{}})...)
 		if err != nil {
 			return fmt.Errorf("invalid cancel_on '%s' in %s: %w", bucketFactory.CancelOnFilter, bucketFactory.Filename, err)
 		}
@@ -459,7 +459,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error {
 		bucketFactory.logger.Tracef("Adding conditional overflow")
 		bucketFactory.processors = append(bucketFactory.processors, &ConditionalOverflow{})
 		// we're compiling and discarding the expression to be able to detect it during loading
-		_, err = expr.Compile(bucketFactory.ConditionalOverflow, exprhelpers.GetExprOptions(map[string]interface{}{"queue": &types.Queue{}, "leaky": &Leaky{}, "evt": &types.Event{}})...)
+		_, err = expr.Compile(bucketFactory.ConditionalOverflow, exprhelpers.GetExprOptions(map[string]any{"queue": &types.Queue{}, "leaky": &Leaky{}, "evt": &types.Event{}})...)
 		if err != nil {
 			return fmt.Errorf("invalid condition '%s' in %s: %w", bucketFactory.ConditionalOverflow, bucketFactory.Filename, err)
 		}

@@ -59,7 +59,7 @@ func overflowEventSources(evt types.Event, leaky *Leaky) (map[string]models.Sour
 	}
 
 	if leaky.scopeType.RunTimeFilter != nil {
-		retValue, err := exprhelpers.Run(leaky.scopeType.RunTimeFilter, map[string]interface{}{"evt": &evt}, leaky.logger, leaky.BucketConfig.Debug)
+		retValue, err := exprhelpers.Run(leaky.scopeType.RunTimeFilter, map[string]any{"evt": &evt}, leaky.logger, leaky.BucketConfig.Debug)
 		if err != nil {
 			return srcs, fmt.Errorf("while running scope filter: %w", err)
 		}
@@ -156,7 +156,7 @@ func eventSources(evt types.Event, leaky *Leaky) (map[string]models.Source, erro
 		src.Value = &src.Range
 
 		if leaky.scopeType.RunTimeFilter != nil {
-			retValue, err := exprhelpers.Run(leaky.scopeType.RunTimeFilter, map[string]interface{}{"evt": &evt}, leaky.logger, leaky.BucketConfig.Debug)
+			retValue, err := exprhelpers.Run(leaky.scopeType.RunTimeFilter, map[string]any{"evt": &evt}, leaky.logger, leaky.BucketConfig.Debug)
 			if err != nil {
 				return srcs, fmt.Errorf("while running scope filter: %w", err)
 			}
@@ -176,7 +176,7 @@ func eventSources(evt types.Event, leaky *Leaky) (map[string]models.Source, erro
 		return srcs, errors.New("empty scope information")
 	}
 
-	retValue, err := exprhelpers.Run(leaky.scopeType.RunTimeFilter, map[string]interface{}{"evt": &evt}, leaky.logger, leaky.BucketConfig.Debug)
+	retValue, err := exprhelpers.Run(leaky.scopeType.RunTimeFilter, map[string]any{"evt": &evt}, leaky.logger, leaky.BucketConfig.Debug)
 	if err != nil {
 		return srcs, fmt.Errorf("while running scope filter: %w", err)
 	}

@@ -97,16 +97,17 @@ func GeoIpCity(field string, p *types.Event, plog *log.Entry) (map[string]string
 	ret := make(map[string]string)
 
-	if record.Country.IsoCode != "" {
+	switch {
+	case record.Country.IsoCode != "":
 		ret["IsoCode"] = record.Country.IsoCode
 		ret["IsInEU"] = strconv.FormatBool(record.Country.IsInEuropeanUnion)
-	} else if record.RegisteredCountry.IsoCode != "" {
+	case record.RegisteredCountry.IsoCode != "":
 		ret["IsoCode"] = record.RegisteredCountry.IsoCode
 		ret["IsInEU"] = strconv.FormatBool(record.RegisteredCountry.IsInEuropeanUnion)
-	} else if record.RepresentedCountry.IsoCode != "" {
+	case record.RepresentedCountry.IsoCode != "":
 		ret["IsoCode"] = record.RepresentedCountry.IsoCode
 		ret["IsInEU"] = strconv.FormatBool(record.RepresentedCountry.IsInEuropeanUnion)
-	} else {
+	default:
 		ret["IsoCode"] = ""
 		ret["IsInEU"] = "false"
 	}

@@ -47,7 +47,7 @@ func SetTargetByName(target string, value string, evt *types.Event) bool {
 		return false
 	}
 
-	for _, f := range strings.Split(target, ".") {
+	for f := range strings.SplitSeq(target, ".") {
 		/*
 		** According to current Event layout we only have to handle struct and map
 		*/
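
strings.SplitSeq, added in Go 1.24, returns an iterator over the fields instead of allocating a []string, which fits this loop: each field is consumed once and the index was discarded anyway. Minimal sketch:

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	target := "evt.Parsed.source_ip"

    	// SplitSeq yields fields lazily; no slice is materialized,
    	// and the range clause binds the value only (no index).
    	for f := range strings.SplitSeq(target, ".") {
    		fmt.Println(f)
    	}
    }
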
@@ -126,7 +126,7 @@ func (n *Node) ProcessStatics(statics []ExtraField, event *types.Event) error {
 		if static.Value != "" {
 			value = static.Value
 		} else if static.RunTimeValue != nil {
-			output, err := exprhelpers.Run(static.RunTimeValue, map[string]interface{}{"evt": event}, clog, n.Debug)
+			output, err := exprhelpers.Run(static.RunTimeValue, map[string]any{"evt": event}, clog, n.Debug)
 			if err != nil {
 				clog.Warningf("failed to run RunTimeValue : %v", err)
 				continue
@@ -138,9 +138,9 @@ func (n *Node) ProcessStatics(statics []ExtraField, event *types.Event) error {
 			value = strconv.Itoa(out)
 		case float64, float32:
 			value = fmt.Sprintf("%f", out)
-		case map[string]interface{}:
+		case map[string]any:
 			clog.Warnf("Expression '%s' returned a map, please use ToJsonString() to convert it to string if you want to keep it as is, or refine your expression to extract a string", static.ExpValue)
-		case []interface{}:
+		case []any:
 			clog.Warnf("Expression '%s' returned an array, please use ToJsonString() to convert it to string if you want to keep it as is, or refine your expression to extract a string", static.ExpValue)
 		case nil:
 			clog.Debugf("Expression '%s' returned nil, skipping", static.ExpValue)
@@ -289,7 +289,7 @@ func Parse(ctx UnixParserCtx, xp types.Event, nodes []Node) (types.Event, error)
 		event.Meta = make(map[string]string)
 	}
 
 	if event.Unmarshaled == nil {
-		event.Unmarshaled = make(map[string]interface{})
+		event.Unmarshaled = make(map[string]any)
 	}
 
 	if event.Type == types.LOG {
 		log.Tracef("INPUT '%s'", event.Line.Raw)
@@ -342,7 +342,7 @@ func Parse(ctx UnixParserCtx, xp types.Event, nodes []Node) (types.Event, error)
 		if ctx.Profiling {
 			nodes[idx].Profiling = true
 		}
 
-		ret, err := nodes[idx].process(&event, ctx, map[string]interface{}{"evt": &event})
+		ret, err := nodes[idx].process(&event, ctx, map[string]any{"evt": &event})
 		if err != nil {
 			clog.Errorf("Error while processing node : %v", err)
 			return event, err