Mirror of https://github.com/crowdsecurity/crowdsec.git (synced 2025-05-10 20:05:55 +02:00)
lint/gocritic: enable importShadow, typeUnparen, unnecessaryDefer (#3583)
parent 34e306505c
commit 418a27596e
17 changed files with 62 additions and 65 deletions
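The three newly enabled gocritic checks target distinct patterns: importShadow flags local identifiers that shadow an imported package name, typeUnparen flags redundant parentheses around types, and unnecessaryDefer flags defer statements that sit at the very end of a function and so buy nothing over a direct call. A minimal illustrative sketch (not taken from the repository) of code each check would complain about:

package main

import (
	"fmt"
	"os"
)

// importShadow: the parameter "fmt" shadows the imported fmt package,
// making the package inaccessible inside this function.
func printAll(fmt string, args ...string) {
	for _, a := range args {
		os.Stdout.WriteString(fmt + a + "\n")
	}
}

// typeUnparen: the parentheses around *bool are redundant;
// write map[string]*bool instead.
var flags map[string](*bool)

// unnecessaryDefer: the deferred call is the last thing the function
// does, so it could simply be f.Close() without the defer.
func writeFile(name string) error {
	f, err := os.Create(name)
	if err != nil {
		return err
	}
	fmt.Fprintln(f, "hello")
	defer f.Close()
	return nil
}

func main() {
	printAll("> ", "a", "b")
	_ = flags
	_ = writeFile(os.DevNull)
}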
@@ -158,21 +158,18 @@ linters:
     disabled-checks:
       - paramTypeCombine
       - ifElseChain
-      - importShadow
       - hugeParam
       - commentedOutCode
       - commentedOutImport
       - unnamedResult
       - sloppyReassign
       - appendCombine
-      - typeUnparen
       - commentFormatting
       - deferInLoop #
       - whyNoLint
       - equalFold #
       - unnecessaryBlock #
       - tooManyResultsChecker
-      - unnecessaryDefer
       - docStub
       - preferFprint
@@ -312,7 +312,7 @@ func (cli *cliConsole) newStatusCmd() *cobra.Command {
 			case "human":
 				cmdConsoleStatusTable(color.Output, cfg.Cscli.Color, *consoleCfg)
 			case "json":
-				out := map[string](*bool){
+				out := map[string]*bool{
 					csconfig.SEND_MANUAL_SCENARIOS:  consoleCfg.ShareManualDecisions,
 					csconfig.SEND_CUSTOM_SCENARIOS:  consoleCfg.ShareCustomScenarios,
 					csconfig.SEND_TAINTED_SCENARIOS: consoleCfg.ShareTaintedScenarios,
@@ -99,12 +99,12 @@ func AppsecEventGeneration(inEvt types.Event, request *http.Request) (*types.Eve
 	alert.EventsCount = ptr.Of(int32(len(alert.Events)))
 	alert.Leakspeed = ptr.Of("")
-	alert.Scenario = ptr.Of(inEvt.Appsec.MatchedRules.GetName())
-	alert.ScenarioHash = ptr.Of(inEvt.Appsec.MatchedRules.GetHash())
-	alert.ScenarioVersion = ptr.Of(inEvt.Appsec.MatchedRules.GetVersion())
+	alert.Scenario = ptr.Of(inEvt.Appsec.GetName())
+	alert.ScenarioHash = ptr.Of(inEvt.Appsec.GetHash())
+	alert.ScenarioVersion = ptr.Of(inEvt.Appsec.GetVersion())
 	alert.Simulated = ptr.Of(false)
 	alert.Source = &source
-	msg := fmt.Sprintf("AppSec block: %s from %s (%s)", inEvt.Appsec.MatchedRules.GetName(),
+	msg := fmt.Sprintf("AppSec block: %s from %s (%s)", inEvt.Appsec.GetName(),
 		alert.Source.IP, inEvt.Parsed["remediation_cmpt_ip"])
 	alert.Message = &msg
 	alert.StartAt = ptr.Of(time.Now().UTC().Format(time.RFC3339))
@@ -278,7 +278,7 @@ func (r *AppsecRunner) AccumulateTxToEvent(evt *types.Event, req *appsec.ParsedR
 				matchedZones = append(matchedZones, zone)
 			}
 
-			corazaRule := map[string]interface{}{
+			corazaRule := map[string]any{
 				"id":        rule.Rule().ID(),
 				"uri":       evt.Parsed["target_uri"],
 				"rule_type": kind,
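Many of the hunks in this commit are mechanical interface{} to any rewrites. Since Go 1.18, any has been a predeclared alias for interface{}, so the two spellings denote the identical type and the change is purely cosmetic. A quick self-contained check:

package main

import "fmt"

func main() {
	// any is an alias for interface{} (Go 1.18+): the two maps below
	// have the same type, not merely compatible ones.
	a := map[string]interface{}{"id": 1}
	b := map[string]any{"id": 1}
	fmt.Printf("%T\n%T\n", a, b) // prints the same type twice
}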
@@ -57,7 +57,7 @@ type CloudwatchSuite struct {
 	suite.Suite
 }
 
-func (s *CloudwatchSuite) SetupSuite() {
+func (*CloudwatchSuite) SetupSuite() {
 	def_PollNewStreamInterval = 1 * time.Second
 	def_PollStreamInterval = 1 * time.Second
 	def_StreamReadTimeout = 10 * time.Second
@@ -308,7 +308,7 @@ stream_name: test_stream`,
 			})
 			require.NoError(t, err)
 		},
-		run: func(t *testing.T, cw *CloudwatchSource) {
+		run: func(_ *testing.T, _ *CloudwatchSource) {
 			// wait for new stream pickup + stream poll interval
 			time.Sleep(def_PollNewStreamInterval + (1 * time.Second))
 			time.Sleep(def_PollStreamInterval + (1 * time.Second))
@@ -325,7 +325,7 @@ stream_name: test_stream`,
 			dbgLogger.Infof("starting test")
 
 			cw := CloudwatchSource{}
-			err := cw.Configure(([]byte)(tc.config), dbgLogger, configuration.METRICS_NONE)
+			err := cw.Configure([]byte(tc.config), dbgLogger, configuration.METRICS_NONE)
 			cstest.RequireErrorContains(s.T(), err, tc.expectedCfgErr)
 
 			if tc.expectedCfgErr != "" {
@@ -442,7 +442,7 @@ stream_name: test_stream`,
 			dbgLogger.Logger.SetLevel(logrus.DebugLevel)
 
 			cw := CloudwatchSource{}
-			err := cw.Configure(([]byte)(tc.config), dbgLogger, configuration.METRICS_NONE)
+			err := cw.Configure([]byte(tc.config), dbgLogger, configuration.METRICS_NONE)
 			cstest.RequireErrorContains(s.T(), err, tc.expectedCfgErr)
 
 			if tc.expectedCfgErr != "" {
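The two hunks above are typeUnparen at work on a conversion: in Go, parentheses around the target type are only required when the type literal would otherwise parse ambiguously (types starting with * or <-); for []byte they are pure noise. A small sketch of the rule:

package main

import "fmt"

func main() {
	s := "config: value"

	// Equivalent conversions; the parenthesized form is redundant.
	a := ([]byte)(s)
	b := []byte(s)
	fmt.Println(string(a) == string(b)) // true

	// Parentheses ARE required when the type starts with * or <-,
	// otherwise the expression parses differently.
	n := 42
	p := (*int)(&n) // without parens, *int(&n) would mean a dereference
	fmt.Println(*p)
}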
@@ -115,7 +115,7 @@ func (k *KafkaSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLev
 	return nil
 }
 
-func (k *KafkaSource) ConfigureByDSN(string, map[string]string, *log.Entry, string) error {
+func (*KafkaSource) ConfigureByDSN(string, map[string]string, *log.Entry, string) error {
 	return fmt.Errorf("%s datasource does not support command-line acquisition", dataSourceName)
 }
@@ -123,27 +123,27 @@ func (k *KafkaSource) GetMode() string {
 	return k.Config.Mode
 }
 
-func (k *KafkaSource) GetName() string {
+func (*KafkaSource) GetName() string {
 	return dataSourceName
 }
 
-func (k *KafkaSource) OneShotAcquisition(_ context.Context, _ chan types.Event, _ *tomb.Tomb) error {
+func (*KafkaSource) OneShotAcquisition(_ context.Context, _ chan types.Event, _ *tomb.Tomb) error {
 	return fmt.Errorf("%s datasource does not support one-shot acquisition", dataSourceName)
 }
 
-func (k *KafkaSource) CanRun() error {
+func (*KafkaSource) CanRun() error {
 	return nil
 }
 
-func (k *KafkaSource) GetMetrics() []prometheus.Collector {
+func (*KafkaSource) GetMetrics() []prometheus.Collector {
 	return []prometheus.Collector{linesRead}
 }
 
-func (k *KafkaSource) GetAggregMetrics() []prometheus.Collector {
+func (*KafkaSource) GetAggregMetrics() []prometheus.Collector {
 	return []prometheus.Collector{linesRead}
 }
 
-func (k *KafkaSource) Dump() interface{} {
+func (k *KafkaSource) Dump() any {
 	return k
 }
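A pattern worth noting in the hunk above: when a method never reads or writes its receiver, the receiver name is dropped, which documents at a glance that the method is state-free; Dump keeps k because it returns the receiver itself. A minimal sketch of the convention, using a hypothetical type:

package main

import "fmt"

type Source struct{ name string }

// The receiver is unused, so it stays anonymous.
func (*Source) CanRun() error { return nil }

// The receiver is returned, so it keeps its name.
func (s *Source) Dump() any { return s }

func main() {
	s := &Source{name: "kafka"}
	fmt.Println(s.CanRun(), s.Dump())
}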
@@ -399,6 +399,8 @@ func TestDeleteDecisions(t *testing.T) {
 	ctx := t.Context()
 
 	mux, urlx, teardown := setup()
+	defer teardown()
+
 	mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) {
 		w.WriteHeader(http.StatusOK)
 		_, err := w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`))
@@ -433,8 +435,6 @@ func TestDeleteDecisions(t *testing.T) {
 	deleted, _, err := client.Decisions.Delete(ctx, filters)
 	require.NoError(t, err)
 	assert.Equal(t, "1", deleted.NbDeleted)
-
-	defer teardown()
 }
 
 func TestDecisionsStreamOpts_addQueryParamsToURL(t *testing.T) {
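This pair of hunks is the unnecessaryDefer fix in action: a defer registered as the last statement of a function fires immediately on return, so it is moved next to the resource acquisition instead, where it also survives early returns and panics. A minimal sketch, assuming a setup/teardown pair like the test's:

package main

import "fmt"

func setup() (string, func()) {
	return "server-url", func() { fmt.Println("teardown") }
}

func badTest() {
	url, teardown := setup()
	fmt.Println("testing", url)
	defer teardown() // flagged: the function ends here anyway, and an
	// early return or panic above would skip the cleanup entirely
}

func goodTest() {
	url, teardown := setup()
	defer teardown() // registered up front; runs on every exit path
	fmt.Println("testing", url)
}

func main() {
	badTest()
	goodTest()
}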
@@ -396,11 +396,7 @@ func (a *apic) Send(ctx context.Context, cacheOrig *models.AddSignalsRequest) {
 	batchSize := 50
 
 	for start := 0; start < len(cache); start += batchSize {
-		end := start + batchSize
-
-		if end > len(cache) {
-			end = len(cache)
-		}
+		end := min(start+batchSize, len(cache))
 
 		if err := a.sendBatch(ctx, cache[start:end]); err != nil {
 			log.Errorf("sending signal to central API: %s", err)
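The rewrite above uses the built-in min (Go 1.21+), which collapses the clamp-to-slice-length idiom into one expression. A self-contained sketch of the same batching pattern:

package main

import "fmt"

func main() {
	cache := make([]int, 103) // stand-in for queued signals
	batchSize := 50

	for start := 0; start < len(cache); start += batchSize {
		// min (built-in since Go 1.21) clamps the final, shorter batch.
		end := min(start+batchSize, len(cache))
		fmt.Printf("sending batch [%d:%d) of %d items\n", start, end, end-start)
	}
}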
@@ -251,7 +251,7 @@ func (a *apic) fetchMachineIDs(ctx context.Context) ([]string, error) {
 // Metrics are sent at start, then at the randomized metricsIntervalFirst,
 // then at regular metricsInterval. If a change is detected in the list
 // of machines, the next metrics are sent immediately.
-func (a *apic) SendMetrics(ctx context.Context, stop chan (bool)) {
+func (a *apic) SendMetrics(ctx context.Context, stop chan bool) {
 	defer trace.CatchPanic("lapi/metricsToAPIC")
 
 	// verify the list of machines every <checkInt> interval
@@ -29,7 +29,7 @@ type JWT struct {
 	TlsAuth *TLSAuth
 }
 
-func PayloadFunc(data interface{}) jwt.MapClaims {
+func PayloadFunc(data any) jwt.MapClaims {
 	if value, ok := data.(*models.WatcherAuthRequest); ok {
 		return jwt.MapClaims{
 			MachineIDKey: &value.MachineID,
@@ -39,7 +39,7 @@ func PayloadFunc(data interface{}) jwt.MapClaims {
 	return jwt.MapClaims{}
 }
 
-func IdentityHandler(c *gin.Context) interface{} {
+func IdentityHandler(c *gin.Context) any {
 	claims := jwt.ExtractClaims(c)
 	machineID := claims[MachineIDKey].(string)
@@ -172,7 +172,7 @@ func (j *JWT) authPlain(c *gin.Context) (*authInput, error) {
 	return &ret, nil
 }
 
-func (j *JWT) Authenticator(c *gin.Context) (interface{}, error) {
+func (j *JWT) Authenticator(c *gin.Context) (any, error) {
 	var (
 		err  error
 		auth *authInput
@@ -248,7 +248,7 @@ func (j *JWT) Authenticator(c *gin.Context) (interface{}, error) {
 	}, nil
 }
 
-func Authorizator(data interface{}, c *gin.Context) bool {
+func Authorizator(data any, c *gin.Context) bool {
 	return true
 }
@@ -92,17 +92,18 @@ func (pw *PluginWatcher) watchPluginTicker(pluginName string) {
 	threshold := pw.PluginConfigByName[pluginName].GroupThreshold
 
 	//only size is set
-	if threshold > 0 && interval == 0 {
+	switch {
+	case threshold > 0 && interval == 0:
 		watchCount = threshold
 		watchTime = DefaultEmptyTicker
-	} else if interval != 0 && threshold == 0 {
+	case interval != 0 && threshold == 0:
 		//only time is set
 		watchTime = interval
-	} else if interval != 0 && threshold != 0 {
+	case interval != 0 && threshold != 0:
 		//both are set
 		watchTime = DefaultEmptyTicker
 		watchCount = threshold
-	} else {
+	default:
 		//none are set, we sent every event we receive
 		watchTime = DefaultEmptyTicker
 		watchCount = 1
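Several hunks in this commit (here, and in handleAlertIPPredicates, GeoIpCity, and ParseKV below) rewrite if/else-if chains as tagless switch statements, the idiomatic Go form for a list of mutually exclusive conditions. A minimal sketch of the transformation:

package main

import "fmt"

func describe(threshold, interval int) string {
	// A tagless switch reads top to bottom and takes the first true case,
	// exactly like the if/else-if chain it replaces.
	switch {
	case threshold > 0 && interval == 0:
		return "size only"
	case interval != 0 && threshold == 0:
		return "time only"
	case interval != 0 && threshold != 0:
		return "both"
	default:
		return "neither"
	}
}

func main() {
	fmt.Println(describe(5, 0), describe(0, 3), describe(5, 3), describe(0, 0))
}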
@@ -133,11 +133,12 @@ func handleAlertIPv6Predicates(ip_sz int, contains bool, start_ip, start_sfx, en
 }
 
 func handleAlertIPPredicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) error {
-	if ip_sz == 4 {
+	switch {
+	case ip_sz == 4:
 		handleAlertIPv4Predicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, predicates)
-	} else if ip_sz == 16 {
+	case ip_sz == 16:
 		handleAlertIPv6Predicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, predicates)
-	} else if ip_sz != 0 {
+	case ip_sz != 0:
 		return errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz)
 	}
@@ -117,7 +117,7 @@ func (o *OpOutput) String() string {
 	return ret + ""
 }
 
-func (erp ExprRuntimeDebug) extractCode(ip int, program *vm.Program) string {
+func (ExprRuntimeDebug) extractCode(ip int, program *vm.Program) string {
 	locations := program.Locations()
 	src := string(program.Source())
@@ -356,7 +356,7 @@ func (erp ExprRuntimeDebug) ipSeek(ip int) []string {
 	return nil
 }
 
-func Run(program *vm.Program, env interface{}, logger *log.Entry, debug bool) (any, error) {
+func Run(program *vm.Program, env any, logger *log.Entry, debug bool) (any, error) {
 	if debug {
 		dbgInfo, ret, err := RunWithDebug(program, env, logger)
 		DisplayExprDebug(program, dbgInfo, logger, ret)
@@ -383,7 +383,7 @@ func DisplayExprDebug(program *vm.Program, outputs []OpOutput, logger *log.Entry
 }
 
 // TBD: Based on the level of the logger (ie. trace vs debug) we could decide to add more low level instructions (pop, push, etc.)
-func RunWithDebug(program *vm.Program, env interface{}, logger *log.Entry) ([]OpOutput, any, error) {
+func RunWithDebug(program *vm.Program, env any, logger *log.Entry) ([]OpOutput, any, error) {
 	outputs := []OpOutput{}
 	erp := ExprRuntimeDebug{
 		Logger: logger,
@@ -875,11 +875,12 @@ func ParseKV(params ...any) (any, error) {
 		value := ""
 
 		for i, name := range keyValuePattern.SubexpNames() {
-			if name == "key" {
+			switch {
+			case name == "key":
 				key = match[i]
-			} else if name == "quoted_value" && match[i] != "" {
+			case name == "quoted_value" && match[i] != "":
 				value = match[i]
-			} else if name == "value" && match[i] != "" {
+			case name == "value" && match[i] != "":
 				value = match[i]
 			}
 		}
@@ -43,7 +43,7 @@ type BucketFactory struct {
 	GroupBy   string `yaml:"groupby,omitempty"` // groupy is an expr that allows to determine the partitions of the bucket. A common example is the source_ip
 	Distinct  string `yaml:"distinct"` // Distinct, when present, adds a `Pour()` processor that will only pour uniq items (based on distinct expr result)
 	Debug     bool `yaml:"debug"` // Debug, when set to true, will enable debugging for _this_ scenario specifically
-	Labels    map[string]interface{} `yaml:"labels"` // Labels is K:V list aiming at providing context the overflow
+	Labels    map[string]any `yaml:"labels"` // Labels is K:V list aiming at providing context the overflow
 	Blackhole string `yaml:"blackhole,omitempty"` // Blackhole is a duration that, if present, will prevent same bucket partition to overflow more often than $duration
 	logger    *log.Entry // logger is bucket-specific logger (used by Debug as well)
 	Reprocess bool `yaml:"reprocess"` // Reprocess, if true, will for the bucket to be re-injected into processing chain
@@ -225,7 +225,7 @@ func compileScopeFilter(bucketFactory *BucketFactory) error {
 		return errors.New("filter is mandatory for non-IP, non-Range scope")
 	}
 
-	runTimeFilter, err := expr.Compile(bucketFactory.ScopeType.Filter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
+	runTimeFilter, err := expr.Compile(bucketFactory.ScopeType.Filter, exprhelpers.GetExprOptions(map[string]any{"evt": &types.Event{}})...)
 	if err != nil {
 		return fmt.Errorf("error compiling the scope filter: %w", err)
 	}
@@ -381,13 +381,13 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error {
 		return errors.New("bucket without filter directive")
 	}
 
-	bucketFactory.RunTimeFilter, err = expr.Compile(bucketFactory.Filter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
+	bucketFactory.RunTimeFilter, err = expr.Compile(bucketFactory.Filter, exprhelpers.GetExprOptions(map[string]any{"evt": &types.Event{}})...)
 	if err != nil {
 		return fmt.Errorf("invalid filter '%s' in %s: %w", bucketFactory.Filter, bucketFactory.Filename, err)
 	}
 
 	if bucketFactory.GroupBy != "" {
-		bucketFactory.RunTimeGroupBy, err = expr.Compile(bucketFactory.GroupBy, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
+		bucketFactory.RunTimeGroupBy, err = expr.Compile(bucketFactory.GroupBy, exprhelpers.GetExprOptions(map[string]any{"evt": &types.Event{}})...)
 		if err != nil {
 			return fmt.Errorf("invalid groupby '%s' in %s: %w", bucketFactory.GroupBy, bucketFactory.Filename, err)
 		}
@@ -415,7 +415,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error {
 		bucketFactory.logger.Tracef("Adding a non duplicate filter")
 		bucketFactory.processors = append(bucketFactory.processors, &Uniq{})
 		// we're compiling and discarding the expression to be able to detect it during loading
-		_, err = expr.Compile(bucketFactory.Distinct, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
+		_, err = expr.Compile(bucketFactory.Distinct, exprhelpers.GetExprOptions(map[string]any{"evt": &types.Event{}})...)
 		if err != nil {
 			return fmt.Errorf("invalid distinct '%s' in %s: %w", bucketFactory.Distinct, bucketFactory.Filename, err)
 		}
@@ -425,7 +425,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error {
 		bucketFactory.logger.Tracef("Adding a cancel_on filter")
 		bucketFactory.processors = append(bucketFactory.processors, &CancelOnFilter{})
 		// we're compiling and discarding the expression to be able to detect it during loading
-		_, err = expr.Compile(bucketFactory.CancelOnFilter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
+		_, err = expr.Compile(bucketFactory.CancelOnFilter, exprhelpers.GetExprOptions(map[string]any{"evt": &types.Event{}})...)
 		if err != nil {
 			return fmt.Errorf("invalid cancel_on '%s' in %s: %w", bucketFactory.CancelOnFilter, bucketFactory.Filename, err)
 		}
@@ -459,7 +459,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error {
 		bucketFactory.logger.Tracef("Adding conditional overflow")
 		bucketFactory.processors = append(bucketFactory.processors, &ConditionalOverflow{})
 		// we're compiling and discarding the expression to be able to detect it during loading
-		_, err = expr.Compile(bucketFactory.ConditionalOverflow, exprhelpers.GetExprOptions(map[string]interface{}{"queue": &types.Queue{}, "leaky": &Leaky{}, "evt": &types.Event{}})...)
+		_, err = expr.Compile(bucketFactory.ConditionalOverflow, exprhelpers.GetExprOptions(map[string]any{"queue": &types.Queue{}, "leaky": &Leaky{}, "evt": &types.Event{}})...)
 		if err != nil {
 			return fmt.Errorf("invalid condition '%s' in %s: %w", bucketFactory.ConditionalOverflow, bucketFactory.Filename, err)
 		}
@@ -59,7 +59,7 @@ func overflowEventSources(evt types.Event, leaky *Leaky) (map[string]models.Sour
 	}
 
 	if leaky.scopeType.RunTimeFilter != nil {
-		retValue, err := exprhelpers.Run(leaky.scopeType.RunTimeFilter, map[string]interface{}{"evt": &evt}, leaky.logger, leaky.BucketConfig.Debug)
+		retValue, err := exprhelpers.Run(leaky.scopeType.RunTimeFilter, map[string]any{"evt": &evt}, leaky.logger, leaky.BucketConfig.Debug)
 		if err != nil {
 			return srcs, fmt.Errorf("while running scope filter: %w", err)
 		}
@@ -156,7 +156,7 @@ func eventSources(evt types.Event, leaky *Leaky) (map[string]models.Source, erro
 		src.Value = &src.Range
 
 		if leaky.scopeType.RunTimeFilter != nil {
-			retValue, err := exprhelpers.Run(leaky.scopeType.RunTimeFilter, map[string]interface{}{"evt": &evt}, leaky.logger, leaky.BucketConfig.Debug)
+			retValue, err := exprhelpers.Run(leaky.scopeType.RunTimeFilter, map[string]any{"evt": &evt}, leaky.logger, leaky.BucketConfig.Debug)
 			if err != nil {
 				return srcs, fmt.Errorf("while running scope filter: %w", err)
 			}
@@ -176,7 +176,7 @@ func eventSources(evt types.Event, leaky *Leaky) (map[string]models.Source, erro
 			return srcs, errors.New("empty scope information")
 		}
 
-		retValue, err := exprhelpers.Run(leaky.scopeType.RunTimeFilter, map[string]interface{}{"evt": &evt}, leaky.logger, leaky.BucketConfig.Debug)
+		retValue, err := exprhelpers.Run(leaky.scopeType.RunTimeFilter, map[string]any{"evt": &evt}, leaky.logger, leaky.BucketConfig.Debug)
 		if err != nil {
 			return srcs, fmt.Errorf("while running scope filter: %w", err)
 		}
@@ -97,16 +97,17 @@ func GeoIpCity(field string, p *types.Event, plog *log.Entry) (map[string]string
 
 	ret := make(map[string]string)
 
-	if record.Country.IsoCode != "" {
+	switch {
+	case record.Country.IsoCode != "":
 		ret["IsoCode"] = record.Country.IsoCode
 		ret["IsInEU"] = strconv.FormatBool(record.Country.IsInEuropeanUnion)
-	} else if record.RegisteredCountry.IsoCode != "" {
+	case record.RegisteredCountry.IsoCode != "":
 		ret["IsoCode"] = record.RegisteredCountry.IsoCode
 		ret["IsInEU"] = strconv.FormatBool(record.RegisteredCountry.IsInEuropeanUnion)
-	} else if record.RepresentedCountry.IsoCode != "" {
+	case record.RepresentedCountry.IsoCode != "":
 		ret["IsoCode"] = record.RepresentedCountry.IsoCode
 		ret["IsInEU"] = strconv.FormatBool(record.RepresentedCountry.IsInEuropeanUnion)
-	} else {
+	default:
 		ret["IsoCode"] = ""
 		ret["IsInEU"] = "false"
 	}
@@ -47,7 +47,7 @@ func SetTargetByName(target string, value string, evt *types.Event) bool {
 		return false
 	}
 
-	for _, f := range strings.Split(target, ".") {
+	for f := range strings.SplitSeq(target, ".") {
 		/*
 		** According to current Event layout we only have to handle struct and map
 		*/
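strings.SplitSeq (Go 1.24) returns an iterator instead of allocating a slice, so ranging over the path segments avoids building an intermediate []string. A minimal sketch:

package main

import (
	"fmt"
	"strings"
)

func main() {
	target := "evt.Meta.source_ip"

	// SplitSeq yields each segment lazily; no slice is allocated.
	for f := range strings.SplitSeq(target, ".") {
		fmt.Println("segment:", f)
	}
}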
@@ -126,7 +126,7 @@ func (n *Node) ProcessStatics(statics []ExtraField, event *types.Event) error {
 		if static.Value != "" {
 			value = static.Value
 		} else if static.RunTimeValue != nil {
-			output, err := exprhelpers.Run(static.RunTimeValue, map[string]interface{}{"evt": event}, clog, n.Debug)
+			output, err := exprhelpers.Run(static.RunTimeValue, map[string]any{"evt": event}, clog, n.Debug)
 			if err != nil {
 				clog.Warningf("failed to run RunTimeValue : %v", err)
 				continue
@@ -138,9 +138,9 @@ func (n *Node) ProcessStatics(statics []ExtraField, event *types.Event) error {
 			value = strconv.Itoa(out)
 		case float64, float32:
 			value = fmt.Sprintf("%f", out)
-		case map[string]interface{}:
+		case map[string]any:
 			clog.Warnf("Expression '%s' returned a map, please use ToJsonString() to convert it to string if you want to keep it as is, or refine your expression to extract a string", static.ExpValue)
-		case []interface{}:
+		case []any:
 			clog.Warnf("Expression '%s' returned an array, please use ToJsonString() to convert it to string if you want to keep it as is, or refine your expression to extract a string", static.ExpValue)
 		case nil:
 			clog.Debugf("Expression '%s' returned nil, skipping", static.ExpValue)
@@ -289,7 +289,7 @@ func Parse(ctx UnixParserCtx, xp types.Event, nodes []Node) (types.Event, error)
 		event.Meta = make(map[string]string)
 	}
 	if event.Unmarshaled == nil {
-		event.Unmarshaled = make(map[string]interface{})
+		event.Unmarshaled = make(map[string]any)
 	}
 	if event.Type == types.LOG {
 		log.Tracef("INPUT '%s'", event.Line.Raw)
@@ -342,7 +342,7 @@ func Parse(ctx UnixParserCtx, xp types.Event, nodes []Node) (types.Event, error)
 		if ctx.Profiling {
 			nodes[idx].Profiling = true
 		}
-		ret, err := nodes[idx].process(&event, ctx, map[string]interface{}{"evt": &event})
+		ret, err := nodes[idx].process(&event, ctx, map[string]any{"evt": &event})
 		if err != nil {
 			clog.Errorf("Error while processing node : %v", err)
 			return event, err