enable linter: revive (indent-error-flow) (#3068)

* enable linter: revive (indent-error-flow)

* lint
This commit is contained in:
mmetc 2024-06-10 17:36:22 +02:00 committed by GitHub
parent 7fd01ae3fc
commit 31ed9fb5ee
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
10 changed files with 63 additions and 69 deletions

View file

@@ -70,7 +70,6 @@ linters-settings:
- "!**/pkg/database/*.go"
- "!**/pkg/exprhelpers/*.go"
- "!**/pkg/acquisition/modules/appsec/appsec.go"
- "!**/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go"
- "!**/pkg/apiserver/controllers/v1/errors.go"
yaml:
files:
@@ -147,8 +146,6 @@ linters-settings:
disabled: true
- name: increment-decrement
disabled: true
- name: indent-error-flow
disabled: true
- name: import-alias-naming
disabled: true
- name: import-shadowing

View file

@@ -50,9 +50,9 @@ func (o *rx) Evaluate(tx plugintypes.TransactionState, value string) bool {
tx.CaptureField(i, c)
}
return true
} else {
return o.re.MatchString(value)
}
return o.re.MatchString(value)
}
// RegisterRX registers the rx operator using a WASI implementation instead of Go.

View file

@@ -4,6 +4,7 @@ import (
"bytes"
"compress/gzip"
"encoding/json"
"errors"
"fmt"
"io"
"strings"
@@ -95,7 +96,7 @@ func (k *KinesisSource) newClient() error {
}
if sess == nil {
return fmt.Errorf("failed to create aws session")
return errors.New("failed to create aws session")
}
config := aws.NewConfig()
if k.Config.AwsRegion != "" {
@@ -106,7 +107,7 @@ func (k *KinesisSource) newClient() error {
}
k.kClient = kinesis.New(sess, config)
if k.kClient == nil {
return fmt.Errorf("failed to create kinesis client")
return errors.New("failed to create kinesis client")
}
return nil
}
@@ -124,7 +125,7 @@ func (k *KinesisSource) UnmarshalConfig(yamlConfig []byte) error {
err := yaml.UnmarshalStrict(yamlConfig, &k.Config)
if err != nil {
return fmt.Errorf("Cannot parse kinesis datasource configuration: %w", err)
return fmt.Errorf("cannot parse kinesis datasource configuration: %w", err)
}
if k.Config.Mode == "" {
@@ -132,16 +133,16 @@ func (k *KinesisSource) UnmarshalConfig(yamlConfig []byte) error {
}
if k.Config.StreamName == "" && !k.Config.UseEnhancedFanOut {
return fmt.Errorf("stream_name is mandatory when use_enhanced_fanout is false")
return errors.New("stream_name is mandatory when use_enhanced_fanout is false")
}
if k.Config.StreamARN == "" && k.Config.UseEnhancedFanOut {
return fmt.Errorf("stream_arn is mandatory when use_enhanced_fanout is true")
return errors.New("stream_arn is mandatory when use_enhanced_fanout is true")
}
if k.Config.ConsumerName == "" && k.Config.UseEnhancedFanOut {
return fmt.Errorf("consumer_name is mandatory when use_enhanced_fanout is true")
return errors.New("consumer_name is mandatory when use_enhanced_fanout is true")
}
if k.Config.StreamARN != "" && k.Config.StreamName != "" {
return fmt.Errorf("stream_arn and stream_name are mutually exclusive")
return errors.New("stream_arn and stream_name are mutually exclusive")
}
if k.Config.MaxRetries <= 0 {
k.Config.MaxRetries = 10
@@ -169,7 +170,7 @@ func (k *KinesisSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsL
}
func (k *KinesisSource) ConfigureByDSN(string, map[string]string, *log.Entry, string) error {
return fmt.Errorf("kinesis datasource does not support command-line acquisition")
return errors.New("kinesis datasource does not support command-line acquisition")
}
func (k *KinesisSource) GetMode() string {
@@ -181,7 +182,7 @@ func (k *KinesisSource) GetName() string {
}
func (k *KinesisSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error {
return fmt.Errorf("kinesis datasource does not support one-shot acquisition")
return errors.New("kinesis datasource does not support one-shot acquisition")
}
func (k *KinesisSource) decodeFromSubscription(record []byte) ([]CloudwatchSubscriptionLogEvent, error) {
@@ -524,9 +525,8 @@ func (k *KinesisSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb)
defer trace.CatchPanic("crowdsec/acquis/kinesis/streaming")
if k.Config.UseEnhancedFanOut {
return k.EnhancedRead(out, t)
} else {
return k.ReadFromStream(out, t)
}
return k.ReadFromStream(out, t)
})
return nil
}

View file

@@ -4,6 +4,7 @@ import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
@@ -13,7 +14,6 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/cwversion"
"github.com/gorilla/websocket"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"gopkg.in/tomb.v2"
)
@@ -120,11 +120,10 @@ func (lc *LokiClient) queryRange(ctx context.Context, uri string, c chan *LokiQu
resp, err := lc.Get(uri)
if err != nil {
if ok := lc.shouldRetry(); !ok {
return errors.Wrapf(err, "error querying range")
} else {
lc.increaseTicker(ticker)
continue
return fmt.Errorf("error querying range: %w", err)
}
lc.increaseTicker(ticker)
continue
}
if resp.StatusCode != http.StatusOK {
@@ -132,22 +131,20 @@ func (lc *LokiClient) queryRange(ctx context.Context, uri string, c chan *LokiQu
body, _ := io.ReadAll(resp.Body)
resp.Body.Close()
if ok := lc.shouldRetry(); !ok {
return errors.Wrapf(err, "bad HTTP response code: %d: %s", resp.StatusCode, string(body))
} else {
lc.increaseTicker(ticker)
continue
return fmt.Errorf("bad HTTP response code: %d: %s: %w", resp.StatusCode, string(body), err)
}
lc.increaseTicker(ticker)
continue
}
var lq LokiQueryRangeResponse
if err := json.NewDecoder(resp.Body).Decode(&lq); err != nil {
resp.Body.Close()
if ok := lc.shouldRetry(); !ok {
return errors.Wrapf(err, "error decoding Loki response")
} else {
lc.increaseTicker(ticker)
continue
return fmt.Errorf("error decoding Loki response: %w", err)
}
lc.increaseTicker(ticker)
continue
}
resp.Body.Close()
lc.Logger.Tracef("Got response: %+v", lq)
@@ -261,7 +258,7 @@ func (lc *LokiClient) Tail(ctx context.Context) (chan *LokiResponse, error) {
if err != nil {
lc.Logger.Errorf("Error connecting to websocket, err: %s", err)
return responseChan, fmt.Errorf("error connecting to websocket")
return responseChan, errors.New("error connecting to websocket")
}
lc.t.Go(func() error {

View file

@@ -276,7 +276,7 @@ func extractBucketAndPrefixFromEventBridge(message *string) (string, string, err
if eventBody.Detail.Bucket.Name != "" {
return eventBody.Detail.Bucket.Name, eventBody.Detail.Object.Key, nil
}
return "", "", fmt.Errorf("invalid event body for event bridge format")
return "", "", errors.New("invalid event body for event bridge format")
}
func extractBucketAndPrefixFromS3Notif(message *string) (string, string, error) {
@@ -286,7 +286,7 @@ func extractBucketAndPrefixFromS3Notif(message *string) (string, string, error)
return "", "", err
}
if len(s3notifBody.Records) == 0 {
return "", "", fmt.Errorf("no records found in S3 notification")
return "", "", errors.New("no records found in S3 notification")
}
if !strings.HasPrefix(s3notifBody.Records[0].EventName, "ObjectCreated:") {
return "", "", fmt.Errorf("event %s is not supported", s3notifBody.Records[0].EventName)
@@ -295,19 +295,20 @@ func extractBucketAndPrefixFromS3Notif(message *string) (string, string, error)
}
func (s *S3Source) extractBucketAndPrefix(message *string) (string, string, error) {
if s.Config.SQSFormat == SQSFormatEventBridge {
switch s.Config.SQSFormat {
case SQSFormatEventBridge:
bucket, key, err := extractBucketAndPrefixFromEventBridge(message)
if err != nil {
return "", "", err
}
return bucket, key, nil
} else if s.Config.SQSFormat == SQSFormatS3Notification {
case SQSFormatS3Notification:
bucket, key, err := extractBucketAndPrefixFromS3Notif(message)
if err != nil {
return "", "", err
}
return bucket, key, nil
} else {
default:
bucket, key, err := extractBucketAndPrefixFromEventBridge(message)
if err == nil {
s.Config.SQSFormat = SQSFormatEventBridge
@@ -318,7 +319,7 @@ func (s *S3Source) extractBucketAndPrefix(message *string) (string, string, erro
s.Config.SQSFormat = SQSFormatS3Notification
return bucket, key, nil
}
return "", "", fmt.Errorf("SQS message format not supported")
return "", "", errors.New("SQS message format not supported")
}
}
@@ -496,15 +497,15 @@ func (s *S3Source) UnmarshalConfig(yamlConfig []byte) error {
}
if s.Config.BucketName != "" && s.Config.SQSName != "" {
return fmt.Errorf("bucket_name and sqs_name are mutually exclusive")
return errors.New("bucket_name and sqs_name are mutually exclusive")
}
if s.Config.PollingMethod == PollMethodSQS && s.Config.SQSName == "" {
return fmt.Errorf("sqs_name is required when using sqs polling method")
return errors.New("sqs_name is required when using sqs polling method")
}
if s.Config.BucketName == "" && s.Config.PollingMethod == PollMethodList {
return fmt.Errorf("bucket_name is required")
return errors.New("bucket_name is required")
}
if s.Config.SQSFormat != "" && s.Config.SQSFormat != SQSFormatEventBridge && s.Config.SQSFormat != SQSFormatS3Notification {
@@ -567,7 +568,7 @@ func (s *S3Source) ConfigureByDSN(dsn string, labels map[string]string, logger *
dsn = strings.TrimPrefix(dsn, "s3://")
args := strings.Split(dsn, "?")
if len(args[0]) == 0 {
return fmt.Errorf("empty s3:// DSN")
return errors.New("empty s3:// DSN")
}
if len(args) == 2 && len(args[1]) != 0 {

7
pkg/cache/cache.go vendored
View file

@@ -111,7 +111,8 @@ func SetKey(cacheName string, key string, value string, expiration *time.Duratio
func GetKey(cacheName string, key string) (string, error) {
for i, name := range CacheNames {
if name == cacheName {
if value, err := Caches[i].Get(key); err != nil {
value, err := Caches[i].Get(key)
if err != nil {
// do not warn or log if key not found
if errors.Is(err, gcache.KeyNotFoundError) {
return "", nil
@@ -119,9 +120,9 @@ func GetKey(cacheName string, key string) (string, error) {
CacheConfig[i].Logger.Warningf("While getting key %s in cache %s: %s", key, cacheName, err)
return "", err
} else {
return value.(string), nil
}
return value.(string), nil
}
}

View file

@@ -645,7 +645,6 @@ func (t *HubTestItem) Run() error {
return t.RunWithLogFile()
} else if t.Config.NucleiTemplate != "" {
return t.RunWithNucleiTemplate()
} else {
return fmt.Errorf("log file or nuclei template must be set in '%s'", t.Name)
}
return fmt.Errorf("log file or nuclei template must be set in '%s'", t.Name)
}

View file

@@ -82,22 +82,22 @@ func (u *CancelOnFilter) OnBucketInit(bucketFactory *BucketFactory) error {
cancelExprCacheLock.Unlock()
u.CancelOnFilter = compiled.CancelOnFilter
return nil
} else {
cancelExprCacheLock.Unlock()
//release the lock during compile
compiledExpr.CancelOnFilter, err = expr.Compile(bucketFactory.CancelOnFilter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
if err != nil {
bucketFactory.logger.Errorf("reset_filter compile error : %s", err)
return err
}
u.CancelOnFilter = compiledExpr.CancelOnFilter
if bucketFactory.Debug {
u.Debug = true
}
cancelExprCacheLock.Lock()
cancelExprCache[bucketFactory.CancelOnFilter] = compiledExpr
cancelExprCacheLock.Unlock()
}
return err
cancelExprCacheLock.Unlock()
//release the lock during compile
compiledExpr.CancelOnFilter, err = expr.Compile(bucketFactory.CancelOnFilter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
if err != nil {
bucketFactory.logger.Errorf("reset_filter compile error : %s", err)
return err
}
u.CancelOnFilter = compiledExpr.CancelOnFilter
if bucketFactory.Debug {
u.Debug = true
}
cancelExprCacheLock.Lock()
cancelExprCache[bucketFactory.CancelOnFilter] = compiledExpr
cancelExprCacheLock.Unlock()
return nil
}

View file

@@ -39,11 +39,9 @@ func (u *Uniq) OnBucketPour(bucketFactory *BucketFactory) func(types.Event, *Lea
leaky.logger.Debugf("Uniq(%s) : ok", element)
u.KeyCache[element] = true
return &msg
} else {
leaky.logger.Debugf("Uniq(%s) : ko, discard event", element)
return nil
}
leaky.logger.Debugf("Uniq(%s) : ko, discard event", element)
return nil
}
}

View file

@@ -63,11 +63,12 @@ func (e *Event) SetParsed(key string, value string) bool {
}
func (e *Event) GetType() string {
if e.Type == OVFLW {
switch e.Type {
case OVFLW:
return "overflow"
} else if e.Type == LOG {
case LOG:
return "log"
} else {
default:
log.Warningf("unknown event type for %+v", e)
return "unknown"
}