lint: gocritic/typeDefFirst (ensure type definitions come before methods) (#3404)

* lint: gocritic/typeDefFirst (ensure type definitions come before methods)

* lint
This commit is contained in:
mmetc 2025-02-17 10:55:18 +01:00 committed by GitHub
parent 6f737a71f1
commit 5136d928ed
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
12 changed files with 80 additions and 65 deletions

View file

@@ -213,7 +213,6 @@ linters-settings:
gocritic:
enable-all: true
disabled-checks:
- typeDefFirst
- paramTypeCombine
- httpNoBody
- ifElseChain

View file

@@ -29,6 +29,13 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/types"
)
type configGetter func() *csconfig.Config
type cliAlerts struct {
client *apiclient.ApiClient
cfg configGetter
}
func decisionsFromAlert(alert *models.Alert) string {
ret := ""
decMap := make(map[string]int)
@@ -183,13 +190,6 @@ func (cli *cliAlerts) displayOneAlert(alert *models.Alert, withDetail bool) erro
return nil
}
type configGetter func() *csconfig.Config
type cliAlerts struct {
client *apiclient.ApiClient
cfg configGetter
}
func New(getconfig configGetter) *cliAlerts {
return &cliAlerts{
cfg: getconfig,

View file

@@ -24,6 +24,13 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/types"
)
type configGetter func() *csconfig.Config
type cliDecisions struct {
client *apiclient.ApiClient
cfg configGetter
}
func (cli *cliDecisions) decisionsToTable(alerts *models.GetAlertsResponse, printMachine bool) error {
/*here we cheat a bit : to make it more readable for the user, we dedup some entries*/
spamLimit := make(map[string]bool)
@@ -65,17 +72,17 @@ func (cli *cliDecisions) decisionsToTable(alerts *models.GetAlertsResponse, prin
for _, alertItem := range *alerts {
for _, decisionItem := range alertItem.Decisions {
raw := []string{
fmt.Sprintf("%d", decisionItem.ID),
strconv.FormatInt(decisionItem.ID, 10),
*decisionItem.Origin,
*decisionItem.Scope + ":" + *decisionItem.Value,
*decisionItem.Scenario,
*decisionItem.Type,
alertItem.Source.Cn,
alertItem.Source.GetAsNumberName(),
fmt.Sprintf("%d", *alertItem.EventsCount),
strconv.FormatInt(int64(*alertItem.EventsCount), 10),
*decisionItem.Duration,
fmt.Sprintf("%t", *decisionItem.Simulated),
fmt.Sprintf("%d", alertItem.ID),
strconv.FormatBool(*decisionItem.Simulated),
strconv.FormatInt(alertItem.ID, 10),
}
if printMachine {
raw = append(raw, alertItem.MachineID)
@@ -115,13 +122,6 @@ func (cli *cliDecisions) decisionsToTable(alerts *models.GetAlertsResponse, prin
return nil
}
type configGetter func() *csconfig.Config
type cliDecisions struct {
client *apiclient.ApiClient
cfg configGetter
}
func New(cfg configGetter) *cliDecisions {
return &cliDecisions{
cfg: cfg,

View file

@@ -85,6 +85,12 @@ func stripAnsiString(str string) string {
return reStripAnsi.ReplaceAllString(str, "")
}
type configGetter func() *csconfig.Config
type cliSupport struct {
cfg configGetter
}
func (cli *cliSupport) dumpMetrics(ctx context.Context, db *database.Client, zw *zip.Writer) error {
log.Info("Collecting prometheus metrics")
@@ -393,12 +399,6 @@ func (cli *cliSupport) dumpCrash(zw *zip.Writer) error {
return nil
}
type configGetter func() *csconfig.Config
type cliSupport struct {
cfg configGetter
}
func New(cfg configGetter) *cliSupport {
return &cliSupport{
cfg: cfg,

View file

@@ -16,7 +16,7 @@ type DataSourceCommonCfg struct {
Config map[string]interface{} `yaml:",inline"` // to keep the datasource-specific configuration directives
}
var (
const (
TAIL_MODE = "tail"
CAT_MODE = "cat"
SERVER_MODE = "server" // No difference with tail, just a bit more verbose

View file

@@ -23,7 +23,7 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/types"
)
var dataSourceName = "kafka"
const dataSourceName = "kafka"
var linesRead = prometheus.NewCounterVec(
prometheus.CounterOpts{
@@ -150,14 +150,18 @@ func (k *KafkaSource) Dump() interface{} {
func (k *KafkaSource) ReadMessage(ctx context.Context, out chan types.Event) error {
// Start processing from latest Offset
k.Reader.SetOffsetAt(ctx, time.Now())
for {
k.logger.Tracef("reading message from topic '%s'", k.Config.Topic)
m, err := k.Reader.ReadMessage(ctx)
if err != nil {
if errors.Is(err, io.EOF) {
return nil
}
k.logger.Errorln(fmt.Errorf("while reading %s message: %w", dataSourceName, err))
continue
}
@@ -220,19 +224,23 @@ func (kc *KafkaConfiguration) NewTLSConfig() (*tls.Config, error) {
if err != nil {
return &tlsConfig, err
}
tlsConfig.Certificates = []tls.Certificate{cert}
caCert, err := os.ReadFile(kc.TLS.CaCert)
if err != nil {
return &tlsConfig, err
}
caCertPool, err := x509.SystemCertPool()
if err != nil {
return &tlsConfig, fmt.Errorf("unable to load system CA certificates: %w", err)
}
if caCertPool == nil {
caCertPool = x509.NewCertPool()
}
caCertPool.AppendCertsFromPEM(caCert)
tlsConfig.RootCAs = caCertPool
@@ -273,9 +281,11 @@ func (kc *KafkaConfiguration) NewReader(dialer *kafka.Dialer, logger *log.Entry)
Logger: kafka.LoggerFunc(logger.Debugf),
ErrorLogger: kafka.LoggerFunc(logger.Errorf),
}
if kc.GroupID != "" && kc.Partition != 0 {
return &kafka.Reader{}, errors.New("cannot specify both group_id and partition")
}
if kc.GroupID != "" {
rConf.GroupID = kc.GroupID
} else if kc.Partition != 0 {
@@ -283,8 +293,10 @@ func (kc *KafkaConfiguration) NewReader(dialer *kafka.Dialer, logger *log.Entry)
} else {
logger.Warnf("no group_id specified, crowdsec will only read from the 1st partition of the topic")
}
if err := rConf.Validate(); err != nil {
return &kafka.Reader{}, fmt.Errorf("while validating reader configuration: %w", err)
}
return kafka.NewReader(rConf), nil
}

View file

@@ -22,7 +22,7 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/types"
)
var SyncInterval = time.Second * 10
const SyncInterval = time.Second * 10
const PapiPullKey = "papi:last_pull"

View file

@@ -18,7 +18,7 @@ type AppsecCollection struct {
NativeRules []string
}
var APPSEC_RULE = "appsec-rule"
const APPSEC_RULE = "appsec-rule"
// to be filled w/ seb update
type AppsecCollectionConfig struct {
@@ -77,18 +77,22 @@ func LoadCollection(pattern string, logger *log.Entry) ([]AppsecCollection, erro
for _, rulesFile := range appsecRule.SecLangFilesRules {
logger.Debugf("Adding rules from %s", rulesFile)
fullPath := filepath.Join(hub.GetDataDir(), rulesFile)
c, err := os.ReadFile(fullPath)
if err != nil {
logger.Errorf("unable to read file %s : %s", rulesFile, err)
continue
}
for _, line := range strings.Split(string(c), "\n") {
if strings.HasPrefix(line, "#") {
continue
}
if strings.TrimSpace(line) == "" {
continue
}
appsecCol.NativeRules = append(appsecCol.NativeRules, line)
}
}

View file

@@ -209,6 +209,36 @@ func (l *LocalApiClientCfg) Load() error {
return nil
}
/*local api service configuration*/
type LocalApiServerCfg struct {
Enable *bool `yaml:"enable"`
ListenURI string `yaml:"listen_uri,omitempty"` // 127.0.0.1:8080
ListenSocket string `yaml:"listen_socket,omitempty"`
TLS *TLSCfg `yaml:"tls"`
DbConfig *DatabaseCfg `yaml:"-"`
LogDir string `yaml:"-"`
LogMedia string `yaml:"-"`
OnlineClient *OnlineApiClientCfg `yaml:"online_client"`
ProfilesPath string `yaml:"profiles_path,omitempty"`
ConsoleConfigPath string `yaml:"console_path,omitempty"`
ConsoleConfig *ConsoleConfig `yaml:"-"`
Profiles []*ProfileCfg `yaml:"-"`
LogLevel *log.Level `yaml:"log_level"`
UseForwardedForHeaders bool `yaml:"use_forwarded_for_headers,omitempty"`
TrustedProxies *[]string `yaml:"trusted_proxies,omitempty"`
CompressLogs *bool `yaml:"-"`
LogMaxSize int `yaml:"-"`
LogMaxAge int `yaml:"-"`
LogMaxFiles int `yaml:"-"`
LogFormat string `yaml:"-"`
TrustedIPs []string `yaml:"trusted_ips,omitempty"`
PapiLogLevel *log.Level `yaml:"papi_log_level"`
DisableRemoteLapiRegistration bool `yaml:"disable_remote_lapi_registration,omitempty"`
CapiWhitelistsPath string `yaml:"capi_whitelists_path,omitempty"`
CapiWhitelists *CapiWhitelist `yaml:"-"`
AutoRegister *LocalAPIAutoRegisterCfg `yaml:"auto_registration,omitempty"`
}
func (c *LocalApiServerCfg) GetTrustedIPs() ([]net.IPNet, error) {
trustedIPs := make([]net.IPNet, 0)
@@ -250,36 +280,6 @@ type LocalAPIAutoRegisterCfg struct {
AllowedRangesParsed []*net.IPNet `yaml:"-"`
}
/*local api service configuration*/
type LocalApiServerCfg struct {
Enable *bool `yaml:"enable"`
ListenURI string `yaml:"listen_uri,omitempty"` // 127.0.0.1:8080
ListenSocket string `yaml:"listen_socket,omitempty"`
TLS *TLSCfg `yaml:"tls"`
DbConfig *DatabaseCfg `yaml:"-"`
LogDir string `yaml:"-"`
LogMedia string `yaml:"-"`
OnlineClient *OnlineApiClientCfg `yaml:"online_client"`
ProfilesPath string `yaml:"profiles_path,omitempty"`
ConsoleConfigPath string `yaml:"console_path,omitempty"`
ConsoleConfig *ConsoleConfig `yaml:"-"`
Profiles []*ProfileCfg `yaml:"-"`
LogLevel *log.Level `yaml:"log_level"`
UseForwardedForHeaders bool `yaml:"use_forwarded_for_headers,omitempty"`
TrustedProxies *[]string `yaml:"trusted_proxies,omitempty"`
CompressLogs *bool `yaml:"-"`
LogMaxSize int `yaml:"-"`
LogMaxAge int `yaml:"-"`
LogMaxFiles int `yaml:"-"`
LogFormat string `yaml:"-"`
TrustedIPs []string `yaml:"trusted_ips,omitempty"`
PapiLogLevel *log.Level `yaml:"papi_log_level"`
DisableRemoteLapiRegistration bool `yaml:"disable_remote_lapi_registration,omitempty"`
CapiWhitelistsPath string `yaml:"capi_whitelists_path,omitempty"`
CapiWhitelists *CapiWhitelist `yaml:"-"`
AutoRegister *LocalAPIAutoRegisterCfg `yaml:"auto_registration,omitempty"`
}
func (c *LocalApiServerCfg) ClientURL() string {
if c == nil {
return ""

View file

@@ -17,7 +17,7 @@ type ExprRuntimeDebug struct {
Outputs []OpOutput
}
var IndentStep = 4
const IndentStep = 4
// we use this struct to store the output of the expr runtime
type OpOutput struct {

View file

@@ -76,7 +76,7 @@ type BucketFactory struct {
}
// we use one NameGenerator for all the future buckets
var seed namegenerator.Generator = namegenerator.NewNameGenerator(time.Now().UTC().UnixNano())
var seed = namegenerator.NewNameGenerator(time.Now().UTC().UnixNano())
func validateLeakyType(bucketFactory *BucketFactory) error {
if bucketFactory.Capacity <= 0 { // capacity must be a positive int
@@ -406,7 +406,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error {
bucketFactory.logger.Tracef("Adding a non duplicate filter")
bucketFactory.processors = append(bucketFactory.processors, &Uniq{})
bucketFactory.logger.Infof("Compiling distinct '%s'", bucketFactory.Distinct)
//we're compiling and discarding the expression to be able to detect it during loading
// we're compiling and discarding the expression to be able to detect it during loading
_, err = expr.Compile(bucketFactory.Distinct, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
if err != nil {
return fmt.Errorf("invalid distinct '%s' in %s: %w", bucketFactory.Distinct, bucketFactory.Filename, err)
@@ -416,7 +416,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error {
if bucketFactory.CancelOnFilter != "" {
bucketFactory.logger.Tracef("Adding a cancel_on filter")
bucketFactory.processors = append(bucketFactory.processors, &CancelOnFilter{})
//we're compiling and discarding the expression to be able to detect it during loading
// we're compiling and discarding the expression to be able to detect it during loading
_, err = expr.Compile(bucketFactory.CancelOnFilter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
if err != nil {
return fmt.Errorf("invalid cancel_on '%s' in %s: %w", bucketFactory.CancelOnFilter, bucketFactory.Filename, err)
@@ -450,7 +450,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error {
if bucketFactory.ConditionalOverflow != "" {
bucketFactory.logger.Tracef("Adding conditional overflow")
bucketFactory.processors = append(bucketFactory.processors, &ConditionalOverflow{})
//we're compiling and discarding the expression to be able to detect it during loading
// we're compiling and discarding the expression to be able to detect it during loading
_, err = expr.Compile(bucketFactory.ConditionalOverflow, exprhelpers.GetExprOptions(map[string]interface{}{"queue": &types.Queue{}, "leaky": &Leaky{}, "evt": &types.Event{}})...)
if err != nil {
return fmt.Errorf("invalid condition '%s' in %s: %w", bucketFactory.ConditionalOverflow, bucketFactory.Filename, err)

View file

@@ -16,7 +16,7 @@ import (
)
//nolint:dupword
var fakeSystemctlOutput = `UNIT FILE STATE VENDOR PRESET
const fakeSystemctlOutput = `UNIT FILE STATE VENDOR PRESET
crowdsec-setup-detect.service enabled enabled
apache2.service enabled enabled
apparmor.service enabled enabled