Mirror of https://github.com/crowdsecurity/crowdsec.git (synced 2025-05-11 20:36:12 +02:00)
reduce log verbosity, minor CI fixes, lint (#3157)
* pkg/cwhub: redundant log messages
* CI: fixture output and elapsed time
* CI: preload only essential hub items
* report full version (including -rc2 etc.) with cscli hub update --debug
* lint
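Most of the lint changes below follow a single pattern: error messages with no format verbs move from fmt.Errorf to errors.New, which Go linters commonly flag as unnecessary formatting. A minimal sketch of the pattern (the helper functions are hypothetical, not crowdsec code):

```go
// Sketch of the lint pattern applied throughout this diff. The helpers below
// are hypothetical; only the before/after shape matches the changes.
package main

import (
	"errors"
	"fmt"
)

func validateGroupName(name string) error {
	if name == "" {
		// before: return fmt.Errorf("group_name is mandatory for CloudwatchSource")
		return errors.New("group_name is mandatory for CloudwatchSource")
	}
	return nil
}

func wrapParse(raw string, err error) error {
	// fmt.Errorf stays where the message interpolates values or wraps a cause with %w
	return fmt.Errorf("while parsing %q: %w", raw, err)
}

func main() {
	fmt.Println(validateGroupName(""))
	fmt.Println(wrapParse("???", errors.New("bad token")))
}
```

fmt.Errorf remains in place wherever formatting or error wrapping is actually needed; only constant messages are converted.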
parent 6f5d75c5f1
commit 136dba61d9
19 changed files with 119 additions and 100 deletions
@@ -12,6 +12,8 @@ import (
 	log "github.com/sirupsen/logrus"
 	"golang.org/x/mod/semver"
 
+	"github.com/crowdsecurity/go-cs-lib/version"
+
 	"github.com/crowdsecurity/crowdsec/pkg/csconfig"
 	"github.com/crowdsecurity/crowdsec/pkg/cwversion"
 )
@@ -74,13 +76,13 @@ func chooseBranch(ctx context.Context, cfg *csconfig.Config) string {
 	}
 
 	if csVersion == latest {
-		log.Debugf("Latest crowdsec version (%s), using hub branch 'master'", csVersion)
+		log.Debugf("Latest crowdsec version (%s), using hub branch 'master'", version.String())
 		return "master"
 	}
 
 	// if current version is greater than the latest we are in pre-release
 	if semver.Compare(csVersion, latest) == 1 {
-		log.Debugf("Your current crowdsec version seems to be a pre-release (%s), using hub branch 'master'", csVersion)
+		log.Debugf("Your current crowdsec version seems to be a pre-release (%s), using hub branch 'master'", version.String())
 		return "master"
 	}
 
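For the two Debugf changes just above: the value compared against the hub's latest release is the stripped core version, so logging it hides pre-release suffixes such as -rc2; logging the full version string restores them in `cscli hub update --debug` output. A hedged sketch under that assumption (not the actual crowdsec code):

```go
// Illustration only: fullVersion stands in for what version.String() would
// report on a pre-release build, and the trimming shown is an assumption.
package main

import (
	"fmt"
	"strings"

	"golang.org/x/mod/semver"
)

func main() {
	fullVersion := "v1.6.3-rc2" // assumed full build version
	// the value used for the semver comparison has the pre-release suffix stripped
	csVersion := semver.Canonical(strings.SplitN(fullVersion, "-", 2)[0])
	latest := "v1.6.2" // latest hub release, for the sake of the example

	if semver.Compare(csVersion, latest) == 1 {
		// before: the log showed csVersion ("v1.6.3"), hiding the "-rc2" suffix
		fmt.Printf("pre-release (%s), using hub branch 'master'\n", fullVersion)
	}
}
```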
@@ -2,6 +2,7 @@ package cloudwatchacquisition
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"net/url"
 	"os"
@@ -111,7 +112,7 @@ func (cw *CloudwatchSource) UnmarshalConfig(yamlConfig []byte) error {
 	}
 
 	if len(cw.Config.GroupName) == 0 {
-		return fmt.Errorf("group_name is mandatory for CloudwatchSource")
+		return errors.New("group_name is mandatory for CloudwatchSource")
 	}
 
 	if cw.Config.Mode == "" {
@@ -189,7 +190,7 @@ func (cw *CloudwatchSource) Configure(yamlConfig []byte, logger *log.Entry, Metr
 	} else {
 		if cw.Config.AwsRegion == nil {
 			cw.logger.Errorf("aws_region is not specified, specify it or aws_config_dir")
-			return fmt.Errorf("aws_region is not specified, specify it or aws_config_dir")
+			return errors.New("aws_region is not specified, specify it or aws_config_dir")
 		}
 		os.Setenv("AWS_REGION", *cw.Config.AwsRegion)
 	}
@@ -228,7 +229,7 @@ func (cw *CloudwatchSource) newClient() error {
 	}
 
 	if sess == nil {
-		return fmt.Errorf("failed to create aws session")
+		return errors.New("failed to create aws session")
 	}
 	if v := os.Getenv("AWS_ENDPOINT_FORCE"); v != "" {
 		cw.logger.Debugf("[testing] overloading endpoint with %s", v)
@@ -237,7 +238,7 @@ func (cw *CloudwatchSource) newClient() error {
 		cw.cwClient = cloudwatchlogs.New(sess)
 	}
 	if cw.cwClient == nil {
-		return fmt.Errorf("failed to create cloudwatch client")
+		return errors.New("failed to create cloudwatch client")
 	}
 	return nil
 }
@@ -516,7 +517,7 @@ func (cw *CloudwatchSource) TailLogStream(cfg *LogStreamTailConfig, outChan chan
 			}
 		case <-cfg.t.Dying():
 			cfg.logger.Infof("logstream tail stopping")
-			return fmt.Errorf("killed")
+			return errors.New("killed")
 		}
 	}
 }
@@ -527,11 +528,11 @@ func (cw *CloudwatchSource) ConfigureByDSN(dsn string, labels map[string]string,
 	dsn = strings.TrimPrefix(dsn, cw.GetName()+"://")
 	args := strings.Split(dsn, "?")
 	if len(args) != 2 {
-		return fmt.Errorf("query is mandatory (at least start_date and end_date or backlog)")
+		return errors.New("query is mandatory (at least start_date and end_date or backlog)")
 	}
 	frags := strings.Split(args[0], ":")
 	if len(frags) != 2 {
-		return fmt.Errorf("cloudwatch path must contain group and stream : /my/group/name:stream/name")
+		return errors.New("cloudwatch path must contain group and stream : /my/group/name:stream/name")
 	}
 	cw.Config.GroupName = frags[0]
 	cw.Config.StreamName = &frags[1]
@@ -547,7 +548,7 @@ func (cw *CloudwatchSource) ConfigureByDSN(dsn string, labels map[string]string,
 		switch k {
 		case "log_level":
 			if len(v) != 1 {
-				return fmt.Errorf("expected zero or one value for 'log_level'")
+				return errors.New("expected zero or one value for 'log_level'")
 			}
 			lvl, err := log.ParseLevel(v[0])
 			if err != nil {
@@ -557,14 +558,14 @@ func (cw *CloudwatchSource) ConfigureByDSN(dsn string, labels map[string]string,
 
 		case "profile":
 			if len(v) != 1 {
-				return fmt.Errorf("expected zero or one value for 'profile'")
+				return errors.New("expected zero or one value for 'profile'")
 			}
 			awsprof := v[0]
 			cw.Config.AwsProfile = &awsprof
 			cw.logger.Debugf("profile set to '%s'", *cw.Config.AwsProfile)
 		case "start_date":
 			if len(v) != 1 {
-				return fmt.Errorf("expected zero or one argument for 'start_date'")
+				return errors.New("expected zero or one argument for 'start_date'")
 			}
 			//let's reuse our parser helper so that a ton of date formats are supported
 			strdate, startDate := parser.GenDateParse(v[0])
@@ -572,7 +573,7 @@ func (cw *CloudwatchSource) ConfigureByDSN(dsn string, labels map[string]string,
 			cw.Config.StartTime = &startDate
 		case "end_date":
 			if len(v) != 1 {
-				return fmt.Errorf("expected zero or one argument for 'end_date'")
+				return errors.New("expected zero or one argument for 'end_date'")
 			}
 			//let's reuse our parser helper so that a ton of date formats are supported
 			strdate, endDate := parser.GenDateParse(v[0])
@@ -580,7 +581,7 @@ func (cw *CloudwatchSource) ConfigureByDSN(dsn string, labels map[string]string,
 			cw.Config.EndTime = &endDate
 		case "backlog":
 			if len(v) != 1 {
-				return fmt.Errorf("expected zero or one argument for 'backlog'")
+				return errors.New("expected zero or one argument for 'backlog'")
 			}
 			//let's reuse our parser helper so that a ton of date formats are supported
 			duration, err := time.ParseDuration(v[0])
@@ -605,10 +606,10 @@ func (cw *CloudwatchSource) ConfigureByDSN(dsn string, labels map[string]string,
 	}
 
 	if cw.Config.StreamName == nil || cw.Config.GroupName == "" {
-		return fmt.Errorf("missing stream or group name")
+		return errors.New("missing stream or group name")
 	}
 	if cw.Config.StartTime == nil || cw.Config.EndTime == nil {
-		return fmt.Errorf("start_date and end_date or backlog are mandatory in one-shot mode")
+		return errors.New("start_date and end_date or backlog are mandatory in one-shot mode")
 	}
 
 	cw.Config.Mode = configuration.CAT_MODE
@@ -699,7 +700,7 @@ func cwLogToEvent(log *cloudwatchlogs.OutputLogEvent, cfg *LogStreamTailConfig)
 	l := types.Line{}
 	evt := types.Event{}
 	if log.Message == nil {
-		return evt, fmt.Errorf("nil message")
+		return evt, errors.New("nil message")
 	}
 	msg := *log.Message
 	if cfg.PrependCloudwatchTimestamp != nil && *cfg.PrependCloudwatchTimestamp {
@@ -3,6 +3,7 @@ package dockeracquisition
 import (
 	"bufio"
 	"context"
+	"errors"
 	"fmt"
 	"net/url"
 	"regexp"
@@ -88,11 +89,11 @@ func (d *DockerSource) UnmarshalConfig(yamlConfig []byte) error {
 	}
 
 	if len(d.Config.ContainerName) == 0 && len(d.Config.ContainerID) == 0 && len(d.Config.ContainerIDRegexp) == 0 && len(d.Config.ContainerNameRegexp) == 0 && !d.Config.UseContainerLabels {
-		return fmt.Errorf("no containers names or containers ID configuration provided")
+		return errors.New("no containers names or containers ID configuration provided")
 	}
 
 	if d.Config.UseContainerLabels && (len(d.Config.ContainerName) > 0 || len(d.Config.ContainerID) > 0 || len(d.Config.ContainerIDRegexp) > 0 || len(d.Config.ContainerNameRegexp) > 0) {
-		return fmt.Errorf("use_container_labels and container_name, container_id, container_id_regexp, container_name_regexp are mutually exclusive")
+		return errors.New("use_container_labels and container_name, container_id, container_id_regexp, container_name_regexp are mutually exclusive")
 	}
 
 	d.CheckIntervalDuration, err = time.ParseDuration(d.Config.CheckInterval)
@@ -225,7 +226,7 @@ func (d *DockerSource) ConfigureByDSN(dsn string, labels map[string]string, logg
 		switch k {
 		case "log_level":
 			if len(v) != 1 {
-				return fmt.Errorf("only one 'log_level' parameters is required, not many")
+				return errors.New("only one 'log_level' parameters is required, not many")
 			}
 			lvl, err := log.ParseLevel(v[0])
 			if err != nil {
@@ -234,17 +235,17 @@ func (d *DockerSource) ConfigureByDSN(dsn string, labels map[string]string, logg
 			d.logger.Logger.SetLevel(lvl)
 		case "until":
 			if len(v) != 1 {
-				return fmt.Errorf("only one 'until' parameters is required, not many")
+				return errors.New("only one 'until' parameters is required, not many")
 			}
 			d.containerLogsOptions.Until = v[0]
 		case "since":
 			if len(v) != 1 {
-				return fmt.Errorf("only one 'since' parameters is required, not many")
+				return errors.New("only one 'since' parameters is required, not many")
 			}
 			d.containerLogsOptions.Since = v[0]
 		case "follow_stdout":
 			if len(v) != 1 {
-				return fmt.Errorf("only one 'follow_stdout' parameters is required, not many")
+				return errors.New("only one 'follow_stdout' parameters is required, not many")
 			}
 			followStdout, err := strconv.ParseBool(v[0])
 			if err != nil {
@@ -254,7 +255,7 @@ func (d *DockerSource) ConfigureByDSN(dsn string, labels map[string]string, logg
 			d.containerLogsOptions.ShowStdout = followStdout
 		case "follow_stderr":
 			if len(v) != 1 {
-				return fmt.Errorf("only one 'follow_stderr' parameters is required, not many")
+				return errors.New("only one 'follow_stderr' parameters is required, not many")
 			}
 			followStdErr, err := strconv.ParseBool(v[0])
 			if err != nil {
@@ -264,7 +265,7 @@ func (d *DockerSource) ConfigureByDSN(dsn string, labels map[string]string, logg
 			d.containerLogsOptions.ShowStderr = followStdErr
 		case "docker_host":
 			if len(v) != 1 {
-				return fmt.Errorf("only one 'docker_host' parameters is required, not many")
+				return errors.New("only one 'docker_host' parameters is required, not many")
 			}
 			if err := client.WithHost(v[0])(dockerClient); err != nil {
 				return err
@@ -3,6 +3,7 @@ package journalctlacquisition
 import (
 	"bufio"
 	"context"
+	"errors"
 	"fmt"
 	"net/url"
 	"os/exec"
@@ -98,7 +99,7 @@ func (j *JournalCtlSource) runJournalCtl(out chan types.Event, t *tomb.Tomb) err
 	if stdoutscanner == nil {
 		cancel()
 		cmd.Wait()
-		return fmt.Errorf("failed to create stdout scanner")
+		return errors.New("failed to create stdout scanner")
 	}
 
 	stderrScanner := bufio.NewScanner(stderr)
@@ -106,7 +107,7 @@ func (j *JournalCtlSource) runJournalCtl(out chan types.Event, t *tomb.Tomb) err
 	if stderrScanner == nil {
 		cancel()
 		cmd.Wait()
-		return fmt.Errorf("failed to create stderr scanner")
+		return errors.New("failed to create stderr scanner")
 	}
 	t.Go(func() error {
 		return readLine(stdoutscanner, stdoutChan, errChan)
@@ -189,7 +190,7 @@ func (j *JournalCtlSource) UnmarshalConfig(yamlConfig []byte) error {
 	}
 
 	if len(j.config.Filters) == 0 {
-		return fmt.Errorf("journalctl_filter is required")
+		return errors.New("journalctl_filter is required")
 	}
 	j.args = append(args, j.config.Filters...)
 	j.src = fmt.Sprintf("journalctl-%s", strings.Join(j.config.Filters, "."))
@@ -223,7 +224,7 @@ func (j *JournalCtlSource) ConfigureByDSN(dsn string, labels map[string]string,
 
 	qs := strings.TrimPrefix(dsn, "journalctl://")
 	if len(qs) == 0 {
-		return fmt.Errorf("empty journalctl:// DSN")
+		return errors.New("empty journalctl:// DSN")
 	}
 
 	params, err := url.ParseQuery(qs)
@@ -236,7 +237,7 @@ func (j *JournalCtlSource) ConfigureByDSN(dsn string, labels map[string]string,
 			j.config.Filters = append(j.config.Filters, value...)
 		case "log_level":
 			if len(value) != 1 {
-				return fmt.Errorf("expected zero or one value for 'log_level'")
+				return errors.New("expected zero or one value for 'log_level'")
 			}
 			lvl, err := log.ParseLevel(value[0])
 			if err != nil {
@@ -278,7 +278,7 @@ func (kc *KafkaConfiguration) NewReader(dialer *kafka.Dialer, logger *log.Entry)
 		ErrorLogger: kafka.LoggerFunc(logger.Errorf),
 	}
 	if kc.GroupID != "" && kc.Partition != 0 {
-		return &kafka.Reader{}, fmt.Errorf("cannot specify both group_id and partition")
+		return &kafka.Reader{}, errors.New("cannot specify both group_id and partition")
 	}
 	if kc.GroupID != "" {
 		rConf.GroupID = kc.GroupID
@@ -3,6 +3,7 @@ package kubernetesauditacquisition
 import (
 	"context"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"net/http"
@@ -73,15 +74,15 @@ func (ka *KubernetesAuditSource) UnmarshalConfig(yamlConfig []byte) error {
 	ka.config = k8sConfig
 
 	if ka.config.ListenAddr == "" {
-		return fmt.Errorf("listen_addr cannot be empty")
+		return errors.New("listen_addr cannot be empty")
 	}
 
 	if ka.config.ListenPort == 0 {
-		return fmt.Errorf("listen_port cannot be empty")
+		return errors.New("listen_port cannot be empty")
 	}
 
 	if ka.config.WebhookPath == "" {
-		return fmt.Errorf("webhook_path cannot be empty")
+		return errors.New("webhook_path cannot be empty")
 	}
 
 	if ka.config.WebhookPath[0] != '/' {
@@ -119,7 +120,7 @@ func (ka *KubernetesAuditSource) Configure(config []byte, logger *log.Entry, Met
 }
 
 func (ka *KubernetesAuditSource) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry, uuid string) error {
-	return fmt.Errorf("k8s-audit datasource does not support command-line acquisition")
+	return errors.New("k8s-audit datasource does not support command-line acquisition")
 }
 
 func (ka *KubernetesAuditSource) GetMode() string {
@@ -131,7 +132,7 @@ func (ka *KubernetesAuditSource) GetName() string {
 }
 
 func (ka *KubernetesAuditSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error {
-	return fmt.Errorf("k8s-audit datasource does not support one-shot acquisition")
+	return errors.New("k8s-audit datasource does not support one-shot acquisition")
 }
 
 func (ka *KubernetesAuditSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error {
@@ -1,7 +1,7 @@
 package rfc3164
 
 import (
-	"fmt"
+	"errors"
 	"time"
 
 	"github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/syslog/internal/parser/utils"
@@ -52,7 +52,7 @@ func (r *RFC3164) parsePRI() error {
 	pri := 0
 
 	if r.buf[r.position] != '<' {
-		return fmt.Errorf("PRI must start with '<'")
+		return errors.New("PRI must start with '<'")
 	}
 
 	r.position++
@@ -64,18 +64,18 @@ func (r *RFC3164) parsePRI() error {
 			break
 		}
 		if c < '0' || c > '9' {
-			return fmt.Errorf("PRI must be a number")
+			return errors.New("PRI must be a number")
 		}
 		pri = pri*10 + int(c-'0')
 		r.position++
 	}
 
 	if pri > 999 {
-		return fmt.Errorf("PRI must be up to 3 characters long")
+		return errors.New("PRI must be up to 3 characters long")
 	}
 
 	if r.position == r.len && r.buf[r.position-1] != '>' {
-		return fmt.Errorf("PRI must end with '>'")
+		return errors.New("PRI must end with '>'")
 	}
 
 	r.PRI = pri
@@ -98,7 +98,7 @@ func (r *RFC3164) parseTimestamp() error {
 		}
 	}
 	if !validTs {
-		return fmt.Errorf("timestamp is not valid")
+		return errors.New("timestamp is not valid")
 	}
 	if r.useCurrentYear {
 		if r.Timestamp.Year() == 0 {
@@ -122,11 +122,11 @@ func (r *RFC3164) parseHostname() error {
 	}
 	if r.strictHostname {
 		if !utils.IsValidHostnameOrIP(string(hostname)) {
-			return fmt.Errorf("hostname is not valid")
+			return errors.New("hostname is not valid")
 		}
 	}
 	if len(hostname) == 0 {
-		return fmt.Errorf("hostname is empty")
+		return errors.New("hostname is empty")
 	}
 	r.Hostname = string(hostname)
 	return nil
@@ -147,7 +147,7 @@ func (r *RFC3164) parseTag() error {
 		r.position++
 	}
 	if len(tag) == 0 {
-		return fmt.Errorf("tag is empty")
+		return errors.New("tag is empty")
 	}
 	r.Tag = string(tag)
 
@@ -167,7 +167,7 @@ func (r *RFC3164) parseTag() error {
 				break
 			}
 			if c < '0' || c > '9' {
-				return fmt.Errorf("pid inside tag must be a number")
+				return errors.New("pid inside tag must be a number")
 			}
 			tmpPid = append(tmpPid, c)
 			r.position++
@@ -175,7 +175,7 @@ func (r *RFC3164) parseTag() error {
 	}
 
 	if hasPid && !pidEnd {
-		return fmt.Errorf("pid inside tag must be closed with ']'")
+		return errors.New("pid inside tag must be closed with ']'")
 	}
 
 	if hasPid {
@@ -191,7 +191,7 @@ func (r *RFC3164) parseMessage() error {
 	}
 
 	if r.position == r.len {
-		return fmt.Errorf("message is empty")
+		return errors.New("message is empty")
 	}
 
 	c := r.buf[r.position]
@@ -202,7 +202,7 @@ func (r *RFC3164) parseMessage() error {
 
 	for {
 		if r.position >= r.len {
-			return fmt.Errorf("message is empty")
+			return errors.New("message is empty")
 		}
 		c := r.buf[r.position]
 		if c != ' ' {
@@ -219,7 +219,7 @@ func (r *RFC3164) parseMessage() error {
 func (r *RFC3164) Parse(message []byte) error {
 	r.len = len(message)
 	if r.len == 0 {
-		return fmt.Errorf("message is empty")
+		return errors.New("message is empty")
 	}
 	r.buf = message
 
@@ -1,7 +1,7 @@
 package rfc5424
 
 import (
-	"fmt"
+	"errors"
 	"time"
 
 	"github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/syslog/internal/parser/utils"
@@ -52,7 +52,7 @@ func (r *RFC5424) parsePRI() error {
 	pri := 0
 
 	if r.buf[r.position] != '<' {
-		return fmt.Errorf("PRI must start with '<'")
+		return errors.New("PRI must start with '<'")
 	}
 
 	r.position++
@@ -64,18 +64,18 @@ func (r *RFC5424) parsePRI() error {
 			break
 		}
 		if c < '0' || c > '9' {
-			return fmt.Errorf("PRI must be a number")
+			return errors.New("PRI must be a number")
 		}
 		pri = pri*10 + int(c-'0')
 		r.position++
 	}
 
 	if pri > 999 {
-		return fmt.Errorf("PRI must be up to 3 characters long")
+		return errors.New("PRI must be up to 3 characters long")
 	}
 
 	if r.position == r.len && r.buf[r.position-1] != '>' {
-		return fmt.Errorf("PRI must end with '>'")
+		return errors.New("PRI must end with '>'")
 	}
 
 	r.PRI = pri
@@ -84,11 +84,11 @@ func (r *RFC5424) parsePRI() error {
 
 func (r *RFC5424) parseVersion() error {
 	if r.buf[r.position] != '1' {
-		return fmt.Errorf("version must be 1")
+		return errors.New("version must be 1")
 	}
 	r.position += 2
 	if r.position >= r.len {
-		return fmt.Errorf("version must be followed by a space")
+		return errors.New("version must be followed by a space")
 	}
 	return nil
 }
@@ -113,17 +113,17 @@ func (r *RFC5424) parseTimestamp() error {
 	}
 
 	if len(timestamp) == 0 {
-		return fmt.Errorf("timestamp is empty")
+		return errors.New("timestamp is empty")
 	}
 
 	if r.position == r.len {
-		return fmt.Errorf("EOL after timestamp")
+		return errors.New("EOL after timestamp")
 	}
 
 	date, err := time.Parse(VALID_TIMESTAMP, string(timestamp))
 
 	if err != nil {
-		return fmt.Errorf("timestamp is not valid")
+		return errors.New("timestamp is not valid")
 	}
 
 	r.Timestamp = date
@@ -131,7 +131,7 @@ func (r *RFC5424) parseTimestamp() error {
 	r.position++
 
 	if r.position >= r.len {
-		return fmt.Errorf("EOL after timestamp")
+		return errors.New("EOL after timestamp")
 	}
 
 	return nil
@@ -156,11 +156,11 @@ func (r *RFC5424) parseHostname() error {
 	}
 	if r.strictHostname {
 		if !utils.IsValidHostnameOrIP(string(hostname)) {
-			return fmt.Errorf("hostname is not valid")
+			return errors.New("hostname is not valid")
 		}
 	}
 	if len(hostname) == 0 {
-		return fmt.Errorf("hostname is empty")
+		return errors.New("hostname is empty")
 	}
 	r.Hostname = string(hostname)
 	return nil
@@ -185,11 +185,11 @@ func (r *RFC5424) parseAppName() error {
 	}
 
 	if len(appname) == 0 {
-		return fmt.Errorf("appname is empty")
+		return errors.New("appname is empty")
 	}
 
 	if len(appname) > 48 {
-		return fmt.Errorf("appname is too long")
+		return errors.New("appname is too long")
 	}
 
 	r.Tag = string(appname)
@@ -215,11 +215,11 @@ func (r *RFC5424) parseProcID() error {
 	}
 
 	if len(procid) == 0 {
-		return fmt.Errorf("procid is empty")
+		return errors.New("procid is empty")
 	}
 
 	if len(procid) > 128 {
-		return fmt.Errorf("procid is too long")
+		return errors.New("procid is too long")
 	}
 
 	r.PID = string(procid)
@@ -245,11 +245,11 @@ func (r *RFC5424) parseMsgID() error {
 	}
 
 	if len(msgid) == 0 {
-		return fmt.Errorf("msgid is empty")
+		return errors.New("msgid is empty")
 	}
 
 	if len(msgid) > 32 {
-		return fmt.Errorf("msgid is too long")
+		return errors.New("msgid is too long")
 	}
 
 	r.MsgID = string(msgid)
@@ -263,7 +263,7 @@ func (r *RFC5424) parseStructuredData() error {
 		return nil
 	}
 	if r.buf[r.position] != '[' {
-		return fmt.Errorf("structured data must start with '[' or be '-'")
+		return errors.New("structured data must start with '[' or be '-'")
 	}
 	prev := byte(0)
 	for r.position < r.len {
@@ -281,14 +281,14 @@ func (r *RFC5424) parseStructuredData() error {
 	}
 	r.position++
 	if !done {
-		return fmt.Errorf("structured data must end with ']'")
+		return errors.New("structured data must end with ']'")
 	}
 	return nil
 }
 
 func (r *RFC5424) parseMessage() error {
 	if r.position == r.len {
-		return fmt.Errorf("message is empty")
+		return errors.New("message is empty")
 	}
 
 	message := []byte{}
@@ -305,7 +305,7 @@ func (r *RFC5424) parseMessage() error {
 func (r *RFC5424) Parse(message []byte) error {
 	r.len = len(message)
 	if r.len == 0 {
-		return fmt.Errorf("syslog line is empty")
+		return errors.New("syslog line is empty")
 	}
 	r.buf = message
 
@@ -315,7 +315,7 @@ func (r *RFC5424) Parse(message []byte) error {
 	}
 
 	if r.position >= r.len {
-		return fmt.Errorf("EOL after PRI")
+		return errors.New("EOL after PRI")
 	}
 
 	err = r.parseVersion()
@@ -324,7 +324,7 @@ func (r *RFC5424) Parse(message []byte) error {
 	}
 
 	if r.position >= r.len {
-		return fmt.Errorf("EOL after Version")
+		return errors.New("EOL after Version")
 	}
 
 	err = r.parseTimestamp()
@@ -333,7 +333,7 @@ func (r *RFC5424) Parse(message []byte) error {
 	}
 
 	if r.position >= r.len {
-		return fmt.Errorf("EOL after Timestamp")
+		return errors.New("EOL after Timestamp")
 	}
 
 	err = r.parseHostname()
@@ -342,7 +342,7 @@ func (r *RFC5424) Parse(message []byte) error {
 	}
 
 	if r.position >= r.len {
-		return fmt.Errorf("EOL after hostname")
+		return errors.New("EOL after hostname")
 	}
 
 	err = r.parseAppName()
@@ -351,7 +351,7 @@ func (r *RFC5424) Parse(message []byte) error {
 	}
 
 	if r.position >= r.len {
-		return fmt.Errorf("EOL after appname")
+		return errors.New("EOL after appname")
 	}
 
 	err = r.parseProcID()
@@ -360,7 +360,7 @@ func (r *RFC5424) Parse(message []byte) error {
 	}
 
 	if r.position >= r.len {
-		return fmt.Errorf("EOL after ProcID")
+		return errors.New("EOL after ProcID")
 	}
 
 	err = r.parseMsgID()
@@ -369,7 +369,7 @@ func (r *RFC5424) Parse(message []byte) error {
 	}
 
 	if r.position >= r.len {
-		return fmt.Errorf("EOL after MSGID")
+		return errors.New("EOL after MSGID")
 	}
 
 	err = r.parseStructuredData()
@@ -378,7 +378,7 @@ func (r *RFC5424) Parse(message []byte) error {
 	}
 
 	if r.position >= r.len {
-		return fmt.Errorf("EOL after SD")
+		return errors.New("EOL after SD")
 	}
 
 	err = r.parseMessage()
@@ -1,6 +1,7 @@
 package syslogacquisition
 
 import (
+	"errors"
 	"fmt"
 	"net"
 	"strings"
@@ -79,11 +80,11 @@ func (s *SyslogSource) GetAggregMetrics() []prometheus.Collector {
 }
 
 func (s *SyslogSource) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry, uuid string) error {
-	return fmt.Errorf("syslog datasource does not support one shot acquisition")
+	return errors.New("syslog datasource does not support one shot acquisition")
 }
 
 func (s *SyslogSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error {
-	return fmt.Errorf("syslog datasource does not support one shot acquisition")
+	return errors.New("syslog datasource does not support one shot acquisition")
 }
 
 func validatePort(port int) bool {
@@ -1,6 +1,7 @@
 package appsec
 
 import (
+	"errors"
 	"fmt"
 	"net/http"
 	"os"
@@ -164,7 +165,7 @@ func (wc *AppsecConfig) LoadByPath(file string) error {
 	}
 
 	if wc.Name == "" {
-		return fmt.Errorf("name cannot be empty")
+		return errors.New("name cannot be empty")
 	}
 	if wc.LogLevel == nil {
 		lvl := wc.Logger.Logger.GetLevel()
@@ -1,6 +1,7 @@
 package appsec_rule
 
 import (
+	"errors"
 	"fmt"
 )
 
@@ -48,15 +49,15 @@ type CustomRule struct {
 func (v *CustomRule) Convert(ruleType string, appsecRuleName string) (string, []uint32, error) {
 
 	if v.Zones == nil && v.And == nil && v.Or == nil {
-		return "", nil, fmt.Errorf("no zones defined")
+		return "", nil, errors.New("no zones defined")
 	}
 
 	if v.Match.Type == "" && v.And == nil && v.Or == nil {
-		return "", nil, fmt.Errorf("no match type defined")
+		return "", nil, errors.New("no match type defined")
 	}
 
 	if v.Match.Value == "" && v.And == nil && v.Or == nil {
-		return "", nil, fmt.Errorf("no match value defined")
+		return "", nil, errors.New("no match value defined")
 	}
 
 	switch ruleType {
@@ -2,7 +2,7 @@ package csplugin
 
 import (
 	"context"
-	"fmt"
+	"errors"
 
 	plugin "github.com/hashicorp/go-plugin"
 	"google.golang.org/grpc"
@@ -35,7 +35,7 @@ func (m *GRPCClient) Notify(ctx context.Context, notification *protobufs.Notific
 		return &protobufs.Empty{}, err
 
 	case <-ctx.Done():
-		return &protobufs.Empty{}, fmt.Errorf("timeout exceeded")
+		return &protobufs.Empty{}, errors.New("timeout exceeded")
 	}
 }
 
@@ -51,7 +51,7 @@ func getUID(username string) (uint32, error) {
 		return 0, err
 	}
 	if uid < 0 || uid > math.MaxInt32 {
-		return 0, fmt.Errorf("out of bound uid")
+		return 0, errors.New("out of bound uid")
 	}
 	return uint32(uid), nil
 }
@@ -66,7 +66,7 @@ func getGID(groupname string) (uint32, error) {
 		return 0, err
 	}
 	if gid < 0 || gid > math.MaxInt32 {
-		return 0, fmt.Errorf("out of bound gid")
+		return 0, errors.New("out of bound gid")
 	}
 	return uint32(gid), nil
 }
@@ -55,7 +55,6 @@ func (i *Item) Install(ctx context.Context, force bool, downloadOnly bool) error
 	}
 
 	if downloadOnly && downloaded {
-		i.hub.logger.Infof("Downloaded %s", i.Name)
 		return nil
 	}
 
@@ -171,7 +171,7 @@ func (i *Item) FetchContentTo(ctx context.Context, destPath string) (bool, strin
 
 	downloaded, err := d.Download(ctx, url)
 	if err != nil {
-		return false, "", fmt.Errorf("while downloading %s to %s: %w", i.Name, url, err)
+		return false, "", err
 	}
 
 	return downloaded, url, nil
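The FetchContentTo change above stops re-wrapping the downloader's error. Presumably the underlying download error already carries the item and URL context, so the extra wrap only duplicated it in the logs (an assumption based on the "redundant log messages" bullet). A small sketch of the duplication being removed, with hypothetical names and URLs:

```go
// Illustrative only: the downloader stand-in and the URL are hypothetical.
package main

import (
	"errors"
	"fmt"
)

// stand-in for a downloader that already annotates its own errors
func download(url string) error {
	return fmt.Errorf("while downloading %s: %w", url, errors.New("connection refused"))
}

func main() {
	url := "https://hub.example.org/item.yaml"
	err := download(url)

	// before: the caller wrapped the error again, repeating the same context
	fmt.Println(fmt.Errorf("while downloading some-item to %s: %w", url, err))

	// after: the error is returned as-is and the context appears once
	fmt.Println(err)
}
```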
@@ -3,6 +3,7 @@ package exprhelpers
 import (
 	"bufio"
 	"encoding/base64"
+	"errors"
 	"fmt"
 	"math"
 	"net"
@@ -772,7 +773,7 @@ func ParseKV(params ...any) (any, error) {
 	matches := keyValuePattern.FindAllStringSubmatch(blob, -1)
 	if matches == nil {
 		log.Errorf("could not find any key/value pair in line")
-		return nil, fmt.Errorf("invalid input format")
+		return nil, errors.New("invalid input format")
 	}
 	if _, ok := target[prefix]; !ok {
 		target[prefix] = make(map[string]string)
@@ -780,7 +781,7 @@ func ParseKV(params ...any) (any, error) {
 		_, ok := target[prefix].(map[string]string)
 		if !ok {
 			log.Errorf("ParseKV: target is not a map[string]string")
-			return nil, fmt.Errorf("target is not a map[string]string")
+			return nil, errors.New("target is not a map[string]string")
 		}
 	}
 	for _, match := range matches {
@@ -11,6 +11,8 @@ THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd)
 
 echo "Pre-downloading Hub content..."
 
+start=$(date +%s%N)
+
 types=$("$CSCLI" hub types -o raw)
 
 for itemtype in $types; do
@@ -19,9 +21,12 @@ for itemtype in $types; do
         #shellcheck disable=SC2086
         "$CSCLI" "$itemtype" install \
             $ALL_ITEMS \
-            --download-only \
-            --error
+            --download-only
     fi
 done
 
-echo " done."
+elapsed=$((($(date +%s%N) - start)/1000000))
+# bash only does integer arithmetic, we could use bc or have some fun with sed
+elapsed=$(echo "$elapsed" | sed -e 's/...$/.&/;t' -e 's/.$/.0&/')
+
+echo " done in $elapsed secs."
@@ -70,7 +70,9 @@ make_init_data() {
     ./instance-db config-yaml
     ./instance-db setup
 
-    ./bin/preload-hub-items
+    # preload some content and data files
+    "$CSCLI" collections install crowdsecurity/linux --download-only
+    # sub-items did not respect --download-only
     ./bin/remove-all-hub-items
 
     # when installed packages are always using sqlite, so no need to regenerate
@@ -116,7 +116,10 @@ make_init_data() {
 
     "$CSCLI" --warning hub update
 
-    ./bin/preload-hub-items
+    # preload some content and data files
+    "$CSCLI" collections install crowdsecurity/linux --download-only
+    # sub-items did not respect --download-only
+    ./bin/remove-all-hub-items
 
     # force TCP, the default would be unix socket
     "$CSCLI" --warning machines add githubciXXXXXXXXXXXXXXXXXXXXXXXX --url http://127.0.0.1:8080 --auto --force