mirror of https://github.com/crowdsecurity/crowdsec.git (synced 2025-05-11 12:25:53 +02:00)

commit db9e1e280d (parent 4bed585d6b)

fix linter warnings: dead code, simplification

24 changed files with 110 additions and 115 deletions

@@ -265,7 +265,9 @@ cscli api credentials # Display your API credentials
 	cmdAPI.AddCommand(cmdAPICreds)
 	cmdAPIEnroll.Flags().StringVarP(&userID, "user", "u", "", "User ID (required)")
-	cmdAPIEnroll.MarkFlagRequired("user")
+	if err := cmdAPIEnroll.MarkFlagRequired("user"); err != nil {
+		log.Errorf("'user' flag : %s", err)
+	}
 	cmdAPI.AddCommand(cmdAPIEnroll)
 	cmdAPI.AddCommand(cmdAPIResetPassword)
 	cmdAPI.AddCommand(cmdAPIRegister)

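This hunk and the later rootCmd.Execute() change are the same errcheck fix: cobra's MarkFlagRequired and Execute both return an error that the old code silently dropped. A minimal standalone sketch of the pattern follows; the enroll command and its flag here are illustrative stand-ins, not the actual cscli source.

package main

import (
	"fmt"
	"log"

	"github.com/spf13/cobra"
)

func main() {
	var user string
	cmd := &cobra.Command{
		Use: "enroll",
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("enrolling", user)
		},
	}
	cmd.Flags().StringVarP(&user, "user", "u", "", "User ID (required)")
	// MarkFlagRequired errors if the named flag does not exist;
	// errcheck flags the call when that return value is dropped.
	if err := cmd.MarkFlagRequired("user"); err != nil {
		log.Fatalf("'user' flag : %s", err)
	}
	// Execute also returns an error the old code ignored.
	if err := cmd.Execute(); err != nil {
		log.Fatalf("while executing command : %s", err)
	}
}
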
@@ -164,15 +164,6 @@ func BanAdd(target string, duration string, reason string, action string) error
 	return nil
 }
-
-func banFlush() error {
-	allBa := types.BanApplication{}
-	records := dbctx.Db.Delete(&allBa)
-	if records.Error != nil {
-		return records.Error
-	}
-	return nil
-}
 
 func NewBanCmds() *cobra.Command {
 	/*TODO : add a remediation type*/
 	var cmdBan = &cobra.Command{

@@ -16,10 +16,8 @@ import (
 /*CliCfg is the cli configuration structure, might be unexported*/
 type cliConfig struct {
 	configured bool
-	simulation bool /*are we in simulation mode*/
-	configFolder string `yaml:"cliconfig,omitempty"` /*overload ~/.cscli/*/
-	output string /*output is human, json*/
-	logLevel log.Level /*debug,info,warning,error*/
+	configFolder string `yaml:"cliconfig,omitempty"` /*overload ~/.cscli/*/
+	output string /*output is human, json*/
 	hubFolder string
 	InstallFolder string `yaml:"installdir"` /*/etc/crowdsec/*/
 	BackendPluginFolder string `yaml:"backend"`

@@ -128,7 +128,7 @@ func downloadMetabaseDB(force bool) error {
 	metabaseDBSubpath := path.Join(metabaseDbPath, "metabase.db")
 
 	_, err := os.Stat(metabaseDBSubpath)
-	if err == nil && force == false {
+	if err == nil && !force {
 		log.Printf("%s exists, skip.", metabaseDBSubpath)
 		return nil
 	}

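The force == false rewrite above is the most frequent fix in this commit: gosimple's S1002 check flags explicit comparisons against boolean constants. A tiny self-contained illustration, with a hypothetical download helper standing in for downloadMetabaseDB:

package main

import "fmt"

// download is a hypothetical helper; force mirrors the flag in the diff.
func download(force bool) {
	// gosimple S1002: `if force == false` -> `if !force`,
	// and `if force == true` -> `if force`.
	if !force {
		fmt.Println("exists, skip")
		return
	}
	fmt.Println("downloading again")
}

func main() {
	download(false)
	download(true)
}
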
@@ -41,23 +41,6 @@ func InstallItem(name string, obtype string) {
 	/*iterate of pkg index data*/
 }
-
-func InstallScenario(name string) {
-	InstallItem(name, cwhub.SCENARIOS)
-}
-
-func InstallCollection(name string) {
-	InstallItem(name, cwhub.COLLECTIONS)
-
-}
-
-func InstallParser(name string) {
-	InstallItem(name, cwhub.PARSERS)
-}
-
-func InstallPostoverflow(name string) {
-	InstallItem(name, cwhub.PARSERS_OVFLW)
-}
 
 func NewInstallCmd() *cobra.Command {
 	/* ---- INSTALL COMMAND */
 

@@ -135,5 +135,7 @@ API interaction:
 	rootCmd.AddCommand(NewDashboardCmd())
 	rootCmd.AddCommand(NewInspectCmd())
 
-	rootCmd.Execute()
+	if err := rootCmd.Execute(); err != nil {
+		log.Fatalf("While executing root command : %s", err)
+	}
 }

@@ -51,9 +51,9 @@ func UpgradeConfig(ttype string, name string) {
 		}
 		cwhub.HubIdx[ttype][v.Name] = v
 	}
-	if found == false {
+	if !found {
 		log.Errorf("Didn't find %s", name)
-	} else if updated == 0 && found == true {
+	} else if updated == 0 && found {
 		log.Errorf("Nothing to update")
 	} else if updated != 0 {
 		log.Infof("Upgraded %d items", updated)

@@ -93,14 +93,14 @@ cscli upgrade --force # Overwrite tainted configuration
 			return nil
 		},
 		Run: func(cmd *cobra.Command, args []string) {
-			if upgrade_all == false && len(args) < 2 {
+			if !upgrade_all && len(args) < 2 {
 				_ = cmd.Help()
 				return
 			}
 			if err := cwhub.GetHubIdx(); err != nil {
 				log.Fatalf("Failed to get Hub index : %v", err)
 			}
-			if upgrade_all == true && len(args) == 0 {
+			if upgrade_all && len(args) == 0 {
 				log.Warningf("Upgrade all : parsers, scenarios, collections.")
 				UpgradeConfig(cwhub.PARSERS, "")
 				UpgradeConfig(cwhub.PARSERS_OVFLW, "")

@@ -127,7 +127,7 @@ cscli upgrade --force # Overwrite tainted configuration
 			if len(args) == 1 {
 				UpgradeConfig(cwhub.PARSERS, args[0])
 				//UpgradeConfig(cwhub.PARSERS_OVFLW, "")
-			} else if upgrade_all == true {
+			} else if upgrade_all {
 				UpgradeConfig(cwhub.PARSERS, "")
 			} else {
 				_ = cmd.Help()

@@ -148,7 +148,7 @@ cscli upgrade --force # Overwrite tainted configuration
 			}
 			if len(args) == 1 {
 				UpgradeConfig(cwhub.SCENARIOS, args[0])
-			} else if upgrade_all == true {
+			} else if upgrade_all {
 				UpgradeConfig(cwhub.SCENARIOS, "")
 			} else {
 				_ = cmd.Help()

@@ -170,7 +170,7 @@ cscli upgrade --force # Overwrite tainted configuration
 			}
 			if len(args) == 1 {
 				UpgradeConfig(cwhub.COLLECTIONS, args[0])
-			} else if upgrade_all == true {
+			} else if upgrade_all {
 				UpgradeConfig(cwhub.COLLECTIONS, "")
 			} else {
 				_ = cmd.Help()

@@ -193,7 +193,7 @@ cscli upgrade --force # Overwrite tainted configuration
 			}
 			if len(args) == 1 {
 				UpgradeConfig(cwhub.PARSERS_OVFLW, args[0])
-			} else if upgrade_all == true {
+			} else if upgrade_all {
 				UpgradeConfig(cwhub.PARSERS_OVFLW, "")
 			} else {
 				_ = cmd.Help()

@@ -98,7 +98,7 @@ func main() {
 
 	log.Infof("Crowdwatch %s", cwversion.VersionStr())
 
-	if cConfig.Prometheus == true {
+	if cConfig.Prometheus {
 		registerPrometheus()
 		cConfig.Profiling = true
 	}

@@ -117,7 +117,7 @@ func main() {
 	}
 
 	/*enable profiling*/
-	if cConfig.Profiling == true {
+	if cConfig.Profiling {
 		go runTachymeter(cConfig.HTTPListen)
 		parserCTX.Profiling = true
 		postOverflowCTX.Profiling = true

@@ -233,7 +233,7 @@ func main() {
 			log.Fatalf("unable to restore buckets : %s", err)
 		}
 	}
-	if cConfig.Profiling == true {
+	if cConfig.Profiling {
 		//force the profiling in all buckets
 		for holderIndex := range holders {
 			holders[holderIndex].Profiling = true

@@ -10,9 +10,10 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 
-	log "github.com/sirupsen/logrus"
 	"net/http"
 
+	log "github.com/sirupsen/logrus"
+
 	"runtime"
 )
 

@@ -67,7 +68,7 @@ var globalBucketPourOk = prometheus.NewCounter(
 
 func dumpMetrics() {
 
-	if cConfig.DumpBuckets == true {
+	if cConfig.DumpBuckets {
 		log.Infof("!! Dumping buckets state")
 		if err := leaky.DumpBucketsStateAt("buckets_state.json", time.Now(), buckets); err != nil {
 			log.Fatalf("Failed dumping bucket state : %s", err)

@@ -98,7 +99,9 @@ func dumpMetrics() {
 			log.Infof("Lines never poured : %d (%.2f%%)", linesPouredKO, float64(linesPouredKO)/float64(linesPouredOK)*100.0)
 		}
 		log.Infof("Writting metrics dump to %s", cConfig.WorkingFolder+"/crowdsec.profile")
-		prometheus.WriteToTextfile(cConfig.WorkingFolder+"/crowdsec.profile", prometheus.DefaultGatherer)
+		if err := prometheus.WriteToTextfile(cConfig.WorkingFolder+"/crowdsec.profile", prometheus.DefaultGatherer); err != nil {
+			log.Errorf("failed to write metrics to %s : %s", cConfig.WorkingFolder+"/crowdsec.profile", err)
+		}
 	}
 }
 

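prometheus.WriteToTextfile returns an error (an unwritable path, for instance) that the old call ignored. A minimal sketch of the fixed pattern, assuming the default gatherer is in use; the /tmp path is illustrative:

package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// WriteToTextfile gathers all registered metrics and writes them in
	// the text exposition format; the returned error must be handled.
	if err := prometheus.WriteToTextfile("/tmp/crowdsec.profile", prometheus.DefaultGatherer); err != nil {
		log.Printf("failed to write metrics dump : %s", err)
	}
}
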
@@ -26,7 +26,7 @@ LOOP:
 		if cConfig.Profiling {
 			start = time.Now()
 		}
-		if event.Process == false {
+		if !event.Process {
 			if cConfig.Profiling {
 				atomic.AddUint64(&linesReadKO, 1)
 			}

@@ -43,7 +43,7 @@ LOOP:
 			log.Errorf("failed parsing : %v\n", error)
 			return errors.New("parsing failed :/")
 		}
-		if parsed.Process == false {
+		if !parsed.Process {
 			if cConfig.Profiling {
 				globalParserHitsKo.With(prometheus.Labels{"source": event.Line.Src}).Inc()
 				atomic.AddUint64(&linesParsedKO, 1)

@@ -57,7 +57,7 @@ LOOP:
 			atomic.AddUint64(&linesParsedOK, 1)
 		}
 		processCPT++
-		if parsed.Whitelisted == true {
+		if parsed.Whitelisted {
 			log.Debugf("event whitelisted, discard")
 			continue
 		}

@@ -37,7 +37,9 @@ LOOP:
 				log.Warningf("Failed to unmarshal time from event '%s' : %s", parsed.MarshaledTime, err)
 			} else {
 				log.Warningf("Starting buckets garbage collection ...")
-				leaky.GarbageCollectBuckets(*z, buckets)
+				if err = leaky.GarbageCollectBuckets(*z, buckets); err != nil {
+					return fmt.Errorf("failed to start bucket GC : %s", err)
+				}
 			}
 		}
 	}

@@ -57,7 +59,9 @@ LOOP:
 		if cConfig.Profiling {
 			bucketStat.AddTime(time.Since(start))
 		}
-		lastProcessedItem.UnmarshalText([]byte(parsed.MarshaledTime))
+		if err := lastProcessedItem.UnmarshalText([]byte(parsed.MarshaledTime)); err != nil {
+			return fmt.Errorf("failed to unmarshal item : %s", err)
+		}
 	}
 }
 log.Infof("Sending signal Bucketify")

@@ -117,7 +117,7 @@ func serveOneTimeRun(outputRunner outputs.Output) error {
 
 func serve(outputRunner outputs.Output) error {
 	var err error
-	if cConfig.Daemonize == true {
+	if cConfig.Daemonize {
 		if err = serveDaemon(); err != nil {
 			return fmt.Errorf(err.Error())
 		}

@@ -82,7 +82,7 @@ func (c *Crowdwatch) GetOPT() error {
 
 	flag.Parse()
 
-	if *printVersion == true {
+	if *printVersion {
 		cwversion.Show()
 		os.Exit(0)
 	}

@@ -112,7 +112,7 @@ func (c *Crowdwatch) GetOPT() error {
 	if *AcquisitionFile != "" {
 		c.AcquisitionFile = *AcquisitionFile
 	}
-	if *dumpMode == true {
+	if *dumpMode {
 		c.DumpBuckets = true
 	}
 	if *prometheus {

@@ -187,7 +187,7 @@ func parser_visit(path string, f os.FileInfo, err error) error {
 	}
 
 	//if it's not a symlink and not in hub, it's a local file, don't bother
-	if local == true && inhub == false {
+	if local && !inhub {
 		log.Debugf("%s is a local file, skip", path)
 		skippedLocal++
 		// log.Printf("local scenario, skip.")

@@ -252,8 +252,7 @@ func parser_visit(path string, f os.FileInfo, err error) error {
 			continue
 		} else {
 			/*we got an exact match, update struct*/
-			// log.Printf("got exact match")
-			if inhub == false {
+			if !inhub {
 				log.Debugf("found exact match for %s, version is %s, latest is %s", v.Name, version, v.Version)
 				v.LocalPath = path
 				v.LocalVersion = version

@@ -275,11 +274,11 @@ func parser_visit(path string, f os.FileInfo, err error) error {
 
 			}
 		}
-		if match == false {
+		if !match {
 			log.Debugf("got tainted match for %s : %s", v.Name, path)
 			skippedTainted += 1
 			//the file and the stage is right, but the hash is wrong, it has been tainted by user
-			if inhub == false {
+			if !inhub {
 				v.LocalPath = path
 				v.Installed = true
 			}

@@ -321,13 +320,13 @@ func CollecDepsCheck(v *Item) error {
 			}
 
 			//propagate the state of sub-items to set
-			if val.Tainted == true {
+			if val.Tainted {
 				v.Tainted = true
 				return fmt.Errorf("tainted %s %s, tainted.", ptrtype, p)
-			} else if val.Installed == false && v.Installed == true {
+			} else if !val.Installed && v.Installed {
 				v.Tainted = true
 				return fmt.Errorf("missing %s %s, tainted.", ptrtype, p)
-			} else if val.UpToDate == false {
+			} else if !val.UpToDate {
 				v.UpToDate = false
 				return fmt.Errorf("outdated %s %s", ptrtype, p)
 			}

@@ -553,14 +552,14 @@ func DisableItem(target Item, tdir string, hdir string, purge bool) (Item, error
 func EnableItem(target Item, tdir string, hdir string) (Item, error) {
 	parent_dir := filepath.Clean(tdir + "/" + target.Type + "/" + target.Stage + "/")
 	/*create directories if needed*/
-	if target.Installed == true {
-		if target.Tainted == true {
+	if target.Installed {
+		if target.Tainted {
 			return target, fmt.Errorf("%s is tainted, won't enable unless --force", target.Name)
 		}
-		if target.Local == true {
+		if target.Local {
 			return target, fmt.Errorf("%s is local, won't enable", target.Name)
 		}
-		if target.UpToDate == true {
+		if target.UpToDate {
 			log.Debugf("%s is installed and up-to-date, skip.", target.Name)
 			return target, nil
 		}

@@ -690,7 +689,9 @@ func DownloadItem(target Item, tdir string, overwrite bool) (Item, error) {
 		return target, err
 	}
 	h := sha256.New()
-	h.Write([]byte(body))
+	if _, err := h.Write([]byte(body)); err != nil {
+		return target, fmt.Errorf("%s : failed to write : %s", target.Name, err)
+	}
 	meow := fmt.Sprintf("%x", h.Sum(nil))
 	if meow != target.Versions[target.Version].Digest {
 		log.Errorf("Downloaded version doesn't match index, please 'hub update'")

@@ -737,7 +738,7 @@ func ItemStatus(v Item) (string, bool, bool, bool) {
 	var Ok, Warning, Managed bool
 	var strret string
 
-	if v.Installed == false {
+	if !v.Installed {
 		strret = "disabled"
 		Ok = false
 	} else {

@@ -745,7 +746,7 @@ func ItemStatus(v Item) (string, bool, bool, bool) {
 		strret = "enabled"
 	}
 
-	if v.Local == true {
+	if v.Local {
 		Managed = false
 		strret += ",local"
 	} else {

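The h.Write change above is a subtler errcheck case: hash.Hash is documented never to return an error from Write, but since it satisfies io.Writer the linter still wants the return value handled. A self-contained sketch:

package main

import (
	"crypto/sha256"
	"fmt"
	"log"
)

func main() {
	h := sha256.New()
	// hash.Hash's Write never returns an error per its documentation,
	// but it implements io.Writer, so errcheck asks for the check anyway.
	if _, err := h.Write([]byte("hello world")); err != nil {
		log.Fatalf("failed to write : %s", err)
	}
	fmt.Printf("%x\n", h.Sum(nil))
}
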
@@ -134,7 +134,9 @@ func (b *BackendManager) DeleteAll() error {
 // Insert the signal for the plugin specified in the config["plugin"] parameter
 func (b *BackendManager) InsertOnePlugin(sig types.SignalOccurence, pluginName string) error {
 	if val, ok := b.backendPlugins[pluginName]; ok {
-		val.funcs.Insert(sig)
+		if err := val.funcs.Insert(sig); err != nil {
+			return fmt.Errorf("failed to load %s : %s", pluginName, err)
+		}
 	} else {
 		return fmt.Errorf("plugin '%s' not loaded", pluginName)
 	}

@@ -135,7 +135,7 @@ func FromFactory(g BucketFactory) *Leaky {
 	} else {
 		limiter = rate.NewLimiter(rate.Every(g.leakspeed), g.Capacity)
 	}
-	if g.Profiling == true {
+	if g.Profiling {
 		BucketsInstanciation.With(prometheus.Labels{"name": g.Name}).Inc()
 	}
 	//create the leaky bucket per se

@@ -208,7 +208,7 @@ func LeakRoutine(l *Leaky) {
 			l.logger.Tracef("Pour event: %s", spew.Sdump(msg))
 			l.logger.Debugf("Pouring event.")
 
-			if l.Profiling == true {
+			if l.Profiling {
 				BucketsPour.With(prometheus.Labels{"name": l.Name, "source": msg.Line.Src}).Inc()
 			}
 			l.Pour(l, msg) // glue for now

@@ -236,7 +236,7 @@ func LeakRoutine(l *Leaky) {
 			l.logger.Tracef("Overflow event: %s", spew.Sdump(types.Event{Overflow: sig}))
 			mt, _ := l.Ovflw_ts.MarshalText()
 			l.logger.Tracef("overflow time : %s", mt)
-			if l.Profiling == true {
+			if l.Profiling {
 				BucketsOverflow.With(prometheus.Labels{"name": l.Name}).Inc()
 			}
 			l.AllOut <- types.Event{Overflow: sig, Type: types.OVFLW, MarshaledTime: string(mt)}

@@ -249,7 +249,7 @@ func LeakRoutine(l *Leaky) {
 			sig := types.SignalOccurence{MapKey: l.Mapkey}
 
 			if l.timedOverflow {
-				if l.Profiling == true {
+				if l.Profiling {
 					BucketsOverflow.With(prometheus.Labels{"name": l.Name}).Inc()
 				}
 				sig = FormatOverflow(l, ofw)

@@ -82,7 +82,10 @@ func testOneBucket(t *testing.T, dir string) error {
 		files = append(files, x.Filename)
 	}
 	holders, response, err := LoadBuckets(files)
-	if testFile(t, dir+"/test.yaml", dir+"/in-buckets_state.json", holders, response) == false {
+	if err != nil {
+		t.Fatalf("failed loading bucket : %s", err)
+	}
+	if !testFile(t, dir+"/test.yaml", dir+"/in-buckets_state.json", holders, response) {
 		t.Fatalf("the test failed")
 	}
 	return nil

@@ -241,7 +244,7 @@ POLL_AGAIN:
 
 	//CheckFailed:
 
-	if valid == true {
+	if valid {
 		log.Warningf("The test is valid, remove entry %d from expects, and %d from t.Results", eidx, ridx)
 		//don't do this at home : delete current element from list and redo
 		results[eidx] = results[len(results)-1]

@@ -252,7 +255,7 @@ POLL_AGAIN:
 		}
 	}
 	}
-	if valid == false {
+	if !valid {
 		t.Fatalf("mismatching entries left")
 	} else {
 		log.Warningf("entry valid at end of loop")

@@ -189,7 +189,7 @@ func LoadBucketDir(dir string) ([]BucketFactory, chan types.Event, error) {
 /* Init recursively process yaml files from a directory and loads them as BucketFactory */
 func LoadBucket(g *BucketFactory) error {
 	var err error
-	if g.Debug == true {
+	if g.Debug {
 		var clog = logrus.New()
 		clog.SetFormatter(&log.TextFormatter{FullTimestamp: true})
 		clog.SetLevel(log.DebugLevel)

@@ -326,12 +326,12 @@ func LoadBucketsState(file string, buckets *Buckets, holders []BucketFactory) er
 			tbucket.Total_count = v.Total_count
 			buckets.Bucket_map.Store(k, tbucket)
 			go LeakRoutine(tbucket)
-			_ = <-tbucket.Signal
+			<-tbucket.Signal
 			found = true
 			break
 		}
 	}
-	if found == false {
+	if !found {
 		log.Fatalf("Unable to find holder for bucket %s : %s", k, spew.Sdump(v))
 	}
 }

@@ -530,7 +530,7 @@ func PourItemToHolders(parsed types.Event, holders []BucketFactory, buckets *Buc
 			go LeakRoutine(fresh_bucket)
 			log.Debugf("Created new bucket %s", buckey)
 			//wait for signal to be opened
-			_ = <-fresh_bucket.Signal
+			<-fresh_bucket.Signal
 			continue
 		}
 

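The two Signal changes in this file are gosimple's S1005 check: assigning a channel receive to the blank identifier does nothing a bare receive doesn't. A minimal illustration with a hypothetical ready channel:

package main

import "fmt"

func main() {
	ready := make(chan struct{})
	go func() {
		fmt.Println("routine started")
		close(ready)
	}()
	// gosimple S1005: `_ = <-ready` assigns the received value to the
	// blank identifier for nothing; a bare receive already discards it.
	<-ready
	fmt.Println("signal received")
}
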
@@ -50,7 +50,7 @@ func (u *OverflowFilter) OnBucketOverflow(Bucket *BucketFactory) func(*Leaky, ty
 			return s, q
 		}
 		/*filter returned false, event is blackholded*/
-		if element == false {
+		if !element {
 			l.logger.Infof("Event is discard by overflow filter (%s)", u.Filter)
 			return types.SignalOccurence{
 				MapKey: l.Mapkey,

@@ -35,11 +35,11 @@ func OvflwToOrder(sig types.SignalOccurence, prof types.Profile) (*types.BanOrde
 	var warn error
 
 	//Identify remediation type
-	if prof.Remediation.Ban == true {
+	if prof.Remediation.Ban {
 		ordr.MeasureType = "ban"
-	} else if prof.Remediation.Slow == true {
+	} else if prof.Remediation.Slow {
 		ordr.MeasureType = "slow"
-	} else if prof.Remediation.Captcha == true {
+	} else if prof.Remediation.Captcha {
 		ordr.MeasureType = "captcha"
 	} else {
 		/*if the profil has no remediation, no order */

@@ -120,10 +120,10 @@ func (o *Output) ProcessOutput(sig types.SignalOccurence, profiles []types.Profi
 			logger.Warningf("failed to run filter : %v", err)
 			continue
 		}
-		switch output.(type) {
+		switch out := output.(type) {
 		case bool:
 			/* filter returned false, don't process Node */
-			if output.(bool) == false {
+			if !out {
 				logger.Debugf("eval(FALSE) '%s'", profile.Filter)
 				continue
 			}

@@ -158,7 +158,9 @@ func (o *Output) ProcessOutput(sig types.SignalOccurence, profiles []types.Profi
 		// if ApiPush is nil (not specified in profile configuration) we use global api config (from default.yaml)
 		if profile.ApiPush == nil {
 			if o.API != nil { // if API is not nil, we can push
-				o.API.AppendSignal((sig))
+				if err = o.API.AppendSignal((sig)); err != nil {
+					return fmt.Errorf("failed to append signal : %s", err)
+				}
 			}
 		}
 		for _, outputConfig := range profile.OutputConfigs {

@@ -173,7 +175,9 @@ func (o *Output) ProcessOutput(sig types.SignalOccurence, profiles []types.Profi
 					continue
 				}
 			}
-			o.bManager.InsertOnePlugin(sig, pluginName)
+			if err = o.bManager.InsertOnePlugin(sig, pluginName); err != nil {
+				return fmt.Errorf("failed to insert plugin %s : %s", pluginName, err)
+			}
 		}
 	}
 }

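The switch rewrite above binds the asserted value in the type switch itself, so the bool case can use it directly instead of repeating an output.(bool) assertion. A sketch of the idiom; evaluate is a stand-in for the expression filters used here:

package main

import "fmt"

// evaluate stands in for the expr filters in the diff; it may return a
// bool or some other value.
func evaluate() interface{} {
	return false
}

func main() {
	output := evaluate()
	// Binding the result in the switch (`out := output.(type)`) gives
	// each case an already-asserted value of the case's type.
	switch out := output.(type) {
	case bool:
		if !out {
			fmt.Println("filter returned false, skip")
		}
	default:
		fmt.Printf("unexpected filter result type %T\n", out)
	}
}
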
@@ -92,7 +92,7 @@ func (n *Node) validate(pctx *UnixParserCtx) error {
 				break
 			}
 		}
-		if method_found == false {
+		if !method_found {
 			return fmt.Errorf("the method '%s' doesn't exist", static.Method)
 		}
 	} else {

@@ -120,10 +120,10 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {
 			clog.Debugf("Event leaving node : ko")
 			return false, nil
 		}
-		switch output.(type) {
+		switch out := output.(type) {
 		case bool:
 			/* filter returned false, don't process Node */
-			if output.(bool) == false {
+			if !out {
 				NodeState = false
 				clog.Debugf("eval(FALSE) '%s'", n.Filter)
 				clog.Debugf("Event leaving node : ko")

@@ -142,7 +142,7 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {
 		NodeState = true
 	}
 
-	if n.Profiling == true && n.Name != "" {
+	if n.Profiling && n.Name != "" {
 		NodesHits.With(prometheus.Labels{"source": p.Line.Src, "name": n.Name}).Inc()
 	}
 	set := false

@@ -188,14 +188,14 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {
 		switch output.(type) {
 		case bool:
 			/* filter returned false, don't process Node */
-			if output.(bool) == true {
+			if output.(bool) {
 				clog.Infof("Event is whitelisted by Expr !")
 				p.Whitelisted = true
 				set = true
 			}
 		}
 	}
-	if set == true {
+	if set {
 		p.WhiteListReason = n.Whitelist.Reason
 		/*huglily wipe the ban order if the event is whitelisted and it's an overflow */
 		if p.Type == types.OVFLW { /*don't do this at home kids */

@@ -217,7 +217,7 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {
 				return false, err
 			}
 			clog.Tracef("\tsub-node (%s) ret : %v (strategy:%s)", leaf.rn, ret, n.OnSuccess)
-			if ret == true {
+			if ret {
 				NodeState = true
 				/* if chil is successful, stop processing */
 				if n.OnSuccess == "next_stage" {

@@ -280,15 +280,15 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {
 	}
 
 	//grok or leafs failed, don't process statics
-	if NodeState == false {
-		if n.Profiling == true && n.Name != "" {
+	if !NodeState {
+		if n.Profiling && n.Name != "" {
 			NodesHitsKo.With(prometheus.Labels{"source": p.Line.Src, "name": n.Name}).Inc()
 		}
 		clog.Debugf("Event leaving node : ko")
 		return NodeState, nil
 	}
 
-	if n.Profiling == true && n.Name != "" {
+	if n.Profiling && n.Name != "" {
 		NodesHitsOk.With(prometheus.Labels{"source": p.Line.Src, "name": n.Name}).Inc()
 	}
 	if len(n.Statics) > 0 {

@@ -302,7 +302,7 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx) (bool, error) {
 		clog.Tracef("! No node statics")
 	}
 
-	if NodeState == true {
+	if NodeState {
 		clog.Debugf("Event leaving node : ok")
 		log.Tracef("node is successful, check strategy")
 		if n.OnSuccess == "next_stage" {

@@ -336,7 +336,7 @@ func (n *Node) compile(pctx *UnixParserCtx) error {
 	log.Debugf("compile, node is %s", n.Stage)
 	/* if the node has debugging enabled, create a specific logger with debug
 	that will be used only for processing this node ;) */
-	if n.Debug == true {
+	if n.Debug {
 		var clog = logrus.New()
 		clog.SetLevel(log.DebugLevel)
 		n.logger = clog.WithFields(log.Fields{

@@ -414,10 +414,10 @@ func (n *Node) compile(pctx *UnixParserCtx) error {
 	if len(n.SuccessNodes) > 0 {
 		for idx, _ := range n.SuccessNodes {
 			/*propagate debug/stats to child nodes*/
-			if n.SuccessNodes[idx].Debug == false && n.Debug == true {
+			if !n.SuccessNodes[idx].Debug && n.Debug {
 				n.SuccessNodes[idx].Debug = true
 			}
-			if n.SuccessNodes[idx].Profiling == false && n.Profiling == true {
+			if !n.SuccessNodes[idx].Profiling && n.Profiling {
 				n.SuccessNodes[idx].Profiling = true
 			}
 			n.SuccessNodes[idx].Stage = n.Stage

@@ -468,7 +468,7 @@ func (n *Node) compile(pctx *UnixParserCtx) error {
 		valid = true
 	}
 
-	if valid == false {
+	if !valid {
 		/* node is empty, error force return */
 		n.logger.Infof("Node is empty: %s", spew.Sdump(n))
 		n.Stage = ""

@@ -48,7 +48,7 @@ func testOneParser(t *testing.T, dir string) error {
 	var p UnixParser
 	var pctx *UnixParserCtx
 	var err error
-	var pnodes []Node = make([]Node, 0)
+	var pnodes []Node
 
 	log.SetLevel(log.DebugLevel)
 

@@ -64,7 +64,7 @@ func testOneParser(t *testing.T, dir string) error {
 	//Init the enricher
 	pplugins, err := Loadplugin(datadir)
 	if err != nil {
-		return fmt.Errorf("Failed to load plugin geoip : %v", err)
+		return fmt.Errorf("failed to load plugin geoip : %v", err)
 	}
 	ECTX = append(ECTX, pplugins)
 	log.Debugf("Geoip ctx : %v", ECTX)

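The pnodes change drops a redundant make: a nil slice in Go already supports len, range and append, so the explicit allocation bought nothing. Illustrated with a string slice rather than the test's Node type:

package main

import "fmt"

func main() {
	// `var nodes []string = make([]string, 0)` allocates for nothing;
	// the nil slice below behaves the same for len, range and append.
	var nodes []string
	fmt.Println(len(nodes)) // 0
	nodes = append(nodes, "parent", "child")
	fmt.Println(nodes)
}
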
@@ -92,7 +92,7 @@ func testOneParser(t *testing.T, dir string) error {
 
 	pnodes, err = LoadStages(parser_configs, pctx)
 	if err != nil {
-		return fmt.Errorf("Unable to load parser config : %s", err)
+		return fmt.Errorf("unable to load parser config : %s", err)
 	}
 
 	//TBD: Load post overflows

@@ -238,7 +238,7 @@ func testFile(t *testing.T, file string, pctx UnixParserCtx, nodes []Node) bool
 
 CheckFailed:
 
-	if valid == true {
+	if valid {
 		//log.Infof("Found result [%s], skip", spew.Sdump(tf.Results[ridx]))
 		log.Warningf("The test is valid, remove entry %d from expects, and %d from t.Results", eidx, ridx)
 		//don't do this at home : delete current element from list and redo

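The two fmt.Errorf changes above lower-case the error strings, following the golint/stylecheck rule (ST1005) that error strings should not be capitalized, since callers usually wrap them mid-sentence. A short sketch; loadPlugin is a stand-in for the Loadplugin call:

package main

import (
	"errors"
	"fmt"
)

// loadPlugin stands in for the Loadplugin call in the diff.
func loadPlugin() error {
	return errors.New("geoip database not found")
}

func main() {
	if err := loadPlugin(); err != nil {
		// ST1005: lowercase start, no trailing punctuation, so the
		// message reads naturally when wrapped by a caller.
		fmt.Println(fmt.Errorf("failed to load plugin geoip : %v", err))
	}
}
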
@@ -170,7 +170,7 @@ func ProcessStatics(statics []types.ExtraField, p *types.Event, clog *logrus.Ent
 				clog.Warningf("method '%s' doesn't exist", static.Method)
 			}
 		}
-		if processed == false {
+		if !processed {
 			clog.Warningf("method '%s' doesn't exist", static.Method)
 		}
 	} else if static.Parsed != "" {

@@ -278,7 +278,7 @@ func /*(u types.UnixParser)*/ Parse(ctx UnixParserCtx, xp types.Event, nodes []N
 			continue
 		}
 		clog.Tracef("Processing node %d/%d -> %s", idx, len(nodes), node.rn)
-		if ctx.Profiling == true {
+		if ctx.Profiling {
 			node.Profiling = true
 		}
 		ret, err := node.process(&event, ctx)

@@ -286,10 +286,10 @@ func /*(u types.UnixParser)*/ Parse(ctx UnixParserCtx, xp types.Event, nodes []N
 			clog.Fatalf("Error while processing node : %v", err)
 		}
 		clog.Tracef("node (%s) ret : %v", node.rn, ret)
-		if ret == true {
+		if ret {
 			isStageOK = true
 		}
-		if ret == true && node.OnSuccess == "next_stage" {
+		if ret && node.OnSuccess == "next_stage" {
 			clog.Debugf("node successful, stop end stage %s", stage)
 			break
 		}

@@ -299,7 +299,7 @@ func /*(u types.UnixParser)*/ Parse(ctx UnixParserCtx, xp types.Event, nodes []N
 			break
 		}
 	}
-	if isStageOK == false {
+	if !isStageOK {
 		log.Debugf("Log didn't finish stage %s", event.Stage)
 		event.Process = false
 		return event, nil

@@ -472,6 +472,8 @@ func BenchmarkWaitNNoDelay(b *testing.B) {
 	b.ReportAllocs()
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
-		lim.WaitN(ctx, 1)
+		if err := lim.WaitN(ctx, 1); err != nil {
+			b.Errorf("failed limiter : %s", err)
+		}
 	}
 }

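The benchmark fix is errcheck again: golang.org/x/time/rate's WaitN returns an error when the context is cancelled or n exceeds the limiter's burst. A runnable sketch with illustrative limits:

package main

import (
	"context"
	"log"

	"golang.org/x/time/rate"
)

func main() {
	lim := rate.NewLimiter(rate.Limit(100), 10) // 100 events/s, burst 10
	ctx := context.Background()
	for i := 0; i < 5; i++ {
		// WaitN blocks until the tokens are available; its error
		// (cancelled context, n > burst) must be handled.
		if err := lim.WaitN(ctx, 1); err != nil {
			log.Fatalf("failed limiter : %s", err)
		}
	}
	log.Println("sent 5 events under the rate limit")
}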