allowlists: automatically expire current matching decisions on update (#3601)

blotus 2025-05-06 14:10:30 +02:00 committed by GitHub
parent f8f0b2a211
commit 959b872118
8 changed files with 186 additions and 7 deletions

View file

@@ -488,6 +488,14 @@ func (cli *cliAllowLists) add(ctx context.Context, db *database.Client, name str
fmt.Printf("added %d values to allowlist %s\n", added, name)
}
deleted, err := db.ApplyAllowlistsToExistingDecisions(ctx)
if err != nil {
return fmt.Errorf("unable to apply allowlists to existing decisions: %w", err)
}
if deleted > 0 {
fmt.Printf("%d decisions deleted by allowlists\n", deleted)
}
return nil
}

View file

@@ -588,6 +588,8 @@ func fillAlertsWithDecisions(alerts []*models.Alert, decisions []*models.Decisio
func (a *apic) PullTop(ctx context.Context, forcePull bool) error {
var err error
hasPulledAllowlists := false
// A mutex with TryLock would be a bit simpler
// But Go does not guarantee that TryLock will be able to acquire the lock even if it is available
select {
@@ -649,7 +651,7 @@ func (a *apic) PullTop(ctx context.Context, forcePull bool) error {
// process deleted decisions
nbDeleted, err := a.HandleDeletedDecisionsV3(ctx, data.Deleted, deleteCounters)
if err != nil {
- return err
+ log.Errorf("could not delete decisions from CAPI: %s", err)
}
log.Printf("capi/community-blocklist : %d explicit deletions", nbDeleted)
@@ -657,8 +659,9 @@ func (a *apic) PullTop(ctx context.Context, forcePull bool) error {
// Update allowlists before processing decisions
if data.Links != nil {
if len(data.Links.Allowlists) > 0 {
hasPulledAllowlists = true
if err := a.UpdateAllowlists(ctx, data.Links.Allowlists, forcePull); err != nil {
return fmt.Errorf("while updating allowlists: %w", err)
log.Errorf("could not update allowlists from CAPI: %s", err)
}
}
}
@@ -675,7 +678,7 @@ func (a *apic) PullTop(ctx context.Context, forcePull bool) error {
err = a.SaveAlerts(ctx, alertsFromCapi, addCounters, deleteCounters)
if err != nil {
return fmt.Errorf("while saving alerts: %w", err)
log.Errorf("could not save alert for CAPI pull: %s", err)
}
} else {
if a.pullCommunity {
@@ -689,11 +692,21 @@ func (a *apic) PullTop(ctx context.Context, forcePull bool) error {
if data.Links != nil {
if len(data.Links.Blocklists) > 0 {
if err := a.UpdateBlocklists(ctx, data.Links.Blocklists, addCounters, forcePull); err != nil {
return fmt.Errorf("while updating blocklists: %w", err)
log.Errorf("could not update blocklists from CAPI: %s", err)
}
}
}
if hasPulledAllowlists {
deleted, err := a.dbClient.ApplyAllowlistsToExistingDecisions(ctx)
if err != nil {
log.Errorf("could not apply allowlists to existing decisions: %s", err)
}
if deleted > 0 {
log.Infof("deleted %d decisions from allowlists", deleted)
}
}
return nil
}
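
The comment near the top of PullTop about preferring a select over a mutex with TryLock refers to the non-blocking "acquire if free, otherwise skip" pattern built from a one-slot buffered channel. Below is a minimal, hedged sketch of that pattern, not taken from this commit; the pullGuard type and its method names are illustrative only.

package main

import (
	"fmt"
	"time"
)

// pullGuard is a non-blocking guard built on a 1-slot buffered channel.
// (Illustrative type, not part of the crowdsec codebase.)
type pullGuard struct {
	sem chan struct{}
}

func newPullGuard() *pullGuard {
	return &pullGuard{sem: make(chan struct{}, 1)}
}

// tryAcquire reports whether the caller now owns the guard; it never blocks.
func (g *pullGuard) tryAcquire() bool {
	select {
	case g.sem <- struct{}{}: // the slot was free, we hold it now
		return true
	default: // someone else holds it, do not wait
		return false
	}
}

func (g *pullGuard) release() { <-g.sem }

func main() {
	g := newPullGuard()
	if !g.tryAcquire() {
		fmt.Println("a pull is already running, skipping")
		return
	}
	defer g.release()
	fmt.Println("pulling...")
	time.Sleep(10 * time.Millisecond)
}

With a select and a default case, the outcome depends only on whether the channel slot is free at that instant, which is the property the comment above contrasts with sync.Mutex.TryLock.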

View file

@@ -264,6 +264,14 @@ func ManagementCmd(message *Message, p *Papi, sync bool) error {
if err != nil {
return fmt.Errorf("failed to force pull operation: %w", err)
}
deleted, err := p.DBClient.ApplyAllowlistsToExistingDecisions(ctx)
if err != nil {
log.Errorf("could not apply allowlists to existing decisions: %s", err)
}
if deleted > 0 {
log.Infof("deleted %d decisions from allowlists", deleted)
}
}
case "allowlist_unsubscribe":
data, err := json.Marshal(message.Data)

View file

@@ -12,6 +12,8 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/database/ent"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/allowlist"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/allowlistitem"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/decision"
"github.com/crowdsecurity/crowdsec/pkg/models"
"github.com/crowdsecurity/crowdsec/pkg/types"
)
@@ -389,3 +391,96 @@ func (c *Client) GetAllowlistsContentForAPIC(ctx context.Context) ([]net.IP, []*
return ips, nets, nil
}
func (c *Client) ApplyAllowlistsToExistingDecisions(ctx context.Context) (int, error) {
// Soft delete (set expiration to now) all decisions that match any allowlist
totalCount := 0
// Get all non-expired allowlist items
// We will match them one by one against all decisions
allowlistItems, err := c.Ent.AllowListItem.Query().
Where(
allowlistitem.Or(
allowlistitem.ExpiresAtGTE(time.Now().UTC()),
allowlistitem.ExpiresAtIsNil(),
),
).All(ctx)
if err != nil {
return 0, fmt.Errorf("unable to get allowlist items: %w", err)
}
now := time.Now().UTC()
for _, item := range allowlistItems {
updateQuery := c.Ent.Decision.Update().SetUntil(now).Where(decision.UntilGTE(now))
switch item.IPSize {
case 4:
updateQuery = updateQuery.Where(
decision.And(
decision.IPSizeEQ(4),
decision.Or(
decision.And(
decision.StartIPLTE(item.StartIP),
decision.EndIPGTE(item.EndIP),
),
decision.And(
decision.StartIPGTE(item.StartIP),
decision.EndIPLTE(item.EndIP),
),
)))
case 16:
updateQuery = updateQuery.Where(
decision.And(
decision.IPSizeEQ(16),
decision.Or(
decision.And(
decision.Or(
decision.StartIPLT(item.StartIP),
decision.And(
decision.StartIPEQ(item.StartIP),
decision.StartSuffixLTE(item.StartSuffix),
)),
decision.Or(
decision.EndIPGT(item.EndIP),
decision.And(
decision.EndIPEQ(item.EndIP),
decision.EndSuffixGTE(item.EndSuffix),
),
),
),
decision.And(
decision.Or(
decision.StartIPGT(item.StartIP),
decision.And(
decision.StartIPEQ(item.StartIP),
decision.StartSuffixGTE(item.StartSuffix),
)),
decision.Or(
decision.EndIPLT(item.EndIP),
decision.And(
decision.EndIPEQ(item.EndIP),
decision.EndSuffixLTE(item.EndSuffix),
),
),
),
),
),
)
default:
// This should never happen
// But better safe than sorry and just skip it instead of expiring all decisions
c.Log.Errorf("unexpected IP size %d for allowlist item %s", item.IPSize, item.Value)
continue
}
// Update the decisions
count, err := updateQuery.Save(ctx)
if err != nil {
c.Log.Errorf("unable to expire existing decisions: %s", err)
continue
}
totalCount += count
}
return totalCount, nil
}
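
The WHERE clauses above compare stored [start, end] integer ranges: for IPv4 a decision is expired when either the decision range fully contains the allowlist item or the item fully contains the decision, and the IPv6 branch applies the same containment test but falls through to the StartSuffix/EndSuffix columns when the high parts of the addresses are equal. Here is a small standalone sketch of the IPv4 comparison, with hypothetical helper names that are not part of the codebase:

package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

// ipv4ToUint32 converts a 4-byte IPv4 address to its integer form.
func ipv4ToUint32(ip net.IP) uint32 {
	return binary.BigEndian.Uint32(ip.To4())
}

// rangeFromCIDR returns the first and last address of an IPv4 CIDR as integers.
func rangeFromCIDR(cidr string) (uint32, uint32, error) {
	_, n, err := net.ParseCIDR(cidr)
	if err != nil {
		return 0, 0, err
	}
	start := ipv4ToUint32(n.IP)
	mask := binary.BigEndian.Uint32(net.IP(n.Mask).To4())
	return start, start | ^mask, nil
}

// shouldExpire mirrors the IPv4 branch above: expire the decision when either
// range fully contains the other.
func shouldExpire(decStart, decEnd, itemStart, itemEnd uint32) bool {
	decisionContainsItem := decStart <= itemStart && decEnd >= itemEnd
	itemContainsDecision := decStart >= itemStart && decEnd <= itemEnd
	return decisionContainsItem || itemContainsDecision
}

func main() {
	decStart, decEnd, _ := rangeFromCIDR("2.3.4.0/24")    // active decision on a range
	itemStart, itemEnd, _ := rangeFromCIDR("2.3.4.42/32") // allowlisted single IP inside it
	fmt.Println(shouldExpire(decStart, decEnd, itemStart, itemEnd)) // true: the decision is expired
}

This is the same containment rule exercised by the functional test further down, where an allowlisted IP inside 2.3.4.0/24 expires the decision on the whole range.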

View file

@@ -246,3 +246,58 @@ teardown() {
rune -0 jq 'del(.created_at) | del(.updated_at) | del(.items.[].created_at) | del(.items.[].expiration)' <(output)
assert_json '{"description":"a foo","items":[],"name":"foo"}'
}
@test "allowlists expire active decisions" {
rune -0 cscli decisions add -i 1.2.3.4
rune -0 cscli decisions add -r 2.3.4.0/24
rune -0 cscli decisions add -i 5.4.3.42
rune -0 cscli decisions add -r 6.5.4.0/24
rune -0 cscli decisions add -r 10.0.0.0/24
rune -0 cscli decisions list -o json
rune -0 jq -r 'sort_by(.decisions[].value) | .[].decisions[0].value' <(output)
assert_output - <<-EOT
1.2.3.4
10.0.0.0/24
2.3.4.0/24
5.4.3.42
6.5.4.0/24
EOT
rune -0 cscli allowlists create foo -d "foo"
# add an allowlist that matches exactly
rune -0 cscli allowlists add foo 1.2.3.4
if is_db_mysql; then sleep 2; fi
# it should not be here anymore
rune -0 cscli decisions list -o json
rune -0 jq -e 'any(.[].decisions[]; .value == "1.2.3.4") | not' <(output)
# allowlist an IP belonging to a range
rune -0 cscli allowlist add foo 2.3.4.42
if is_db_mysql; then sleep 2; fi
rune -0 cscli decisions list -o json
rune -0 jq -e 'any(.[].decisions[]; .value == "2.3.4.0/24") | not' <(output)
# allowlist a range with an active decision inside
rune -0 cscli allowlist add foo 5.4.3.0/24
if is_db_mysql; then sleep 2; fi
rune -0 cscli decisions list -o json
rune -0 jq -e 'any(.[].decisions[]; .value == "5.4.3.42") | not' <(output)
# allowlist a range inside a range for which we have a decision
rune -0 cscli allowlist add foo 6.5.4.0/25
if is_db_mysql; then sleep 2; fi
rune -0 cscli decisions list -o json
rune -0 jq -e 'any(.[].decisions[]; .value == "6.5.4.0/24") | not' <(output)
# allowlist a range bigger than a range for which we have a decision
rune -0 cscli allowlist add foo 10.0.0.0/23
if is_db_mysql; then sleep 2; fi
rune -0 cscli decisions list -o json
rune -0 jq -e 'any(.[].decisions[]; .value == "10.0.0.0/24") | not' <(output)
# sanity check no more active decisions
rune -0 cscli decisions list -o json
assert_json []
}

View file

@@ -116,7 +116,7 @@ load_init_data() {
dump_backend="$(cat "${LOCAL_INIT_DIR}/.backend")"
if [[ "${DB_BACKEND}" != "${dump_backend}" ]]; then
die "Can't run with backend '${DB_BACKEND}' because the test data was built with '${dump_backend}'"
die "Can't run with backend '${DB_BACKEND}' because 'make bats-fixture' was ran with '${dump_backend}'"
fi
remove_init_data

View file

@@ -168,7 +168,7 @@ load_init_data() {
dump_backend="$(cat "${LOCAL_INIT_DIR}/.backend")"
if [[ "${DB_BACKEND}" != "${dump_backend}" ]]; then
die "Can't run with backend '${DB_BACKEND}' because the test data was built with '${dump_backend}'"
die "Can't run with backend '${DB_BACKEND}' because 'make bats-fixture' was ran with '${dump_backend}'"
fi
remove_init_data

View file

@@ -26,7 +26,7 @@ fi
dump_backend="$(cat "$LOCAL_INIT_DIR/.backend")"
if [[ "$DB_BACKEND" != "$dump_backend" ]]; then
die "Can't run with backend '$DB_BACKEND' because the test data was build with '$dump_backend'"
die "Can't run with backend '$DB_BACKEND' because 'make bats-fixture' was ran with '$dump_backend'"
fi
if [[ $# -ge 1 ]]; then