diff --git a/cmd/crowdsec-cli/config_backup.go b/cmd/crowdsec-cli/config_backup.go
index d1e4a393555..e8ac6213530 100644
--- a/cmd/crowdsec-cli/config_backup.go
+++ b/cmd/crowdsec-cli/config_backup.go
@@ -21,9 +21,7 @@ func (cli *cliConfig) backupHub(dirPath string) error {
 	}
 
 	for _, itemType := range cwhub.ItemTypes {
-		clog := log.WithFields(log.Fields{
-			"type": itemType,
-		})
+		clog := log.WithField("type", itemType)
 
 		itemMap := hub.GetItemMap(itemType)
 		if itemMap == nil {
@@ -39,9 +37,7 @@ func (cli *cliConfig) backupHub(dirPath string) error {
 		upstreamParsers := []string{}
 
 		for k, v := range itemMap {
-			clog = clog.WithFields(log.Fields{
-				"file": v.Name,
-			})
+			clog = clog.WithField("file", v.Name)
 			if !v.State.Installed { // only backup installed ones
 				clog.Debugf("[%s]: not installed", k)
 				continue
diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go
index ab7d954cac1..069bf67d852 100644
--- a/pkg/acquisition/acquisition.go
+++ b/pkg/acquisition/acquisition.go
@@ -161,9 +161,7 @@ func LoadAcquisitionFromDSN(dsn string, labels map[string]string, transformExpr
 	if err := types.ConfigureLogger(clog); err != nil {
 		return nil, fmt.Errorf("while configuring datasource logger: %w", err)
 	}
-	subLogger := clog.WithFields(log.Fields{
-		"type": dsn,
-	})
+	subLogger := clog.WithField("type", dsn)
 	uniqueId := uuid.NewString()
 	if transformExpr != "" {
 		vm, err := expr.Compile(transformExpr, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...)
diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go
index f97905406ce..07ca56dfb70 100644
--- a/pkg/acquisition/modules/appsec/appsec.go
+++ b/pkg/acquisition/modules/appsec/appsec.go
@@ -210,9 +210,7 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe
 		runner := AppsecRunner{
 			inChan: w.InChan,
 			UUID:   appsecRunnerUUID,
-			logger: w.logger.WithFields(log.Fields{
-				"runner_uuid": appsecRunnerUUID,
-			}),
+			logger: w.logger.WithField("runner_uuid", appsecRunnerUUID),
 			AppsecRuntime: &wrt,
 			Labels:        w.config.Labels,
 		}
diff --git a/pkg/acquisition/modules/appsec/appsec_test.go b/pkg/acquisition/modules/appsec/appsec_test.go
index 5fe4cfe236c..c769ea3d0fd 100644
--- a/pkg/acquisition/modules/appsec/appsec_test.go
+++ b/pkg/acquisition/modules/appsec/appsec_test.go
@@ -41,7 +41,7 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) {
 	InChan := make(chan appsec.ParsedRequest)
 	OutChan := make(chan types.Event)
 
-	logger := log.WithFields(log.Fields{"test": test.name})
+	logger := log.WithField("test", test.name)
 
 	//build rules
 	for ridx, rule := range test.inband_rules {
diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch.go b/pkg/acquisition/modules/cloudwatch/cloudwatch.go
index 1ac1465d390..1859bbf0f84 100644
--- a/pkg/acquisition/modules/cloudwatch/cloudwatch.go
+++ b/pkg/acquisition/modules/cloudwatch/cloudwatch.go
@@ -403,7 +403,7 @@ func (cw *CloudwatchSource) LogStreamManager(in chan LogStreamTailConfig, outCha
 					openedStreams.With(prometheus.Labels{"group": newStream.GroupName}).Inc()
 				}
 				newStream.t = tomb.Tomb{}
-				newStream.logger = cw.logger.WithFields(log.Fields{"stream": newStream.StreamName})
+				newStream.logger = cw.logger.WithField("stream", newStream.StreamName)
 				cw.logger.Debugf("starting tail of stream %s", newStream.StreamName)
 				newStream.t.Go(func() error {
 					return cw.TailLogStream(&newStream, outChan)
diff --git a/pkg/acquisition/modules/docker/docker.go b/pkg/acquisition/modules/docker/docker.go
index 3a694b99d76..857d7e7af78 100644
--- a/pkg/acquisition/modules/docker/docker.go
+++ b/pkg/acquisition/modules/docker/docker.go
@@ -609,7 +609,7 @@ func (d *DockerSource) DockerManager(in chan *ContainerConfig, deleteChan chan *
 		case newContainer := <-in:
 			if _, ok := d.runningContainerState[newContainer.ID]; !ok {
 				newContainer.t = &tomb.Tomb{}
-				newContainer.logger = d.logger.WithFields(log.Fields{"container_name": newContainer.Name})
+				newContainer.logger = d.logger.WithField("container_name", newContainer.Name)
 				newContainer.t.Go(func() error {
 					return d.TailDocker(newContainer, outChan, deleteChan)
 				})
diff --git a/pkg/acquisition/modules/docker/docker_test.go b/pkg/acquisition/modules/docker/docker_test.go
index bcf0966a2d1..e1cc4db96ad 100644
--- a/pkg/acquisition/modules/docker/docker_test.go
+++ b/pkg/acquisition/modules/docker/docker_test.go
@@ -55,9 +55,7 @@ container_name:
 		},
 	}
 
-	subLogger := log.WithFields(log.Fields{
-		"type": "docker",
-	})
+	subLogger := log.WithField("type", "docker")
 
 	for _, test := range tests {
 		f := DockerSource{}
@@ -108,9 +106,7 @@ func TestConfigureDSN(t *testing.T) {
 			expectedErr: "",
 		},
 	}
-	subLogger := log.WithFields(log.Fields{
-		"type": "docker",
-	})
+	subLogger := log.WithField("type", "docker")
 
 	for _, test := range tests {
 		f := DockerSource{}
@@ -169,13 +165,9 @@ container_name_regexp:
 
 		if ts.expectedOutput != "" {
 			logger.SetLevel(ts.logLevel)
-			subLogger = logger.WithFields(log.Fields{
-				"type": "docker",
-			})
+			subLogger = logger.WithField("type", "docker")
 		} else {
-			subLogger = log.WithFields(log.Fields{
-				"type": "docker",
-			})
+			subLogger = log.WithField("type", "docker")
 		}
 
 		readLogs = false
@@ -310,14 +302,10 @@ func TestOneShot(t *testing.T) {
 
 		if ts.expectedOutput != "" {
 			logger.SetLevel(ts.logLevel)
-			subLogger = logger.WithFields(log.Fields{
-				"type": "docker",
-			})
+			subLogger = logger.WithField("type", "docker")
 		} else {
 			log.SetLevel(ts.logLevel)
-			subLogger = log.WithFields(log.Fields{
-				"type": "docker",
-			})
+			subLogger = log.WithField("type", "docker")
 		}
 
 		readLogs = false
diff --git a/pkg/acquisition/modules/file/file_test.go b/pkg/acquisition/modules/file/file_test.go
index 0873b837a3f..688812f2fd3 100644
--- a/pkg/acquisition/modules/file/file_test.go
+++ b/pkg/acquisition/modules/file/file_test.go
@@ -49,9 +49,7 @@ exclude_regexps: ["as[a-$d"]`,
 		},
 	}
 
-	subLogger := log.WithFields(log.Fields{
-		"type": "file",
-	})
+	subLogger := log.WithField("type", "file")
 
 	for _, tc := range tests {
 		tc := tc
@@ -91,9 +89,7 @@ func TestConfigureDSN(t *testing.T) {
 		},
 	}
 
-	subLogger := log.WithFields(log.Fields{
-		"type": "file",
-	})
+	subLogger := log.WithField("type", "file")
 
 	for _, tc := range tests {
 		tc := tc
@@ -211,9 +207,7 @@ filename: test_files/test_delete.log`,
 			logger, hook := test.NewNullLogger()
 			logger.SetLevel(tc.logLevel)
 
-			subLogger := logger.WithFields(log.Fields{
-				"type": "file",
-			})
+			subLogger := logger.WithField("type", "file")
 
 			tomb := tomb.Tomb{}
 			out := make(chan types.Event, 100)
@@ -372,9 +366,7 @@ force_inotify: true`, testPattern),
 			logger, hook := test.NewNullLogger()
 			logger.SetLevel(tc.logLevel)
 
-			subLogger := logger.WithFields(log.Fields{
-				"type": "file",
-			})
+			subLogger := logger.WithField("type", "file")
 
 			tomb := tomb.Tomb{}
 			out := make(chan types.Event)
@@ -451,9 +443,7 @@ func TestExclusion(t *testing.T) {
 exclude_regexps: ["\\.gz$"]`
 	logger, hook := test.NewNullLogger()
 	// logger.SetLevel(ts.logLevel)
-	subLogger := logger.WithFields(log.Fields{
-		"type": "file",
-	})
+	subLogger := logger.WithField("type", "file")
 
 	f := fileacquisition.FileSource{}
 	if err := f.Configure([]byte(config), subLogger, configuration.METRICS_NONE); err != nil {
diff --git a/pkg/acquisition/modules/journalctl/journalctl_test.go b/pkg/acquisition/modules/journalctl/journalctl_test.go
index 9d1f1bb7e0e..f381a227534 100644
--- a/pkg/acquisition/modules/journalctl/journalctl_test.go
+++ b/pkg/acquisition/modules/journalctl/journalctl_test.go
@@ -47,9 +47,7 @@ journalctl_filter:
 		},
 	}
 
-	subLogger := log.WithFields(log.Fields{
-		"type": "journalctl",
-	})
+	subLogger := log.WithField("type", "journalctl")
 
 	for _, test := range tests {
 		f := JournalCtlSource{}
@@ -97,9 +95,7 @@ func TestConfigureDSN(t *testing.T) {
 		},
 	}
 
-	subLogger := log.WithFields(log.Fields{
-		"type": "journalctl",
-	})
+	subLogger := log.WithField("type", "journalctl")
 
 	for _, test := range tests {
 		f := JournalCtlSource{}
@@ -153,13 +149,9 @@ journalctl_filter:
 		if ts.expectedOutput != "" {
 			logger, hook = test.NewNullLogger()
 			logger.SetLevel(ts.logLevel)
-			subLogger = logger.WithFields(log.Fields{
-				"type": "journalctl",
-			})
+			subLogger = logger.WithField("type", "journalctl")
 		} else {
-			subLogger = log.WithFields(log.Fields{
-				"type": "journalctl",
-			})
+			subLogger = log.WithField("type", "journalctl")
 		}
 
 		tomb := tomb.Tomb{}
@@ -227,13 +219,9 @@ journalctl_filter:
 		if ts.expectedOutput != "" {
 			logger, hook = test.NewNullLogger()
 			logger.SetLevel(ts.logLevel)
-			subLogger = logger.WithFields(log.Fields{
-				"type": "journalctl",
-			})
+			subLogger = logger.WithField("type", "journalctl")
 		} else {
-			subLogger = log.WithFields(log.Fields{
-				"type": "journalctl",
-			})
+			subLogger = log.WithField("type", "journalctl")
 		}
 
 		tomb := tomb.Tomb{}
diff --git a/pkg/acquisition/modules/kafka/kafka_test.go b/pkg/acquisition/modules/kafka/kafka_test.go
index 54715a81251..245d3ed58c8 100644
--- a/pkg/acquisition/modules/kafka/kafka_test.go
+++ b/pkg/acquisition/modules/kafka/kafka_test.go
@@ -149,9 +149,7 @@ func TestStreamingAcquisition(t *testing.T) {
 		},
 	}
 
-	subLogger := log.WithFields(log.Fields{
-		"type": "kafka",
-	})
+	subLogger := log.WithField("type", "kafka")
 
 	createTopic("crowdsecplaintext", "localhost:9092")
 
@@ -222,9 +220,7 @@ func TestStreamingAcquisitionWithSSL(t *testing.T) {
 		},
 	}
 
-	subLogger := log.WithFields(log.Fields{
-		"type": "kafka",
-	})
+	subLogger := log.WithField("type", "kafka")
 
 	createTopic("crowdsecssl", "localhost:9092")
 
diff --git a/pkg/acquisition/modules/kinesis/kinesis.go b/pkg/acquisition/modules/kinesis/kinesis.go
index 5d3cf8f80a0..485cefcf01d 100644
--- a/pkg/acquisition/modules/kinesis/kinesis.go
+++ b/pkg/acquisition/modules/kinesis/kinesis.go
@@ -334,7 +334,7 @@ func (k *KinesisSource) ParseAndPushRecords(records []*kinesis.Record, out chan
 }
 
 func (k *KinesisSource) ReadFromSubscription(reader kinesis.SubscribeToShardEventStreamReader, out chan types.Event, shardId string, streamName string) error {
-	logger := k.logger.WithFields(log.Fields{"shard_id": shardId})
+	logger := k.logger.WithField("shard_id", shardId)
 	//ghetto sync, kinesis allows to subscribe to a closed shard, which will make the goroutine exit immediately
 	//and we won't be able to start a new one if this is the first one started by the tomb
 	//TODO: look into parent shards to see if a shard is closed before starting to read it ?
@@ -397,7 +397,7 @@ func (k *KinesisSource) EnhancedRead(out chan types.Event, t *tomb.Tomb) error {
 		return fmt.Errorf("resource part of stream ARN %s does not start with stream/", k.Config.StreamARN)
 	}
 
-	k.logger = k.logger.WithFields(log.Fields{"stream": parsedARN.Resource[7:]})
+	k.logger = k.logger.WithField("stream", parsedARN.Resource[7:])
 	k.logger.Info("starting kinesis acquisition with enhanced fan-out")
 	err = k.DeregisterConsumer()
 	if err != nil {
@@ -439,7 +439,7 @@ func (k *KinesisSource) EnhancedRead(out chan types.Event, t *tomb.Tomb) error {
 }
 
 func (k *KinesisSource) ReadFromShard(out chan types.Event, shardId string) error {
-	logger := k.logger.WithFields(log.Fields{"shard": shardId})
+	logger := k.logger.WithField("shard", shardId)
 	logger.Debugf("Starting to read shard")
 	sharIt, err := k.kClient.GetShardIterator(&kinesis.GetShardIteratorInput{ShardId: aws.String(shardId),
 		StreamName: &k.Config.StreamName,
@@ -485,7 +485,7 @@ func (k *KinesisSource) ReadFromShard(out chan types.Event, shardId string) erro
 }
 
 func (k *KinesisSource) ReadFromStream(out chan types.Event, t *tomb.Tomb) error {
-	k.logger = k.logger.WithFields(log.Fields{"stream": k.Config.StreamName})
+	k.logger = k.logger.WithField("stream", k.Config.StreamName)
 	k.logger.Info("starting kinesis acquisition from shards")
 	for {
 		shards, err := k.kClient.ListShards(&kinesis.ListShardsInput{
diff --git a/pkg/acquisition/modules/kinesis/kinesis_test.go b/pkg/acquisition/modules/kinesis/kinesis_test.go
index a4e4f2f7378..d1d398c129e 100644
--- a/pkg/acquisition/modules/kinesis/kinesis_test.go
+++ b/pkg/acquisition/modules/kinesis/kinesis_test.go
@@ -139,9 +139,7 @@ stream_arn: arn:aws:kinesis:eu-west-1:123456789012:stream/my-stream`,
 		},
 	}
 
-	subLogger := log.WithFields(log.Fields{
-		"type": "kinesis",
-	})
+	subLogger := log.WithField("type", "kinesis")
 	for _, test := range tests {
 		f := KinesisSource{}
 		err := f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE)
@@ -171,9 +169,7 @@ stream_name: stream-1-shard`,
 	for _, test := range tests {
 		f := KinesisSource{}
 		config := fmt.Sprintf(test.config, endpoint)
-		err := f.Configure([]byte(config), log.WithFields(log.Fields{
-			"type": "kinesis",
-		}), configuration.METRICS_NONE)
+		err := f.Configure([]byte(config), log.WithField("type", "kinesis"), configuration.METRICS_NONE)
 		if err != nil {
 			t.Fatalf("Error configuring source: %s", err)
 		}
@@ -217,9 +213,7 @@ stream_name: stream-2-shards`,
 	for _, test := range tests {
 		f := KinesisSource{}
 		config := fmt.Sprintf(test.config, endpoint)
-		err := f.Configure([]byte(config), log.WithFields(log.Fields{
-			"type": "kinesis",
-		}), configuration.METRICS_NONE)
+		err := f.Configure([]byte(config), log.WithField("type", "kinesis"), configuration.METRICS_NONE)
 		if err != nil {
 			t.Fatalf("Error configuring source: %s", err)
 		}
@@ -266,9 +260,7 @@ from_subscription: true`,
 	for _, test := range tests {
 		f := KinesisSource{}
 		config := fmt.Sprintf(test.config, endpoint)
-		err := f.Configure([]byte(config), log.WithFields(log.Fields{
-			"type": "kinesis",
-		}), configuration.METRICS_NONE)
+		err := f.Configure([]byte(config), log.WithField("type", "kinesis"), configuration.METRICS_NONE)
 		if err != nil {
 			t.Fatalf("Error configuring source: %s", err)
 		}
@@ -312,9 +304,7 @@ use_enhanced_fanout: true`,
 	for _, test := range tests {
 		f := KinesisSource{}
 		config := fmt.Sprintf(test.config, endpoint)
-		err := f.Configure([]byte(config), log.WithFields(log.Fields{
-			"type": "kinesis",
-		}))
+		err := f.Configure([]byte(config), log.WithField("type", "kinesis"))
 		if err != nil {
 			t.Fatalf("Error configuring source: %s", err)
 		}
diff --git a/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go b/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go
index 331822ecf5b..b6e6f6b03e9 100644
--- a/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go
+++ b/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go
@@ -67,9 +67,7 @@ webhook_path: /k8s-audit`,
 		},
 	}
 
-	subLogger := log.WithFields(log.Fields{
-		"type": "k8s-audit",
-	})
+	subLogger := log.WithField("type", "k8s-audit")
 
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
@@ -230,9 +228,7 @@ webhook_path: /k8s-audit`,
 		},
 	}
 
-	subLogger := log.WithFields(log.Fields{
-		"type": "k8s-audit",
-	})
+	subLogger := log.WithField("type", "k8s-audit")
 
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
diff --git a/pkg/acquisition/modules/loki/loki_test.go b/pkg/acquisition/modules/loki/loki_test.go
index 83742546959..051a9b93ed5 100644
--- a/pkg/acquisition/modules/loki/loki_test.go
+++ b/pkg/acquisition/modules/loki/loki_test.go
@@ -124,9 +124,7 @@ query: >
 			testName: "Invalid DelayFor",
 		},
 	}
-	subLogger := log.WithFields(log.Fields{
-		"type": "loki",
-	})
+	subLogger := log.WithField("type", "loki")
 
 	for _, test := range tests {
 		t.Run(test.testName, func(t *testing.T) {
@@ -343,9 +341,7 @@ since: 1h
 
 	for _, ts := range tests {
 		logger := log.New()
-		subLogger := logger.WithFields(log.Fields{
-			"type": "loki",
-		})
+		subLogger := logger.WithField("type", "loki")
 
 		lokiSource := loki.LokiSource{}
 		err := lokiSource.Configure([]byte(ts.config), subLogger, configuration.METRICS_NONE)
@@ -509,9 +505,7 @@ query: >
 {server="demo"}
 `
 	logger := log.New()
-	subLogger := logger.WithFields(log.Fields{
-		"type": "loki",
-	})
+	subLogger := logger.WithField("type", "loki")
 
 	title := time.Now().String()
 	lokiSource := loki.LokiSource{}
diff --git a/pkg/acquisition/modules/syslog/syslog_test.go b/pkg/acquisition/modules/syslog/syslog_test.go
index ba14c7db053..8096740f5e4 100644
--- a/pkg/acquisition/modules/syslog/syslog_test.go
+++ b/pkg/acquisition/modules/syslog/syslog_test.go
@@ -52,9 +52,7 @@ listen_addr: 10.0.0`,
 		},
 	}
 
-	subLogger := log.WithFields(log.Fields{
-		"type": "syslog",
-	})
+	subLogger := log.WithField("type", "syslog")
 	for _, test := range tests {
 		s := SyslogSource{}
 		err := s.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE)
@@ -134,9 +132,7 @@ listen_addr: 127.0.0.1`,
 	for _, ts := range tests {
 		ts := ts
 		t.Run(ts.name, func(t *testing.T) {
-			subLogger := log.WithFields(log.Fields{
-				"type": "syslog",
-			})
+			subLogger := log.WithField("type", "syslog")
 			s := SyslogSource{}
 			err := s.Configure([]byte(ts.config), subLogger, configuration.METRICS_NONE)
 			if err != nil {
diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_test.go b/pkg/acquisition/modules/wineventlog/wineventlog_test.go
index c937ceba825..2ea0e365be5 100644
--- a/pkg/acquisition/modules/wineventlog/wineventlog_test.go
+++ b/pkg/acquisition/modules/wineventlog/wineventlog_test.go
@@ -54,9 +54,7 @@ xpath_query: test`,
 		},
 	}
 
-	subLogger := log.WithFields(log.Fields{
-		"type": "windowseventlog",
-	})
+	subLogger := log.WithField("type", "windowseventlog")
 	for _, test := range tests {
 		f := WinEventLogSource{}
 		err := f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE)
@@ -113,9 +111,7 @@ event_level: bla`,
 			expectedErr: "invalid log level",
 		},
 	}
-	subLogger := log.WithFields(log.Fields{
-		"type": "windowseventlog",
-	})
+	subLogger := log.WithField("type", "windowseventlog")
 	for _, test := range tests {
 		f := WinEventLogSource{}
 		f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE)
@@ -181,9 +177,7 @@ event_ids:
 			expectedLines: nil,
 		},
 	}
-	subLogger := log.WithFields(log.Fields{
-		"type": "windowseventlog",
-	})
+	subLogger := log.WithField("type", "windowseventlog")
 
 	evthandler, err := eventlog.Open("Application")
 
diff --git a/pkg/apiserver/middlewares/v1/api_key.go b/pkg/apiserver/middlewares/v1/api_key.go
index 4561b8f7789..314a4da1046 100644
--- a/pkg/apiserver/middlewares/v1/api_key.go
+++ b/pkg/apiserver/middlewares/v1/api_key.go
@@ -75,9 +75,7 @@ func (a *APIKey) authTLS(c *gin.Context, logger *log.Entry) *ent.Bouncer {
 		return nil
 	}
 
-	logger = logger.WithFields(log.Fields{
-		"cn": extractedCN,
-	})
+	logger = logger.WithField("cn", extractedCN)
 
 	bouncerName := fmt.Sprintf("%s@%s", extractedCN, c.ClientIP())
 	bouncer, err := a.DbClient.SelectBouncerByName(bouncerName)
@@ -141,9 +139,7 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc {
 
 		clientIP := c.ClientIP()
 
-		logger := log.WithFields(log.Fields{
-			"ip": clientIP,
-		})
+		logger := log.WithField("ip", clientIP)
 
 		if c.Request.TLS != nil && len(c.Request.TLS.PeerCertificates) > 0 {
 			bouncer = a.authTLS(c, logger)
@@ -158,9 +154,7 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc {
 			return
 		}
 
-		logger = logger.WithFields(log.Fields{
-			"name": bouncer.Name,
-		})
+		logger = logger.WithField("name", bouncer.Name)
 
 		if bouncer.IPAddress == "" {
 			if err := a.DbClient.UpdateBouncerIP(clientIP, bouncer.ID); err != nil {
diff --git a/pkg/cache/cache.go b/pkg/cache/cache.go
index c2153ee2342..8a696caf1f4 100644
--- a/pkg/cache/cache.go
+++ b/pkg/cache/cache.go
@@ -64,9 +64,7 @@ func CacheInit(cfg CacheCfg) error {
 	}
 
 	clog.SetLevel(*cfg.LogLevel)
 
-	cfg.Logger = clog.WithFields(log.Fields{
-		"cache": cfg.Name,
-	})
+	cfg.Logger = clog.WithField("cache", cfg.Name)
 
 	tmpCache := gcache.New(cfg.Size)
diff --git a/pkg/cwhub/dataset.go b/pkg/cwhub/dataset.go
index 6d4f35c285c..90bc9e057f9 100644
--- a/pkg/cwhub/dataset.go
+++ b/pkg/cwhub/dataset.go
@@ -46,7 +46,7 @@ func downloadDataSet(ctx context.Context, dataFolder string, force bool, reader
 		WithHTTPClient(hubClient).
 		ToFile(destPath).
 		CompareContent().
-		WithLogger(logrus.WithFields(logrus.Fields{"url": dataS.SourceURL}))
+		WithLogger(logrus.WithField("url", dataS.SourceURL))
 
 	if !force {
 		d = d.WithLastModified().
diff --git a/pkg/cwhub/itemupgrade.go b/pkg/cwhub/itemupgrade.go
index 4dad226fd78..d74544ddaed 100644
--- a/pkg/cwhub/itemupgrade.go
+++ b/pkg/cwhub/itemupgrade.go
@@ -125,7 +125,7 @@ func (i *Item) FetchContentTo(ctx context.Context, destPath string) (bool, strin
 		WithHTTPClient(hubClient).
 		ToFile(destPath).
 		WithMakeDirs(true).
-		WithLogger(logrus.WithFields(logrus.Fields{"url": url})).
+		WithLogger(logrus.WithField("url", url)).
 		CompareContent().
 		VerifyHash("sha256", wantHash)
 
diff --git a/pkg/cwhub/remote.go b/pkg/cwhub/remote.go
index abb2ddae2ad..0678a7488f8 100644
--- a/pkg/cwhub/remote.go
+++ b/pkg/cwhub/remote.go
@@ -46,7 +46,7 @@ func (r *RemoteHubCfg) fetchIndex(ctx context.Context, destPath string) (bool, e
 		WithHTTPClient(hubClient).
 		ToFile(destPath).
 		CompareContent().
-		WithLogger(logrus.WithFields(logrus.Fields{"url": url})).
+		WithLogger(logrus.WithField("url", url)).
 		Download(ctx, url)
 	if err != nil {
 		return false, err
diff --git a/pkg/exprhelpers/crowdsec_cti.go b/pkg/exprhelpers/crowdsec_cti.go
index a640ea79f48..4e54b07bfc2 100644
--- a/pkg/exprhelpers/crowdsec_cti.go
+++ b/pkg/exprhelpers/crowdsec_cti.go
@@ -45,10 +45,7 @@ func InitCrowdsecCTI(Key *string, TTL *time.Duration, Size *int, LogLevel *log.L
 	if LogLevel != nil {
 		clog.SetLevel(*LogLevel)
 	}
-	customLog := log.Fields{
-		"type": "crowdsec-cti",
-	}
-	subLogger := clog.WithFields(customLog)
+	subLogger := clog.WithField("type", "crowdsec-cti")
 	CrowdsecCTIInitCache(*Size, *TTL)
 	ctiClient = cticlient.NewCrowdsecCTIClient(cticlient.WithAPIKey(CTIApiKey), cticlient.WithLogger(subLogger))
 	CTIApiEnabled = true
diff --git a/pkg/parser/enrich_date_test.go b/pkg/parser/enrich_date_test.go
index 085ef5ca342..434667cbeaf 100644
--- a/pkg/parser/enrich_date_test.go
+++ b/pkg/parser/enrich_date_test.go
@@ -42,9 +42,7 @@ func TestDateParse(t *testing.T) {
 		},
 	}
 
-	logger := log.WithFields(log.Fields{
-		"test": "test",
-	})
+	logger := log.WithField("test", "test")
 	for _, tt := range tests {
 		tt := tt
 		t.Run(tt.name, func(t *testing.T) {
diff --git a/pkg/parser/node.go b/pkg/parser/node.go
index 4bb2c4c5eaf..bb57995a129 100644
--- a/pkg/parser/node.go
+++ b/pkg/parser/node.go
@@ -464,15 +464,11 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error {
 		}
 
 		clog.SetLevel(log.DebugLevel)
-		n.Logger = clog.WithFields(log.Fields{
-			"id": n.rn,
-		})
+		n.Logger = clog.WithField("id", n.rn)
 		n.Logger.Infof("%s has debug enabled", n.Name)
 	} else {
 		/* else bind it to the default one (might find something more elegant here)*/
-		n.Logger = log.WithFields(log.Fields{
-			"id": n.rn,
-		})
+		n.Logger = log.WithField("id", n.rn)
 	}
 
 	/* display info about top-level nodes, they should be the only one with explicit stage name ?*/
diff --git a/pkg/types/queue.go b/pkg/types/queue.go
index d9b737d548f..12a3ab37074 100644
--- a/pkg/types/queue.go
+++ b/pkg/types/queue.go
@@ -22,7 +22,7 @@ func NewQueue(l int) *Queue {
 		Queue: make([]Event, 0, l),
 		L:     l,
 	}
-	log.WithFields(log.Fields{"Capacity": q.L}).Debugf("Creating queue")
+	log.WithField("Capacity", q.L).Debugf("Creating queue")
 
 	return q
 }
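
The change applied throughout this diff is mechanical: every call site that attaches exactly one contextual field moves from `WithFields(log.Fields{...})` to the shorter `WithField(key, value)`. A minimal standalone sketch of the equivalence follows; the field name and value are made up for illustration and do not come from the patch:

```go
package main

import (
	"fmt"

	log "github.com/sirupsen/logrus"
)

func main() {
	// Both entries carry the same single field; WithField is simply the
	// more concise form when only one key/value pair is attached.
	before := log.WithFields(log.Fields{"component": "example"})
	after := log.WithField("component", "example")

	fmt.Println(before.Data["component"]) // example
	fmt.Println(after.Data["component"])  // example

	before.Info("hello")
	after.Info("hello")
}
```

Call sites that attach more than one field keep `WithFields`; only single-field call sites appear in the hunks above.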