From 4014104f6e6ef112644f493e22bac45ca514674f Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Tue, 19 Dec 2023 12:12:10 +0100 Subject: [PATCH 01/21] Log raw events and errors containing events to a separate file This commit introduces a new logger that can be configured through `logging.events` that can be used to log any message that contains the whole event or could contain the whole event. At the moment it is used by the elasticsearch output to log indexing errors containing the whole event and errors returned by Elasticsearch that can potentially contain the whole event. --- go.mod | 2 + go.sum | 4 +- libbeat/cmd/instance/beat.go | 43 +++++++++++++--- libbeat/cmd/test/output.go | 4 +- libbeat/outputs/console/console.go | 1 + libbeat/outputs/elasticsearch/client.go | 22 +++++--- .../outputs/elasticsearch/elasticsearch.go | 51 +++++++++++-------- libbeat/outputs/fileout/file.go | 1 + libbeat/outputs/kafka/kafka.go | 1 + libbeat/outputs/logstash/logstash.go | 2 + libbeat/outputs/output_reg.go | 7 ++- libbeat/outputs/redis/redis.go | 2 + libbeat/outputs/shipper/shipper.go | 1 + libbeat/publisher/pipeline/controller.go | 5 +- libbeat/publisher/pipeline/pipeline.go | 3 +- 15 files changed, 105 insertions(+), 44 deletions(-) diff --git a/go.mod b/go.mod index a7044889fac4..9ca5d766de27 100644 --- a/go.mod +++ b/go.mod @@ -419,3 +419,5 @@ replace ( // Exclude this version because the version has an invalid checksum. exclude github.com/docker/distribution v2.8.0+incompatible + +replace github.com/elastic/elastic-agent-libs => github.com/belimawr/elastic-agent-libs v0.2.9-0.20231220154111-efc1fba83b4b diff --git a/go.sum b/go.sum index 79feea755704..6d202c7825b6 100644 --- a/go.sum +++ b/go.sum @@ -373,6 +373,8 @@ github.com/awslabs/goformation/v4 v4.1.0 h1:JRxIW0IjhYpYDrIZOTJGMu2azXKI+OK5dP56 github.com/awslabs/goformation/v4 v4.1.0/go.mod h1:MBDN7u1lMNDoehbFuO4uPvgwPeolTMA2TzX1yO6KlxI= github.com/awslabs/kinesis-aggregation/go/v2 v2.0.0-20220623125934-28468a6701b5 h1:lxW5Q6K2IisyF5tlr6Ts0W4POGWQZco05MJjFmoeIHs= github.com/awslabs/kinesis-aggregation/go/v2 v2.0.0-20220623125934-28468a6701b5/go.mod h1:0Qr1uMHFmHsIYMcG4T7BJ9yrJtWadhOmpABCX69dwuc= +github.com/belimawr/elastic-agent-libs v0.2.9-0.20231220154111-efc1fba83b4b h1:GEwwH9rTwJzcHAcdCFkfta0AHbADcTNpxZhL51ASLpo= +github.com/belimawr/elastic-agent-libs v0.2.9-0.20231220154111-efc1fba83b4b/go.mod h1:EbRwBMsWoU4IHGKJlTrxbxC03hkihS9W4h+UgraLdDM= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/immutable v0.2.1/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI= github.com/benbjohnson/tmpl v1.0.0/go.mod h1:igT620JFIi44B6awvU9IsDhR77IXWtFigTLil/RPdps= @@ -662,8 +664,6 @@ github.com/elastic/elastic-agent-autodiscover v0.6.7 h1:+KVjltN0rPsBrU8b156gV4lO github.com/elastic/elastic-agent-autodiscover v0.6.7/go.mod h1:hFeFqneS2r4jD0/QzGkrNk0YVdN0JGh7lCWdsH7zcI4= github.com/elastic/elastic-agent-client/v7 v7.6.0 h1:FEn6FjzynW4TIQo5G096Tr7xYK/P5LY9cSS6wRbXZTc= github.com/elastic/elastic-agent-client/v7 v7.6.0/go.mod h1:GlUKrbVd/O1CRAZonpBeN3J0RlVqP6VGcrBjFWca+aM= -github.com/elastic/elastic-agent-libs v0.7.5 h1:4UMqB3BREvhwecYTs/L23oQp1hs/XUkcunPlmTZn5yg= -github.com/elastic/elastic-agent-libs v0.7.5/go.mod h1:pGMj5myawdqu+xE+WKvM5FQzKQ/MonikkWOzoFTJxaU= github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b666f3 h1:sb+25XJn/JcC9/VL8HX4r4QXSUq4uTNzGS2kxOE7u1U= github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b666f3/go.mod 
h1:rWarFM7qYxJKsi9WcV6ONcFjH/NA3niDNpTxO+8/GVI= github.com/elastic/elastic-agent-system-metrics v0.9.1 h1:r0ofKHgPpl+W09ie7tzGcCDC0d4NZbQUv37rSgHf4FM= diff --git a/libbeat/cmd/instance/beat.go b/libbeat/cmd/instance/beat.go index efe8bd48f79a..cff6d7120333 100644 --- a/libbeat/cmd/instance/beat.go +++ b/libbeat/cmd/instance/beat.go @@ -125,6 +125,7 @@ type beatConfig struct { BufferConfig *config.C `config:"http.buffer"` Path paths.Path `config:"path"` Logging *config.C `config:"logging"` + EventLogging *config.C `config:"logging.events"` MetricLogging *config.C `config:"logging.metrics"` Keystore *config.C `config:"keystore"` Instrumentation instrumentation.Config `config:"instrumentation"` @@ -378,7 +379,26 @@ func (b *Beat) createBeater(bt beat.Creator) (beat.Beater, error) { Logger: logp.L().Named("publisher"), Tracer: b.Instrumentation.Tracer(), } - outputFactory := b.makeOutputFactory(b.Config.Output) + + // Get the default/current logging configuration + // we need some defaults to be populates otherwise Unpack will + // fail + eventsLoggerCfg := logp.DefaultConfig(configure.GetEnvironment()) + + // merge eventsLoggerCfg with b.Config.Logging, so logging.events.* only + // overwrites logging.* + if err := b.Config.EventLogging.Unpack(&eventsLoggerCfg); err != nil { + return nil, fmt.Errorf("error initialising events logger: %w", err) + } + + // Ensure the default filename is set + if eventsLoggerCfg.Files.Name == "" { + eventsLoggerCfg.Files.Name = b.Info.Beat + // Append the name so the files do not overwrite themselves. + eventsLoggerCfg.Files.Name = eventsLoggerCfg.Files.Name + "-events-data" + } + + outputFactory := b.makeOutputFactory(b.Config.Output, eventsLoggerCfg) settings := pipeline.Settings{ Processors: b.processors, InputQueueSize: b.InputQueueSize, @@ -388,7 +408,7 @@ func (b *Beat) createBeater(bt beat.Creator) (beat.Beater, error) { return nil, fmt.Errorf("error initializing publisher: %w", err) } - reload.RegisterV2.MustRegisterOutput(b.makeOutputReloader(publisher.OutputReloader())) + reload.RegisterV2.MustRegisterOutput(b.makeOutputReloader(publisher.OutputReloader(), eventsLoggerCfg)) // TODO: some beats race on shutdown with publisher.Stop -> do not call Stop yet, // but refine publisher to disconnect clients on stop automatically @@ -784,6 +804,14 @@ func (b *Beat) configure(settings Settings) error { return fmt.Errorf("error unpacking config data: %w", err) } + if b.Config.EventLogging == nil { + b.Config.EventLogging = config.NewConfig() + } + b.Config.EventLogging.Merge(b.Config.Logging) + if _, err := b.Config.EventLogging.Remove("events", -1); err != nil { + return fmt.Errorf("cannot merge logging and logging.events configuration: %w", err) + } + if err := promoteOutputQueueSettings(&b.Config); err != nil { return fmt.Errorf("could not promote output queue settings: %w", err) } @@ -1091,7 +1119,7 @@ func (b *Beat) indexSetupCallback() elasticsearch.ConnectCallback { } } -func (b *Beat) makeOutputReloader(outReloader pipeline.OutputReloader) reload.Reloadable { +func (b *Beat) makeOutputReloader(outReloader pipeline.OutputReloader, eventsLoggerCfg logp.Config) reload.Reloadable { return reload.ReloadableFunc(func(update *reload.ConfigWithMeta) error { if update == nil { return nil @@ -1113,15 +1141,16 @@ func (b *Beat) makeOutputReloader(outReloader pipeline.OutputReloader) reload.Re } } - return outReloader.Reload(update, b.createOutput) + return outReloader.Reload(update, eventsLoggerCfg, b.createOutput) }) } func (b *Beat) makeOutputFactory( cfg 
config.Namespace, + eventLoggerCfg logp.Config, ) func(outputs.Observer) (string, outputs.Group, error) { return func(outStats outputs.Observer) (string, outputs.Group, error) { - out, err := b.createOutput(outStats, cfg) + out, err := b.createOutput(outStats, cfg, eventLoggerCfg) return cfg.Name(), out, err } } @@ -1217,7 +1246,7 @@ func (b *Beat) reloadOutputOnCertChange(cfg config.Namespace) error { return nil } -func (b *Beat) createOutput(stats outputs.Observer, cfg config.Namespace) (outputs.Group, error) { +func (b *Beat) createOutput(stats outputs.Observer, cfg config.Namespace, eventsLoggerCfg logp.Config) (outputs.Group, error) { if !cfg.IsSet() { return outputs.Group{}, nil } @@ -1226,7 +1255,7 @@ func (b *Beat) createOutput(stats outputs.Observer, cfg config.Namespace) (outpu return outputs.Group{}, fmt.Errorf("could not setup output certificates reloader: %w", err) } - return outputs.Load(b.IdxSupporter, b.Info, stats, cfg.Name(), cfg.Config()) + return outputs.Load(b.IdxSupporter, b.Info, stats, cfg.Name(), cfg.Config(), eventsLoggerCfg) } func (b *Beat) registerClusterUUIDFetching() { diff --git a/libbeat/cmd/test/output.go b/libbeat/cmd/test/output.go index 3290c283c27f..ac7e3ba535a2 100644 --- a/libbeat/cmd/test/output.go +++ b/libbeat/cmd/test/output.go @@ -26,6 +26,7 @@ import ( "github.com/elastic/beats/v7/libbeat/cmd/instance" "github.com/elastic/beats/v7/libbeat/idxmgmt" "github.com/elastic/beats/v7/libbeat/outputs" + "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/testing" ) @@ -41,7 +42,8 @@ func GenTestOutputCmd(settings instance.Settings) *cobra.Command { } im, _ := idxmgmt.DefaultSupport(nil, b.Info, nil) - output, err := outputs.Load(im, b.Info, nil, b.Config.Output.Name(), b.Config.Output.Config()) + // we use an empty config for the events logger because this is just a output test + output, err := outputs.Load(im, b.Info, nil, b.Config.Output.Name(), b.Config.Output.Config(), logp.Config{}) if err != nil { fmt.Fprintf(os.Stderr, "Error initializing output: %s\n", err) os.Exit(1) diff --git a/libbeat/outputs/console/console.go b/libbeat/outputs/console/console.go index b81bf3363486..a7cc4a69e6ad 100644 --- a/libbeat/outputs/console/console.go +++ b/libbeat/outputs/console/console.go @@ -51,6 +51,7 @@ func makeConsole( beat beat.Info, observer outputs.Observer, cfg *config.C, + eventsLoggerCfg logp.Config, ) (outputs.Group, error) { config := defaultConfig err := cfg.Unpack(&config) diff --git a/libbeat/outputs/elasticsearch/client.go b/libbeat/outputs/elasticsearch/client.go index 8aeef2c623e7..b9728fc7bf88 100644 --- a/libbeat/outputs/elasticsearch/client.go +++ b/libbeat/outputs/elasticsearch/client.go @@ -55,7 +55,8 @@ type Client struct { observer outputs.Observer NonIndexableAction string - log *logp.Logger + log *logp.Logger + eventsLogger *logp.Logger } // ClientSettings contains the settings for a client. @@ -81,6 +82,8 @@ const ( // NewClient instantiates a new client. 
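// It receives two loggers: logger is used for regular log entries, while
// eventsLogger is used for log entries that contain raw event data, so those
// entries can be written to the separate events-data log file configured
// through logging.events.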
func NewClient( + logger *logp.Logger, + eventsLogger *logp.Logger, s ClientSettings, onConnect *callbacksRegistry, ) (*Client, error) { @@ -140,7 +143,8 @@ func NewClient( observer: s.Observer, NonIndexableAction: s.NonIndexableAction, - log: logp.NewLogger("elasticsearch"), + log: logger, + eventsLogger: eventsLogger, } return client, nil @@ -174,6 +178,8 @@ func (client *Client) Clone() *Client { client.conn.Transport.Proxy.Disable = client.conn.Transport.Proxy.URL == nil c, _ := NewClient( + client.log, + client.eventsLogger, ClientSettings{ ConnectionSettings: connection, Index: client.index, @@ -431,12 +437,12 @@ func (client *Client) bulkCollectPublishFails(result eslegclient.BulkResult, dat result, _ := data[i].Content.Meta.HasKey(dead_letter_marker_field) if result { stats.nonIndexable++ - client.log.Errorf("Can't deliver to dead letter index event (status=%v). Enable debug logs to view the event and cause.", status) - client.log.Debugf("Can't deliver to dead letter index event %#v (status=%v): %s", data[i], status, msg) + client.log.Errorf("Can't deliver to dead letter index event (status=%v). Look for events-data log file to view the event and cause.", status) + client.eventsLogger.Errorf("Can't deliver to dead letter index event %#v (status=%v): %s", data[i], status, msg) // poison pill - this will clog the pipeline if the underlying failure is non transient. } else if client.NonIndexableAction == dead_letter_index { - client.log.Warnf("Cannot index event (status=%v), trying dead letter index. Enable debug logs to view the event and cause.", status) - client.log.Debugf("Cannot index event %#v (status=%v): %s, trying dead letter index", data[i], status, msg) + client.log.Warnf("Cannot index event (status=%v), trying dead letter index. Look for events-data log file to view the event and cause.", status) + client.eventsLogger.Warnf("Cannot index event %#v (status=%v): %s, trying dead letter index", data[i], status, msg) if data[i].Content.Meta == nil { data[i].Content.Meta = mapstr.M{ dead_letter_marker_field: true, @@ -451,8 +457,8 @@ func (client *Client) bulkCollectPublishFails(result eslegclient.BulkResult, dat } } else { // drop stats.nonIndexable++ - client.log.Warnf("Cannot index event (status=%v): dropping event! Enable debug logs to view the event and cause.", status) - client.log.Debugf("Cannot index event %#v (status=%v): %s, dropping event!", data[i], status, msg) + client.log.Warnf("Cannot index event (status=%v): dropping event! 
Look for events-data log file to view the event and cause.", status) + client.eventsLogger.Warnf("Cannot index event %#v (status=%v): %s, dropping event!", data[i], status, msg) continue } } diff --git a/libbeat/outputs/elasticsearch/elasticsearch.go b/libbeat/outputs/elasticsearch/elasticsearch.go index 649168eb11b4..c0826330fa32 100644 --- a/libbeat/outputs/elasticsearch/elasticsearch.go +++ b/libbeat/outputs/elasticsearch/elasticsearch.go @@ -25,6 +25,7 @@ import ( "github.com/elastic/beats/v7/libbeat/outputs/outil" "github.com/elastic/elastic-agent-libs/config" "github.com/elastic/elastic-agent-libs/logp" + "go.uber.org/zap" ) func init() { @@ -38,8 +39,13 @@ func makeES( beat beat.Info, observer outputs.Observer, cfg *config.C, + eventsLoggerCfg logp.Config, ) (outputs.Group, error) { log := logp.NewLogger(logSelector) + eventsLogger := logp.NewLogger(logSelector) + // Set a new Output so it writes to a different file than `log` + eventsLogger = log.WithOptions(zap.WrapCore(logp.WithFileOutput(eventsLoggerCfg))) + if !cfg.HasField("bulk_max_size") { if err := cfg.SetInt("bulk_max_size", -1, defaultBulkSize); err != nil { return outputs.Fail(err) @@ -110,27 +116,30 @@ func makeES( } var client outputs.NetworkClient - client, err = NewClient(ClientSettings{ - ConnectionSettings: eslegclient.ConnectionSettings{ - URL: esURL, - Beatname: beat.Beat, - Kerberos: esConfig.Kerberos, - Username: esConfig.Username, - Password: esConfig.Password, - APIKey: esConfig.APIKey, - Parameters: params, - Headers: esConfig.Headers, - CompressionLevel: esConfig.CompressionLevel, - Observer: observer, - EscapeHTML: esConfig.EscapeHTML, - Transport: esConfig.Transport, - IdleConnTimeout: esConfig.Transport.IdleConnTimeout, - }, - Index: index, - Pipeline: pipeline, - Observer: observer, - NonIndexableAction: policy.action(), - }, &connectCallbackRegistry) + client, err = NewClient( + log, + eventsLogger, + ClientSettings{ + ConnectionSettings: eslegclient.ConnectionSettings{ + URL: esURL, + Beatname: beat.Beat, + Kerberos: esConfig.Kerberos, + Username: esConfig.Username, + Password: esConfig.Password, + APIKey: esConfig.APIKey, + Parameters: params, + Headers: esConfig.Headers, + CompressionLevel: esConfig.CompressionLevel, + Observer: observer, + EscapeHTML: esConfig.EscapeHTML, + Transport: esConfig.Transport, + IdleConnTimeout: esConfig.Transport.IdleConnTimeout, + }, + Index: index, + Pipeline: pipeline, + Observer: observer, + NonIndexableAction: policy.action(), + }, &connectCallbackRegistry) if err != nil { return outputs.Fail(err) } diff --git a/libbeat/outputs/fileout/file.go b/libbeat/outputs/fileout/file.go index 4ddc5955d6ef..c058204b9983 100644 --- a/libbeat/outputs/fileout/file.go +++ b/libbeat/outputs/fileout/file.go @@ -51,6 +51,7 @@ func makeFileout( beat beat.Info, observer outputs.Observer, cfg *c.C, + eventsLoggerCfg logp.Config, ) (outputs.Group, error) { foConfig := defaultConfig() if err := cfg.Unpack(&foConfig); err != nil { diff --git a/libbeat/outputs/kafka/kafka.go b/libbeat/outputs/kafka/kafka.go index 0c856ea425db..ad69f30477e9 100644 --- a/libbeat/outputs/kafka/kafka.go +++ b/libbeat/outputs/kafka/kafka.go @@ -43,6 +43,7 @@ func makeKafka( beat beat.Info, observer outputs.Observer, cfg *config.C, + eventsLoggerCfg logp.Config, ) (outputs.Group, error) { log := logp.NewLogger(logSelector) log.Debug("initialize kafka output") diff --git a/libbeat/outputs/logstash/logstash.go b/libbeat/outputs/logstash/logstash.go index 072ec049f6fb..466f6b742f9f 100644 --- 
a/libbeat/outputs/logstash/logstash.go +++ b/libbeat/outputs/logstash/logstash.go @@ -21,6 +21,7 @@ import ( "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/outputs" conf "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/transport" "github.com/elastic/elastic-agent-libs/transport/tlscommon" ) @@ -40,6 +41,7 @@ func makeLogstash( beat beat.Info, observer outputs.Observer, cfg *conf.C, + eventsLoggerCfg logp.Config, ) (outputs.Group, error) { lsConfig, err := readConfig(cfg, beat) if err != nil { diff --git a/libbeat/outputs/output_reg.go b/libbeat/outputs/output_reg.go index 3d2675c2ce2e..213daf0298ad 100644 --- a/libbeat/outputs/output_reg.go +++ b/libbeat/outputs/output_reg.go @@ -23,6 +23,7 @@ import ( "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/publisher/queue" "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/logp" ) var outputReg = map[string]Factory{} @@ -32,7 +33,8 @@ type Factory func( im IndexManager, beat beat.Info, stats Observer, - cfg *config.C) (Group, error) + cfg *config.C, + eventsLogger logp.Config) (Group, error) // IndexManager provides additional index related services to the outputs. type IndexManager interface { @@ -81,6 +83,7 @@ func Load( stats Observer, name string, config *config.C, + eventsLoggerCfg logp.Config, ) (Group, error) { factory := FindFactory(name) if factory == nil { @@ -90,5 +93,5 @@ func Load( if stats == nil { stats = NewNilObserver() } - return factory(im, info, stats, config) + return factory(im, info, stats, config, eventsLoggerCfg) } diff --git a/libbeat/outputs/redis/redis.go b/libbeat/outputs/redis/redis.go index 9814d6abee7b..5656cd951717 100644 --- a/libbeat/outputs/redis/redis.go +++ b/libbeat/outputs/redis/redis.go @@ -30,6 +30,7 @@ import ( "github.com/elastic/beats/v7/libbeat/outputs/codec" "github.com/elastic/beats/v7/libbeat/outputs/outil" "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/transport" "github.com/elastic/elastic-agent-libs/transport/tlscommon" ) @@ -51,6 +52,7 @@ func makeRedis( beat beat.Info, observer outputs.Observer, cfg *config.C, + eventsLoggerCfg logp.Config, ) (outputs.Group, error) { if !cfg.HasField("index") { diff --git a/libbeat/outputs/shipper/shipper.go b/libbeat/outputs/shipper/shipper.go index fe19a36b31d2..bf0e77691f9e 100644 --- a/libbeat/outputs/shipper/shipper.go +++ b/libbeat/outputs/shipper/shipper.go @@ -92,6 +92,7 @@ func makeShipper( beat beat.Info, observer outputs.Observer, cfg *conf.C, + eventsLoggerCfg logp.Config, ) (outputs.Group, error) { config := defaultConfig() diff --git a/libbeat/publisher/pipeline/controller.go b/libbeat/publisher/pipeline/controller.go index 1c480c01bce2..bcaaca438fd7 100644 --- a/libbeat/publisher/pipeline/controller.go +++ b/libbeat/publisher/pipeline/controller.go @@ -180,7 +180,8 @@ func (c *outputController) Set(outGrp outputs.Group) { // Reload the output func (c *outputController) Reload( cfg *reload.ConfigWithMeta, - outFactory func(outputs.Observer, conf.Namespace) (outputs.Group, error), + eventsLoggerCfg logp.Config, + outFactory func(outputs.Observer, conf.Namespace, logp.Config) (outputs.Group, error), ) error { outCfg := conf.Namespace{} if cfg != nil { @@ -191,7 +192,7 @@ func (c *outputController) Reload( output, err := loadOutput(c.monitors, func(stats outputs.Observer) (string, 
outputs.Group, error) { name := outCfg.Name() - out, err := outFactory(stats, outCfg) + out, err := outFactory(stats, outCfg, eventsLoggerCfg) return name, out, err }) if err != nil { diff --git a/libbeat/publisher/pipeline/pipeline.go b/libbeat/publisher/pipeline/pipeline.go index cf03163750ee..7076b379fe58 100644 --- a/libbeat/publisher/pipeline/pipeline.go +++ b/libbeat/publisher/pipeline/pipeline.go @@ -111,7 +111,8 @@ const ( type OutputReloader interface { Reload( cfg *reload.ConfigWithMeta, - factory func(outputs.Observer, conf.Namespace) (outputs.Group, error), + eventsLoggerCfg logp.Config, + factory func(outputs.Observer, conf.Namespace, logp.Config) (outputs.Group, error), ) error } From 6f165bd4ddac7e49db4c7147fdc69db3d244fe20 Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Thu, 21 Dec 2023 09:04:31 +0100 Subject: [PATCH 02/21] fix tests --- libbeat/outputs/elasticsearch/client_integration_test.go | 2 +- libbeat/outputs/elasticsearch/client_proxy_test.go | 3 ++- libbeat/outputs/elasticsearch/elasticsearch_test.go | 2 ++ libbeat/outputs/kafka/kafka_integration_test.go | 2 +- libbeat/outputs/logstash/logstash_integration_test.go | 3 ++- libbeat/outputs/logstash/logstash_test.go | 3 ++- libbeat/outputs/redis/redis_integration_test.go | 3 ++- libbeat/outputs/redis/redis_test.go | 3 ++- libbeat/outputs/shipper/shipper_test.go | 2 ++ libbeat/publisher/pipeline/stress/run.go | 2 +- 10 files changed, 17 insertions(+), 8 deletions(-) diff --git a/libbeat/outputs/elasticsearch/client_integration_test.go b/libbeat/outputs/elasticsearch/client_integration_test.go index 7a8a06beccaf..58f813af7fa6 100644 --- a/libbeat/outputs/elasticsearch/client_integration_test.go +++ b/libbeat/outputs/elasticsearch/client_integration_test.go @@ -422,7 +422,7 @@ func connectTestEs(t *testing.T, cfg interface{}, stats outputs.Observer) (outpu info := beat.Info{Beat: "libbeat"} // disable ILM if using specified index name im, _ := idxmgmt.DefaultSupport(nil, info, conf.MustNewConfigFrom(map[string]interface{}{"setup.ilm.enabled": "false"})) - output, err := makeES(im, info, stats, config) + output, err := makeES(im, info, stats, config, logp.Config{}) if err != nil { t.Fatal(err) } diff --git a/libbeat/outputs/elasticsearch/client_proxy_test.go b/libbeat/outputs/elasticsearch/client_proxy_test.go index e3fd914bbe7d..9898e38c58df 100644 --- a/libbeat/outputs/elasticsearch/client_proxy_test.go +++ b/libbeat/outputs/elasticsearch/client_proxy_test.go @@ -36,6 +36,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common/atomic" "github.com/elastic/beats/v7/libbeat/esleg/eslegclient" "github.com/elastic/beats/v7/libbeat/outputs/outil" + "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/transport/httpcommon" ) @@ -204,7 +205,7 @@ func doClientPing(t *testing.T) { clientSettings.Transport.Proxy.URL = &proxyURL } - client, err := NewClient(clientSettings, nil) + client, err := NewClient(logp.L(), logp.L(), clientSettings, nil) require.NoError(t, err) // This ping won't succeed; we aren't testing end-to-end communication diff --git a/libbeat/outputs/elasticsearch/elasticsearch_test.go b/libbeat/outputs/elasticsearch/elasticsearch_test.go index 45db313d903c..1b5dcafc1e99 100644 --- a/libbeat/outputs/elasticsearch/elasticsearch_test.go +++ b/libbeat/outputs/elasticsearch/elasticsearch_test.go @@ -26,6 +26,7 @@ import ( "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/esleg/eslegclient" "github.com/elastic/elastic-agent-libs/config" + 
"github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" ) @@ -125,6 +126,7 @@ func TestPipelineSelection(t *testing.T) { Pipeline: &selector, }, nil, + logp.Config{}, ) assert.NoError(t, err) diff --git a/libbeat/outputs/kafka/kafka_integration_test.go b/libbeat/outputs/kafka/kafka_integration_test.go index 29fc72ac8590..b6e5be46d0ab 100644 --- a/libbeat/outputs/kafka/kafka_integration_test.go +++ b/libbeat/outputs/kafka/kafka_integration_test.go @@ -258,7 +258,7 @@ func TestKafkaPublish(t *testing.T) { } t.Run(name, func(t *testing.T) { - grp, err := makeKafka(nil, beat.Info{Beat: "libbeat", IndexPrefix: "testbeat"}, outputs.NewNilObserver(), cfg) + grp, err := makeKafka(nil, beat.Info{Beat: "libbeat", IndexPrefix: "testbeat"}, outputs.NewNilObserver(), cfg, logp.Config{}) if err != nil { t.Fatal(err) } diff --git a/libbeat/outputs/logstash/logstash_integration_test.go b/libbeat/outputs/logstash/logstash_integration_test.go index 2cfbcd03974c..fe44ff92bf03 100644 --- a/libbeat/outputs/logstash/logstash_integration_test.go +++ b/libbeat/outputs/logstash/logstash_integration_test.go @@ -39,6 +39,7 @@ import ( "github.com/elastic/beats/v7/libbeat/outputs/outest" "github.com/elastic/beats/v7/libbeat/outputs/outil" conf "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" "github.com/elastic/elastic-agent-libs/transport/httpcommon" ) @@ -193,7 +194,7 @@ func newTestElasticsearchOutput(t *testing.T, test string) *testOutputer { t.Fatal("init index management:", err) } - grp, err := plugin(im, info, outputs.NewNilObserver(), config) + grp, err := plugin(im, info, outputs.NewNilObserver(), config, logp.Config{}) if err != nil { t.Fatalf("init elasticsearch output plugin failed: %v", err) } diff --git a/libbeat/outputs/logstash/logstash_test.go b/libbeat/outputs/logstash/logstash_test.go index fa1b57fb841b..226515d10363 100644 --- a/libbeat/outputs/logstash/logstash_test.go +++ b/libbeat/outputs/logstash/logstash_test.go @@ -32,6 +32,7 @@ import ( "github.com/elastic/beats/v7/libbeat/outputs" "github.com/elastic/beats/v7/libbeat/outputs/outest" conf "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" v2 "github.com/elastic/go-lumber/server/v2" ) @@ -181,7 +182,7 @@ func newTestLumberjackOutput( } cfg, _ := conf.NewConfigFrom(config) - grp, err := outputs.Load(nil, beat.Info{}, nil, "logstash", cfg) + grp, err := outputs.Load(nil, beat.Info{}, nil, "logstash", cfg, logp.Config{}) if err != nil { t.Fatalf("init logstash output plugin failed: %v", err) } diff --git a/libbeat/outputs/redis/redis_integration_test.go b/libbeat/outputs/redis/redis_integration_test.go index dfd48dc75d23..3627203f7c1e 100644 --- a/libbeat/outputs/redis/redis_integration_test.go +++ b/libbeat/outputs/redis/redis_integration_test.go @@ -37,6 +37,7 @@ import ( _ "github.com/elastic/beats/v7/libbeat/outputs/codec/json" "github.com/elastic/beats/v7/libbeat/outputs/outest" conf "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" ) @@ -330,7 +331,7 @@ func newRedisTestingOutput(t *testing.T, cfg map[string]interface{}) outputs.Cli t.Fatalf("redis output module not registered") } - out, err := plugin(nil, beat.Info{Beat: testBeatname, Version: testBeatversion}, outputs.NewNilObserver(), config) + out, err := plugin(nil, 
beat.Info{Beat: testBeatname, Version: testBeatversion}, outputs.NewNilObserver(), config, logp.Config{}) if err != nil { t.Fatalf("Failed to initialize redis output: %v", err) } diff --git a/libbeat/outputs/redis/redis_test.go b/libbeat/outputs/redis/redis_test.go index 6e9d70f57860..7640a2957c06 100644 --- a/libbeat/outputs/redis/redis_test.go +++ b/libbeat/outputs/redis/redis_test.go @@ -26,6 +26,7 @@ import ( "github.com/elastic/beats/v7/libbeat/outputs" _ "github.com/elastic/beats/v7/libbeat/outputs/codec/json" "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" ) @@ -108,7 +109,7 @@ func TestMakeRedis(t *testing.T) { t.Run(name, func(t *testing.T) { cfg, err := config.NewConfigFrom(test.config) assert.NoError(t, err) - groups, err := makeRedis(nil, beatInfo, outputs.NewNilObserver(), cfg) + groups, err := makeRedis(nil, beatInfo, outputs.NewNilObserver(), cfg, logp.Config{}) assert.Equal(t, err == nil, test.valid) if err != nil && test.valid { t.Log(err) diff --git a/libbeat/outputs/shipper/shipper_test.go b/libbeat/outputs/shipper/shipper_test.go index e26d44635aff..53d55c5a45b6 100644 --- a/libbeat/outputs/shipper/shipper_test.go +++ b/libbeat/outputs/shipper/shipper_test.go @@ -42,6 +42,7 @@ import ( "github.com/elastic/beats/v7/libbeat/publisher" "github.com/elastic/beats/v7/libbeat/publisher/pipeline" "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" "github.com/elastic/elastic-agent-shipper-client/pkg/helpers" pb "github.com/elastic/elastic-agent-shipper-client/pkg/proto" @@ -583,6 +584,7 @@ func createShipperClient(t *testing.T, cfg *config.C, observer outputs.Observer) beat.Info{Beat: "libbeat", IndexPrefix: "testbeat"}, observer, cfg, + logp.Config{}, ) require.NoError(t, err) require.Len(t, group.Clients, 1) diff --git a/libbeat/publisher/pipeline/stress/run.go b/libbeat/publisher/pipeline/stress/run.go index ee118d502eba..622f303173df 100644 --- a/libbeat/publisher/pipeline/stress/run.go +++ b/libbeat/publisher/pipeline/stress/run.go @@ -76,7 +76,7 @@ func RunTests( processing, func(stat outputs.Observer) (string, outputs.Group, error) { cfg := config.Output - out, err := outputs.Load(nil, info, stat, cfg.Name(), cfg.Config()) + out, err := outputs.Load(nil, info, stat, cfg.Name(), cfg.Config(), logp.Config{}) return cfg.Name(), out, err }, ) From 5d60d60bbe00b0052caddc1b48af84a28851f941 Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Thu, 21 Dec 2023 11:56:31 +0100 Subject: [PATCH 03/21] fix more tests --- go.mod | 2 +- go.sum | 4 +- libbeat/cmd/instance/beat_test.go | 6 +- .../elasticsearch/client_integration_test.go | 10 ++- libbeat/outputs/elasticsearch/client_test.go | 66 ++++++++++++++----- .../elasticsearch/elasticsearch_test.go | 3 +- libbeat/publisher/pipeline/stress/out.go | 3 +- 7 files changed, 69 insertions(+), 25 deletions(-) diff --git a/go.mod b/go.mod index 9ca5d766de27..332ceba55f74 100644 --- a/go.mod +++ b/go.mod @@ -420,4 +420,4 @@ replace ( // Exclude this version because the version has an invalid checksum. 
exclude github.com/docker/distribution v2.8.0+incompatible -replace github.com/elastic/elastic-agent-libs => github.com/belimawr/elastic-agent-libs v0.2.9-0.20231220154111-efc1fba83b4b +replace github.com/elastic/elastic-agent-libs => github.com/belimawr/elastic-agent-libs v0.2.9-0.20231221105324-aedb70a4f832 diff --git a/go.sum b/go.sum index 6d202c7825b6..169361563025 100644 --- a/go.sum +++ b/go.sum @@ -373,8 +373,8 @@ github.com/awslabs/goformation/v4 v4.1.0 h1:JRxIW0IjhYpYDrIZOTJGMu2azXKI+OK5dP56 github.com/awslabs/goformation/v4 v4.1.0/go.mod h1:MBDN7u1lMNDoehbFuO4uPvgwPeolTMA2TzX1yO6KlxI= github.com/awslabs/kinesis-aggregation/go/v2 v2.0.0-20220623125934-28468a6701b5 h1:lxW5Q6K2IisyF5tlr6Ts0W4POGWQZco05MJjFmoeIHs= github.com/awslabs/kinesis-aggregation/go/v2 v2.0.0-20220623125934-28468a6701b5/go.mod h1:0Qr1uMHFmHsIYMcG4T7BJ9yrJtWadhOmpABCX69dwuc= -github.com/belimawr/elastic-agent-libs v0.2.9-0.20231220154111-efc1fba83b4b h1:GEwwH9rTwJzcHAcdCFkfta0AHbADcTNpxZhL51ASLpo= -github.com/belimawr/elastic-agent-libs v0.2.9-0.20231220154111-efc1fba83b4b/go.mod h1:EbRwBMsWoU4IHGKJlTrxbxC03hkihS9W4h+UgraLdDM= +github.com/belimawr/elastic-agent-libs v0.2.9-0.20231221105324-aedb70a4f832 h1:hCPNCDrtpZg8GekH7RptPcJ9C/Dgr2ebku2lETqFFw0= +github.com/belimawr/elastic-agent-libs v0.2.9-0.20231221105324-aedb70a4f832/go.mod h1:EbRwBMsWoU4IHGKJlTrxbxC03hkihS9W4h+UgraLdDM= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/immutable v0.2.1/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI= github.com/benbjohnson/tmpl v1.0.0/go.mod h1:igT620JFIi44B6awvU9IsDhR77IXWtFigTLil/RPdps= diff --git a/libbeat/cmd/instance/beat_test.go b/libbeat/cmd/instance/beat_test.go index 52e55941225d..0ee30cdcc720 100644 --- a/libbeat/cmd/instance/beat_test.go +++ b/libbeat/cmd/instance/beat_test.go @@ -29,6 +29,7 @@ import ( "github.com/elastic/beats/v7/libbeat/outputs" "github.com/elastic/beats/v7/libbeat/publisher/queue/memqueue" "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/go-ucfg/yaml" "github.com/gofrs/uuid" @@ -247,7 +248,7 @@ elasticsearch: update := &reload.ConfigWithMeta{Config: c} m := &outputReloaderMock{} - reloader := b.makeOutputReloader(m) + reloader := b.makeOutputReloader(m, logp.Config{}) require.False(t, b.Config.Output.IsSet(), "the output should not be set yet") require.True(t, b.isConnectionToOlderVersionAllowed(), "allow_older_versions flag should be true from 8.11") @@ -266,7 +267,8 @@ type outputReloaderMock struct { func (r *outputReloaderMock) Reload( cfg *reload.ConfigWithMeta, - factory func(o outputs.Observer, cfg config.Namespace) (outputs.Group, error), + eventsLoggerCfg logp.Config, + factory func(o outputs.Observer, cfg config.Namespace, eventsLoggerCfg logp.Config) (outputs.Group, error), ) error { r.cfg = cfg return nil diff --git a/libbeat/outputs/elasticsearch/client_integration_test.go b/libbeat/outputs/elasticsearch/client_integration_test.go index 58f813af7fa6..4322ad13412e 100644 --- a/libbeat/outputs/elasticsearch/client_integration_test.go +++ b/libbeat/outputs/elasticsearch/client_integration_test.go @@ -422,7 +422,15 @@ func connectTestEs(t *testing.T, cfg interface{}, stats outputs.Observer) (outpu info := beat.Info{Beat: "libbeat"} // disable ILM if using specified index name im, _ := idxmgmt.DefaultSupport(nil, info, conf.MustNewConfigFrom(map[string]interface{}{"setup.ilm.enabled": "false"})) - output, err := makeES(im, info, stats, config, 
logp.Config{}) + + // Creates the events logger configuration for testing + // It used the default one but logs to stderr instead of a file + eventsLoggerCfg := logp.DefaultConfig(logp.DefaultEnvironment) + eventsLoggerCfg.Level = logp.DebugLevel + eventsLoggerCfg.ToStderr = true + eventsLoggerCfg.ToFiles = false + + output, err := makeES(im, info, stats, config, eventsLoggerCfg) if err != nil { t.Fatal(err) } diff --git a/libbeat/outputs/elasticsearch/client_test.go b/libbeat/outputs/elasticsearch/client_test.go index 58e5f3ee5e20..ed4d1a67fb2b 100644 --- a/libbeat/outputs/elasticsearch/client_test.go +++ b/libbeat/outputs/elasticsearch/client_test.go @@ -90,6 +90,8 @@ func (bm *batchMock) RetryEvents(events []publisher.Event) { func TestPublish(t *testing.T) { makePublishTestClient := func(t *testing.T, url string) *Client { client, err := NewClient( + logp.L(), + logp.L(), ClientSettings{ Observer: outputs.NewNilObserver(), ConnectionSettings: eslegclient.ConnectionSettings{URL: url}, @@ -248,6 +250,8 @@ func TestPublish(t *testing.T) { func TestCollectPublishFailsNone(t *testing.T) { client, err := NewClient( + logp.L(), + logp.L(), ClientSettings{ Observer: outputs.NewNilObserver(), NonIndexableAction: "drop", @@ -272,6 +276,8 @@ func TestCollectPublishFailsNone(t *testing.T) { func TestCollectPublishFailMiddle(t *testing.T) { client, err := NewClient( + logp.L(), + logp.L(), ClientSettings{ Observer: outputs.NewNilObserver(), NonIndexableAction: "drop", @@ -302,6 +308,8 @@ func TestCollectPublishFailMiddle(t *testing.T) { func TestCollectPublishFailDeadLetterQueue(t *testing.T) { client, err := NewClient( + logp.L(), + logp.L(), ClientSettings{ Observer: outputs.NewNilObserver(), NonIndexableAction: "dead_letter_index", @@ -361,6 +369,8 @@ func TestCollectPublishFailDeadLetterQueue(t *testing.T) { func TestCollectPublishFailDrop(t *testing.T) { client, err := NewClient( + logp.L(), + logp.L(), ClientSettings{ Observer: outputs.NewNilObserver(), NonIndexableAction: "drop", @@ -405,6 +415,8 @@ func TestCollectPublishFailDrop(t *testing.T) { func TestCollectPublishFailAll(t *testing.T) { client, err := NewClient( + logp.L(), + logp.L(), ClientSettings{ Observer: outputs.NewNilObserver(), NonIndexableAction: "drop", @@ -434,6 +446,8 @@ func TestCollectPipelinePublishFail(t *testing.T) { logp.TestingSetup(logp.WithSelectors("elasticsearch")) client, err := NewClient( + logp.L(), + logp.L(), ClientSettings{ Observer: outputs.NewNilObserver(), NonIndexableAction: "drop", @@ -481,6 +495,8 @@ func TestCollectPipelinePublishFail(t *testing.T) { func BenchmarkCollectPublishFailsNone(b *testing.B) { client, err := NewClient( + logp.L(), + logp.L(), ClientSettings{ Observer: outputs.NewNilObserver(), NonIndexableAction: "drop", @@ -510,6 +526,8 @@ func BenchmarkCollectPublishFailsNone(b *testing.B) { func BenchmarkCollectPublishFailMiddle(b *testing.B) { client, err := NewClient( + logp.L(), + logp.L(), ClientSettings{ Observer: outputs.NewNilObserver(), NonIndexableAction: "drop", @@ -540,6 +558,8 @@ func BenchmarkCollectPublishFailMiddle(b *testing.B) { func BenchmarkCollectPublishFailAll(b *testing.B) { client, err := NewClient( + logp.L(), + logp.L(), ClientSettings{ Observer: outputs.NewNilObserver(), NonIndexableAction: "drop", @@ -589,17 +609,20 @@ func TestClientWithHeaders(t *testing.T) { })) defer ts.Close() - client, err := NewClient(ClientSettings{ - Observer: outputs.NewNilObserver(), - ConnectionSettings: eslegclient.ConnectionSettings{ - URL: ts.URL, - Headers: map[string]string{ - 
"host": "myhost.local", - "X-Test": "testing value", + client, err := NewClient( + logp.L(), + logp.L(), + ClientSettings{ + Observer: outputs.NewNilObserver(), + ConnectionSettings: eslegclient.ConnectionSettings{ + URL: ts.URL, + Headers: map[string]string{ + "host": "myhost.local", + "X-Test": "testing value", + }, }, - }, - Index: outil.MakeSelector(outil.ConstSelectorExpr("test", outil.SelectorLowerCase)), - }, nil) + Index: outil.MakeSelector(outil.ConstSelectorExpr("test", outil.SelectorLowerCase)), + }, nil) assert.NoError(t, err) // simple ping @@ -667,6 +690,8 @@ func TestBulkEncodeEvents(t *testing.T) { } client, err := NewClient( + logp.L(), + logp.L(), ClientSettings{ Observer: outputs.NewNilObserver(), Index: index, @@ -743,6 +768,8 @@ func TestBulkEncodeEventsWithOpType(t *testing.T) { } client, _ := NewClient( + logp.L(), + logp.L(), ClientSettings{ Observer: outputs.NewNilObserver(), Index: index, @@ -786,13 +813,16 @@ func TestClientWithAPIKey(t *testing.T) { })) defer ts.Close() - client, err := NewClient(ClientSettings{ - Observer: outputs.NewNilObserver(), - ConnectionSettings: eslegclient.ConnectionSettings{ - URL: ts.URL, - APIKey: "hyokHG4BfWk5viKZ172X:o45JUkyuS--yiSAuuxl8Uw", - }, - }, nil) + client, err := NewClient( + logp.L(), + logp.L(), + ClientSettings{ + Observer: outputs.NewNilObserver(), + ConnectionSettings: eslegclient.ConnectionSettings{ + URL: ts.URL, + APIKey: "hyokHG4BfWk5viKZ172X:o45JUkyuS--yiSAuuxl8Uw", + }, + }, nil) assert.NoError(t, err) // This connection will fail since the server doesn't return a valid @@ -806,6 +836,8 @@ func TestClientWithAPIKey(t *testing.T) { func TestPublishEventsWithBulkFiltering(t *testing.T) { makePublishTestClient := func(t *testing.T, url string, configParams map[string]string) *Client { client, err := NewClient( + logp.L(), + logp.L(), ClientSettings{ Observer: outputs.NewNilObserver(), ConnectionSettings: eslegclient.ConnectionSettings{ diff --git a/libbeat/outputs/elasticsearch/elasticsearch_test.go b/libbeat/outputs/elasticsearch/elasticsearch_test.go index 1b5dcafc1e99..09f4ad0bf46e 100644 --- a/libbeat/outputs/elasticsearch/elasticsearch_test.go +++ b/libbeat/outputs/elasticsearch/elasticsearch_test.go @@ -122,11 +122,12 @@ func TestPipelineSelection(t *testing.T) { selector, err := buildPipelineSelector(config.MustNewConfigFrom(test.cfg)) client, err := NewClient( + logp.L(), + logp.L(), ClientSettings{ Pipeline: &selector, }, nil, - logp.Config{}, ) assert.NoError(t, err) diff --git a/libbeat/publisher/pipeline/stress/out.go b/libbeat/publisher/pipeline/stress/out.go index d1014b8d782b..fc51f24e7d57 100644 --- a/libbeat/publisher/pipeline/stress/out.go +++ b/libbeat/publisher/pipeline/stress/out.go @@ -26,6 +26,7 @@ import ( "github.com/elastic/beats/v7/libbeat/outputs" "github.com/elastic/beats/v7/libbeat/publisher" conf "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/logp" ) type testOutput struct { @@ -55,7 +56,7 @@ func init() { outputs.RegisterType("test", makeTestOutput) } -func makeTestOutput(_ outputs.IndexManager, beat beat.Info, observer outputs.Observer, cfg *conf.C) (outputs.Group, error) { +func makeTestOutput(_ outputs.IndexManager, beat beat.Info, observer outputs.Observer, cfg *conf.C, eventsLoggerCfg logp.Config) (outputs.Group, error) { config := defaultTestOutputConfig if err := cfg.Unpack(&config); err != nil { return outputs.Fail(err) From 64b0f0e09a36b6171903a49a42fd9f0fbb67cff4 Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Thu, 21 Dec 2023 
17:51:57 +0100 Subject: [PATCH 04/21] fix dockerlogbeat --- .../dockerlogbeat/pipelinemanager/libbeattools.go | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/x-pack/dockerlogbeat/pipelinemanager/libbeattools.go b/x-pack/dockerlogbeat/pipelinemanager/libbeattools.go index de3436156b42..774feda6765c 100644 --- a/x-pack/dockerlogbeat/pipelinemanager/libbeattools.go +++ b/x-pack/dockerlogbeat/pipelinemanager/libbeattools.go @@ -21,6 +21,7 @@ import ( "github.com/elastic/beats/v7/libbeat/version" "github.com/elastic/elastic-agent-libs/file" "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/elastic-agent-libs/logp/configure" ) // load pipeline starts up a new pipeline with the given config @@ -66,6 +67,16 @@ func loadNewPipeline(logOptsConfig ContainerOutputConfig, hostname string, log * Processors: processing, } + // Get the default/current logging configuration + // we need some defaults to be populates otherwise Unpack will + // fail + eventsLoggerCfg := logp.DefaultConfig(configure.GetEnvironment()) + + // Ensure the default filename is set + if eventsLoggerCfg.Files.Name == "" { + eventsLoggerCfg.Files.Name = "dockerlogbeat-events-data" + } + pipeline, err := pipeline.LoadWithSettings( info, pipeline.Monitors{ @@ -76,7 +87,7 @@ func loadNewPipeline(logOptsConfig ContainerOutputConfig, hostname string, log * pipelineCfg, func(stat outputs.Observer) (string, outputs.Group, error) { cfg := config.Output - out, err := outputs.Load(idxMgr, info, stat, cfg.Name(), cfg.Config()) + out, err := outputs.Load(idxMgr, info, stat, cfg.Name(), cfg.Config(), eventsLoggerCfg) return cfg.Name(), out, err }, settings, From 896d11c8dbb8bd6160186b77f26b157456b2766c Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Fri, 22 Dec 2023 10:35:33 +0100 Subject: [PATCH 05/21] Fix ES and use events logger in all outputs --- .../outputs/elasticsearch/elasticsearch.go | 2 +- libbeat/outputs/fileout/file.go | 28 +++++++---- libbeat/outputs/kafka/client.go | 49 +++++++++++-------- libbeat/outputs/kafka/kafka.go | 2 +- libbeat/outputs/redis/client.go | 44 ++++++++++------- libbeat/outputs/redis/redis.go | 2 +- 6 files changed, 77 insertions(+), 50 deletions(-) diff --git a/libbeat/outputs/elasticsearch/elasticsearch.go b/libbeat/outputs/elasticsearch/elasticsearch.go index c0826330fa32..268540a5676c 100644 --- a/libbeat/outputs/elasticsearch/elasticsearch.go +++ b/libbeat/outputs/elasticsearch/elasticsearch.go @@ -44,7 +44,7 @@ func makeES( log := logp.NewLogger(logSelector) eventsLogger := logp.NewLogger(logSelector) // Set a new Output so it writes to a different file than `log` - eventsLogger = log.WithOptions(zap.WrapCore(logp.WithFileOutput(eventsLoggerCfg))) + eventsLogger = eventsLogger.WithOptions(zap.WrapCore(logp.WithFileOutput(eventsLoggerCfg))) if !cfg.HasField("bulk_max_size") { if err := cfg.SetInt("bulk_max_size", -1, defaultBulkSize); err != nil { diff --git a/libbeat/outputs/fileout/file.go b/libbeat/outputs/fileout/file.go index c058204b9983..4c20c924ee0e 100644 --- a/libbeat/outputs/fileout/file.go +++ b/libbeat/outputs/fileout/file.go @@ -30,6 +30,7 @@ import ( c "github.com/elastic/elastic-agent-libs/config" "github.com/elastic/elastic-agent-libs/file" "github.com/elastic/elastic-agent-libs/logp" + "go.uber.org/zap" ) func init() { @@ -37,12 +38,13 @@ func init() { } type fileOutput struct { - log *logp.Logger - filePath string - beat beat.Info - observer outputs.Observer - rotator *file.Rotator - codec codec.Codec + log *logp.Logger + eventsLogger 
*logp.Logger + filePath string + beat beat.Info + observer outputs.Observer + rotator *file.Rotator + codec codec.Codec } // makeFileout instantiates a new file output instance. @@ -61,10 +63,15 @@ func makeFileout( // disable bulk support in publisher pipeline _ = cfg.SetInt("bulk_max_size", -1, -1) + logSelector := "file" + eventsLogger := logp.NewLogger(logSelector) + // Set a new Output so it writes to a different file than `log` + eventsLogger = eventsLogger.WithOptions(zap.WrapCore(logp.WithFileOutput(eventsLoggerCfg))) fo := &fileOutput{ - log: logp.NewLogger("file"), - beat: beat, - observer: observer, + log: logp.NewLogger(logSelector), + eventsLogger: eventsLogger, + beat: beat, + observer: observer, } if err := fo.init(beat, foConfig); err != nil { return outputs.Fail(err) @@ -132,7 +139,8 @@ func (out *fileOutput) Publish(_ context.Context, batch publisher.Batch) error { } else { out.log.Warnf("Failed to serialize the event: %+v", err) } - out.log.Debugf("Failed event: %v", event) + out.log.Debug("Event logged to events-data log file") + out.eventsLogger.Debugf("Failed event: %v", event) dropped++ continue diff --git a/libbeat/outputs/kafka/client.go b/libbeat/outputs/kafka/client.go index 24bbc61145d4..6acebdfab9a0 100644 --- a/libbeat/outputs/kafka/client.go +++ b/libbeat/outputs/kafka/client.go @@ -28,6 +28,7 @@ import ( "github.com/Shopify/sarama" "github.com/eapache/go-resiliency/breaker" + "go.uber.org/zap" "github.com/elastic/beats/v7/libbeat/common/fmtstr" "github.com/elastic/beats/v7/libbeat/outputs" @@ -40,16 +41,17 @@ import ( ) type client struct { - log *logp.Logger - observer outputs.Observer - hosts []string - topic outil.Selector - key *fmtstr.EventFormatString - index string - codec codec.Codec - config sarama.Config - mux sync.Mutex - done chan struct{} + log *logp.Logger + eventsLogger *logp.Logger + observer outputs.Observer + hosts []string + topic outil.Selector + key *fmtstr.EventFormatString + index string + codec codec.Codec + config sarama.Config + mux sync.Mutex + done chan struct{} producer sarama.AsyncProducer @@ -81,17 +83,23 @@ func newKafkaClient( headers []header, writer codec.Codec, cfg *sarama.Config, + eventsLoggerCfg logp.Config, ) (*client, error) { + eventsLogger := logp.NewLogger(logSelector) + // Set a new Output so it writes to a different file than `log` + eventsLogger = eventsLogger.WithOptions(zap.WrapCore(logp.WithFileOutput(eventsLoggerCfg))) + c := &client{ - log: logp.NewLogger(logSelector), - observer: observer, - hosts: hosts, - topic: topic, - key: key, - index: strings.ToLower(index), - codec: writer, - config: *cfg, - done: make(chan struct{}), + log: logp.NewLogger(logSelector), + eventsLogger: eventsLogger, + observer: observer, + hosts: hosts, + topic: topic, + key: key, + index: strings.ToLower(index), + codec: writer, + config: *cfg, + done: make(chan struct{}), } if len(headers) != 0 { @@ -228,7 +236,8 @@ func (c *client) getEventMessage(data *publisher.Event) (*message, error) { serializedEvent, err := c.codec.Encode(c.index, event) if err != nil { if c.log.IsDebug() { - c.log.Debugf("failed event: %v", event) + c.eventsLogger.Debugf("failed event: %v", event) + c.log.Debug("failed event logged to events logger file") } return nil, err } diff --git a/libbeat/outputs/kafka/kafka.go b/libbeat/outputs/kafka/kafka.go index ad69f30477e9..524a51bafe2d 100644 --- a/libbeat/outputs/kafka/kafka.go +++ b/libbeat/outputs/kafka/kafka.go @@ -73,7 +73,7 @@ func makeKafka( return outputs.Fail(err) } - client, err := 
newKafkaClient(observer, hosts, beat.IndexPrefix, kConfig.Key, topic, kConfig.Headers, codec, libCfg) + client, err := newKafkaClient(observer, hosts, beat.IndexPrefix, kConfig.Key, topic, kConfig.Headers, codec, libCfg, eventsLoggerCfg) if err != nil { return outputs.Fail(err) } diff --git a/libbeat/outputs/redis/client.go b/libbeat/outputs/redis/client.go index 5a299749aac8..a25e2750b6a1 100644 --- a/libbeat/outputs/redis/client.go +++ b/libbeat/outputs/redis/client.go @@ -26,6 +26,7 @@ import ( "time" "github.com/gomodule/redigo/redis" + "go.uber.org/zap" "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/outputs" @@ -47,7 +48,8 @@ type publishFn func( ) ([]publisher.Event, error) type client struct { - log *logp.Logger + log *logp.Logger + eventsLogger *logp.Logger *transport.Client observer outputs.Observer index string @@ -74,18 +76,25 @@ func newClient( pass string, db int, key outil.Selector, dt redisDataType, index string, codec codec.Codec, + eventsLoggerCfg logp.Config, ) *client { + logSelector := "redis" + eventsLogger := logp.NewLogger(logSelector) + // Set a new Output so it writes to a different file than `log` + eventsLogger = eventsLogger.WithOptions(zap.WrapCore(logp.WithFileOutput(eventsLoggerCfg))) + return &client{ - log: logp.NewLogger("redis"), - Client: tc, - observer: observer, - timeout: timeout, - password: pass, - index: strings.ToLower(index), - db: db, - dataType: dt, - key: key, - codec: codec, + log: logp.NewLogger(logSelector), + eventsLogger: eventsLogger, + Client: tc, + observer: observer, + timeout: timeout, + password: pass, + index: strings.ToLower(index), + db: db, + dataType: dt, + key: key, + codec: codec, } } @@ -227,7 +236,7 @@ func (c *client) publishEventsBulk(conn redis.Conn, command string) publishFn { args := make([]interface{}, 1, len(data)+1) args[0] = dest - okEvents, args := serializeEvents(c.log, args, 1, data, c.index, c.codec) + okEvents, args := serializeEvents(c.log, c.eventsLogger, args, 1, data, c.index, c.codec) c.observer.Dropped(len(data) - len(okEvents)) if (len(args) - 1) == 0 { return nil, nil @@ -253,7 +262,7 @@ func (c *client) publishEventsPipeline(conn redis.Conn, command string) publishF return func(key outil.Selector, data []publisher.Event) ([]publisher.Event, error) { var okEvents []publisher.Event serialized := make([]interface{}, 0, len(data)) - okEvents, serialized = serializeEvents(c.log, serialized, 0, data, c.index, c.codec) + okEvents, serialized = serializeEvents(c.log, c.eventsLogger, serialized, 0, data, c.index, c.codec) c.observer.Dropped(len(data) - len(okEvents)) if len(serialized) == 0 { return nil, nil @@ -308,6 +317,7 @@ func (c *client) publishEventsPipeline(conn redis.Conn, command string) publishF func serializeEvents( log *logp.Logger, + eventsLogger *logp.Logger, to []interface{}, i int, data []publisher.Event, @@ -319,8 +329,8 @@ func serializeEvents( for _, d := range data { serializedEvent, err := codec.Encode(index, &d.Content) if err != nil { - log.Errorf("Encoding event failed with error: %+v", err) - log.Debugf("Failed event: %v", d.Content) + log.Errorf("Encoding event failed with error: %+v. 
Look for events-data log file to view the event", err) + eventsLogger.Debugf("Failed event: %v", d.Content) goto failLoop } @@ -337,8 +347,8 @@ failLoop: for _, d := range rest { serializedEvent, err := codec.Encode(index, &d.Content) if err != nil { - log.Errorf("Encoding event failed with error: %+v", err) - log.Debugf("Failed event: %v", d.Content) + log.Errorf("Encoding event failed with error: %+v. Look for events-data log file to view the event", err) + eventsLogger.Debugf("Failed event: %v", d.Content) i++ continue } diff --git a/libbeat/outputs/redis/redis.go b/libbeat/outputs/redis/redis.go index 5656cd951717..5f902620a2ab 100644 --- a/libbeat/outputs/redis/redis.go +++ b/libbeat/outputs/redis/redis.go @@ -163,7 +163,7 @@ func makeRedis( } client := newClient(conn, observer, rConfig.Timeout, - pass, rConfig.Db, key, dataType, rConfig.Index, enc) + pass, rConfig.Db, key, dataType, rConfig.Index, enc, eventsLoggerCfg) clients[i] = newBackoffClient(client, rConfig.Backoff.Init, rConfig.Backoff.Max) } From 9df5b794849df86e322a7992eb5aa52ca79f126a Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Tue, 16 Jan 2024 14:40:32 +0100 Subject: [PATCH 06/21] Update documentation and notice Update documentation and notice file --- NOTICE.txt | 6 +- auditbeat/auditbeat.reference.yml | 36 ++++++++++ auditbeat/auditbeat.yml | 14 ++++ filebeat/filebeat.reference.yml | 36 ++++++++++ filebeat/filebeat.yml | 14 ++++ go.mod | 2 +- go.sum | 4 +- heartbeat/heartbeat.reference.yml | 36 ++++++++++ heartbeat/heartbeat.yml | 14 ++++ .../_meta/config/logging.reference.yml.tmpl | 36 ++++++++++ libbeat/_meta/config/logging.yml.tmpl | 14 ++++ libbeat/docs/loggingconfig.asciidoc | 70 +++++++++++++++++++ .../outputs/elasticsearch/elasticsearch.go | 3 +- libbeat/outputs/fileout/file.go | 3 +- metricbeat/metricbeat.reference.yml | 36 ++++++++++ metricbeat/metricbeat.yml | 14 ++++ packetbeat/packetbeat.reference.yml | 36 ++++++++++ packetbeat/packetbeat.yml | 14 ++++ winlogbeat/winlogbeat.reference.yml | 36 ++++++++++ winlogbeat/winlogbeat.yml | 14 ++++ x-pack/auditbeat/auditbeat.reference.yml | 36 ++++++++++ x-pack/auditbeat/auditbeat.yml | 14 ++++ x-pack/filebeat/filebeat.reference.yml | 36 ++++++++++ x-pack/filebeat/filebeat.yml | 14 ++++ .../functionbeat/functionbeat.reference.yml | 36 ++++++++++ x-pack/functionbeat/functionbeat.yml | 14 ++++ x-pack/heartbeat/heartbeat.reference.yml | 36 ++++++++++ x-pack/heartbeat/heartbeat.yml | 14 ++++ x-pack/metricbeat/metricbeat.reference.yml | 36 ++++++++++ x-pack/metricbeat/metricbeat.yml | 14 ++++ x-pack/osquerybeat/osquerybeat.reference.yml | 36 ++++++++++ x-pack/osquerybeat/osquerybeat.yml | 14 ++++ x-pack/packetbeat/packetbeat.reference.yml | 36 ++++++++++ x-pack/packetbeat/packetbeat.yml | 14 ++++ x-pack/winlogbeat/winlogbeat.reference.yml | 36 ++++++++++ x-pack/winlogbeat/winlogbeat.yml | 14 ++++ 36 files changed, 830 insertions(+), 8 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index c803ff33e8ea..fdd11f31b1cc 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -12700,12 +12700,12 @@ SOFTWARE -------------------------------------------------------------------------------- -Dependency : github.com/elastic/elastic-agent-libs -Version: v0.7.5 +Dependency : github.com/belimawr/elastic-agent-libs +Version: v0.2.9-0.20240116105334-25f61a14ad41 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.7.5/LICENSE: +Contents of 
probable licence file $GOMODCACHE/github.com/belimawr/elastic-agent-libs@v0.2.9-0.20240116105334-25f61a14ad41/LICENSE: Apache License Version 2.0, January 2004 diff --git a/auditbeat/auditbeat.reference.yml b/auditbeat/auditbeat.reference.yml index 883760ab410b..0e0530db6b0c 100644 --- a/auditbeat/auditbeat.reference.yml +++ b/auditbeat/auditbeat.reference.yml @@ -1544,6 +1544,42 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.events: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/auditbeat + + # The name of the files where the logs are written to. + #name: auditbeat-events-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 10485760 # = 10MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 7 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to true. + # rotateonstartup: true + # ============================= X-Pack Monitoring ============================== # Auditbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/auditbeat/auditbeat.yml b/auditbeat/auditbeat.yml index eb87fec7e7e8..e882ac93aaff 100644 --- a/auditbeat/auditbeat.yml +++ b/auditbeat/auditbeat.yml @@ -169,6 +169,20 @@ processors: # "publisher", "service". #logging.selectors: ["*"] +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.events: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/auditbeat + + # The name of the files where the logs are written to. + #name: auditbeat-events-data + # ============================= X-Pack Monitoring ============================== # Auditbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. 
The diff --git a/filebeat/filebeat.reference.yml b/filebeat/filebeat.reference.yml index 755db3726e7e..7cf1f54d1616 100644 --- a/filebeat/filebeat.reference.yml +++ b/filebeat/filebeat.reference.yml @@ -2640,6 +2640,42 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.events: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/filebeat + + # The name of the files where the logs are written to. + #name: filebeat-events-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 10485760 # = 10MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 7 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to true. + # rotateonstartup: true + # ============================= X-Pack Monitoring ============================== # Filebeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/filebeat/filebeat.yml b/filebeat/filebeat.yml index aa50779b9221..3add6f54a11a 100644 --- a/filebeat/filebeat.yml +++ b/filebeat/filebeat.yml @@ -186,6 +186,20 @@ processors: # "publisher", "service". #logging.selectors: ["*"] +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.events: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/filebeat + + # The name of the files where the logs are written to. + #name: filebeat-events-data + # ============================= X-Pack Monitoring ============================== # Filebeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/go.mod b/go.mod index 332ceba55f74..5c9e344d89e1 100644 --- a/go.mod +++ b/go.mod @@ -420,4 +420,4 @@ replace ( // Exclude this version because the version has an invalid checksum. 
exclude github.com/docker/distribution v2.8.0+incompatible -replace github.com/elastic/elastic-agent-libs => github.com/belimawr/elastic-agent-libs v0.2.9-0.20231221105324-aedb70a4f832 +replace github.com/elastic/elastic-agent-libs => github.com/belimawr/elastic-agent-libs v0.2.9-0.20240116105334-25f61a14ad41 diff --git a/go.sum b/go.sum index 169361563025..e0c506e96c0a 100644 --- a/go.sum +++ b/go.sum @@ -373,8 +373,8 @@ github.com/awslabs/goformation/v4 v4.1.0 h1:JRxIW0IjhYpYDrIZOTJGMu2azXKI+OK5dP56 github.com/awslabs/goformation/v4 v4.1.0/go.mod h1:MBDN7u1lMNDoehbFuO4uPvgwPeolTMA2TzX1yO6KlxI= github.com/awslabs/kinesis-aggregation/go/v2 v2.0.0-20220623125934-28468a6701b5 h1:lxW5Q6K2IisyF5tlr6Ts0W4POGWQZco05MJjFmoeIHs= github.com/awslabs/kinesis-aggregation/go/v2 v2.0.0-20220623125934-28468a6701b5/go.mod h1:0Qr1uMHFmHsIYMcG4T7BJ9yrJtWadhOmpABCX69dwuc= -github.com/belimawr/elastic-agent-libs v0.2.9-0.20231221105324-aedb70a4f832 h1:hCPNCDrtpZg8GekH7RptPcJ9C/Dgr2ebku2lETqFFw0= -github.com/belimawr/elastic-agent-libs v0.2.9-0.20231221105324-aedb70a4f832/go.mod h1:EbRwBMsWoU4IHGKJlTrxbxC03hkihS9W4h+UgraLdDM= +github.com/belimawr/elastic-agent-libs v0.2.9-0.20240116105334-25f61a14ad41 h1:4kwfzIBmNATT0es3HsgZP7W4p6OUo1TCOk5qchsUzTs= +github.com/belimawr/elastic-agent-libs v0.2.9-0.20240116105334-25f61a14ad41/go.mod h1:pGMj5myawdqu+xE+WKvM5FQzKQ/MonikkWOzoFTJxaU= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/immutable v0.2.1/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI= github.com/benbjohnson/tmpl v1.0.0/go.mod h1:igT620JFIi44B6awvU9IsDhR77IXWtFigTLil/RPdps= diff --git a/heartbeat/heartbeat.reference.yml b/heartbeat/heartbeat.reference.yml index 2b2f28382e91..37e20655fef2 100644 --- a/heartbeat/heartbeat.reference.yml +++ b/heartbeat/heartbeat.reference.yml @@ -1636,6 +1636,42 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.events: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/heartbeat + + # The name of the files where the logs are written to. + #name: heartbeat-events-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 10485760 # = 10MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 7 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to true. 
+ # rotateonstartup: true + # ============================= X-Pack Monitoring ============================== # Heartbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/heartbeat/heartbeat.yml b/heartbeat/heartbeat.yml index 8accb212db4b..0b28eec374e0 100644 --- a/heartbeat/heartbeat.yml +++ b/heartbeat/heartbeat.yml @@ -152,6 +152,20 @@ processors: # "publisher", "service". #logging.selectors: ["*"] +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.events: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/heartbeat + + # The name of the files where the logs are written to. + #name: heartbeat-events-data + # ============================= X-Pack Monitoring ============================== # Heartbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/libbeat/_meta/config/logging.reference.yml.tmpl b/libbeat/_meta/config/logging.reference.yml.tmpl index 660bbb73a02a..d43818aa743c 100644 --- a/libbeat/_meta/config/logging.reference.yml.tmpl +++ b/libbeat/_meta/config/logging.reference.yml.tmpl @@ -67,3 +67,39 @@ logging.files: # Rotate existing logs on startup rather than appending them to the existing # file. Defaults to true. # rotateonstartup: true + +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.events: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/{{.BeatName}} + + # The name of the files where the logs are written to. + #name: {{.BeatName}}-events-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 10485760 # = 10MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 7 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to true. 
+  # rotateonstartup: true
diff --git a/libbeat/_meta/config/logging.yml.tmpl b/libbeat/_meta/config/logging.yml.tmpl
index 00227ad0cdfd..7fe93c9fc0a1 100644
--- a/libbeat/_meta/config/logging.yml.tmpl
+++ b/libbeat/_meta/config/logging.yml.tmpl
@@ -8,3 +8,17 @@
 # To enable all selectors, use ["*"]. Examples of other selectors are "beat",
 # "publisher", "service".
 #logging.selectors: ["*"]
+
+# Some outputs will log raw events on errors like indexing errors in the
+# Elasticsearch output, to prevent logging raw events together with other
+# log messages, a different log file, only for log entries containing raw events,
+# is used. It will use the same level, selectors and all other configurations
+# from the default logger, but it will have it's own file configuration.
+#logging.events:
+  #files:
+    # Configure the path where the logs are written. The default is the logs directory
+    # under the home path (the binary location).
+    #path: /var/log/{{.BeatName}}
+
+    # The name of the files where the logs are written to.
+    #name: {{.BeatName}}-events-data
diff --git a/libbeat/docs/loggingconfig.asciidoc b/libbeat/docs/loggingconfig.asciidoc
index 4ba73c1b60db..b6f3eef9cbb9 100644
--- a/libbeat/docs/loggingconfig.asciidoc
+++ b/libbeat/docs/loggingconfig.asciidoc
@@ -293,3 +293,73 @@ Below are some samples:
 `2017-12-17T18:54:16.242-0500 INFO [example] logp/core_test.go:16 some message`
 `2017-12-17T18:54:16.242-0500 INFO [example] logp/core_test.go:19 some message {"x": 1}`
+
+ifndef::serverless[]
+[float]
+=== Configuration options for events logger
+
+Some outputs will log raw events on errors like indexing errors in the
+Elasticsearch output, to prevent logging raw events together with other
+log messages, a different log file, only for log entries containing raw events,
+is used. It will use the same level, selectors and all other configurations
+from the default logger, but it will have it's own file configuration.
+
+[float]
+==== `logging.events.files.path`
+
+The directory that log files are written to. The default is the logs path. See
+the <> section for details.
+
+[float]
+==== `logging.events.files.name`
+
+The name of the file that logs are written to. The default is '{beatname_lc}-events-data'.
+
+[float]
+==== `logging.events.files.rotateeverybytes`
+
+The maximum size of a log file. If the limit is reached, a new log file is
+generated. The default size limit is 10485760 (10 MB).
+
+[float]
+==== `logging.events.files.keepfiles`
+
+The number of most recent rotated log files to keep on disk. Older files are
+deleted during log rotation. The default value is 7. The `keepfiles` option has
+to be in the range of 2 to 1024 files.
+
+[float]
+==== `logging.events.files.permissions`
+
+The permissions mask to apply when rotating log files. The default value is
+0600. The `permissions` option must be a valid Unix-style file permissions mask
+expressed in octal notation. In Go, numbers in octal notation must start with
+'0'.
+
+The most permissive mask allowed is 0640. If a higher permissions mask is
+specified via this setting, it will be subject to an umask of 0027.
+
+This option is not supported on Windows.
+
+Examples:
+
+* 0640: give read and write access to the file owner, and read access to members of the group associated with the file.
+* 0600: give read and write access to the file owner, and no access to all others.
+
+[float]
+==== `logging.events.files.interval`
+
+Enable log file rotation on time intervals in addition to size-based rotation.
+Intervals must be at least 1s.
Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h +are boundary-aligned with minutes, hours, days, weeks, months, and years as +reported by the local system clock. All other intervals are calculated from the +unix epoch. Defaults to disabled. + +[float] +==== `logging.events.files.rotateonstartup` + +If the log file already exists on startup, immediately rotate it and start +writing to a new file instead of appending to the existing one. Defaults to +true. +endif::serverless[] + diff --git a/libbeat/outputs/elasticsearch/elasticsearch.go b/libbeat/outputs/elasticsearch/elasticsearch.go index 268540a5676c..3fd89bd7c581 100644 --- a/libbeat/outputs/elasticsearch/elasticsearch.go +++ b/libbeat/outputs/elasticsearch/elasticsearch.go @@ -18,6 +18,8 @@ package elasticsearch import ( + "go.uber.org/zap" + "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/esleg/eslegclient" @@ -25,7 +27,6 @@ import ( "github.com/elastic/beats/v7/libbeat/outputs/outil" "github.com/elastic/elastic-agent-libs/config" "github.com/elastic/elastic-agent-libs/logp" - "go.uber.org/zap" ) func init() { diff --git a/libbeat/outputs/fileout/file.go b/libbeat/outputs/fileout/file.go index 4c20c924ee0e..884fa0a054b3 100644 --- a/libbeat/outputs/fileout/file.go +++ b/libbeat/outputs/fileout/file.go @@ -23,6 +23,8 @@ import ( "path/filepath" "time" + "go.uber.org/zap" + "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/outputs" "github.com/elastic/beats/v7/libbeat/outputs/codec" @@ -30,7 +32,6 @@ import ( c "github.com/elastic/elastic-agent-libs/config" "github.com/elastic/elastic-agent-libs/file" "github.com/elastic/elastic-agent-libs/logp" - "go.uber.org/zap" ) func init() { diff --git a/metricbeat/metricbeat.reference.yml b/metricbeat/metricbeat.reference.yml index d6b8b9e9475d..a5c1ded2826d 100644 --- a/metricbeat/metricbeat.reference.yml +++ b/metricbeat/metricbeat.reference.yml @@ -2394,6 +2394,42 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.events: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/metricbeat + + # The name of the files where the logs are written to. + #name: metricbeat-events-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 10485760 # = 10MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 7 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. 
+ #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to true. + # rotateonstartup: true + # ============================= X-Pack Monitoring ============================== # Metricbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/metricbeat/metricbeat.yml b/metricbeat/metricbeat.yml index a148cfb3b517..3925d12b82c8 100644 --- a/metricbeat/metricbeat.yml +++ b/metricbeat/metricbeat.yml @@ -142,6 +142,20 @@ processors: # "publisher", "service". #logging.selectors: ["*"] +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.events: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/metricbeat + + # The name of the files where the logs are written to. + #name: metricbeat-events-data + # ============================= X-Pack Monitoring ============================== # Metricbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/packetbeat/packetbeat.reference.yml b/packetbeat/packetbeat.reference.yml index 1e013fb081f5..242578aaf492 100644 --- a/packetbeat/packetbeat.reference.yml +++ b/packetbeat/packetbeat.reference.yml @@ -2010,6 +2010,42 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.events: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/packetbeat + + # The name of the files where the logs are written to. + #name: packetbeat-events-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 10485760 # = 10MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 7 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to true. 
+ # rotateonstartup: true + # ============================= X-Pack Monitoring ============================== # Packetbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/packetbeat/packetbeat.yml b/packetbeat/packetbeat.yml index fea1a2fb1153..a5026fdbb353 100644 --- a/packetbeat/packetbeat.yml +++ b/packetbeat/packetbeat.yml @@ -270,6 +270,20 @@ processors: # "publisher", "service". #logging.selectors: ["*"] +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.events: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/packetbeat + + # The name of the files where the logs are written to. + #name: packetbeat-events-data + # ============================= X-Pack Monitoring ============================== # Packetbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/winlogbeat/winlogbeat.reference.yml b/winlogbeat/winlogbeat.reference.yml index 8b7bad94c232..d00eb7b6f0a1 100644 --- a/winlogbeat/winlogbeat.reference.yml +++ b/winlogbeat/winlogbeat.reference.yml @@ -1426,6 +1426,42 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.events: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/winlogbeat + + # The name of the files where the logs are written to. + #name: winlogbeat-events-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 10485760 # = 10MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 7 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to true. + # rotateonstartup: true + # ============================= X-Pack Monitoring ============================== # Winlogbeat can export internal metrics to a central Elasticsearch monitoring # cluster. 
This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/winlogbeat/winlogbeat.yml b/winlogbeat/winlogbeat.yml index f6d5ac9069e3..012bee36190b 100644 --- a/winlogbeat/winlogbeat.yml +++ b/winlogbeat/winlogbeat.yml @@ -155,6 +155,20 @@ processors: # "publisher", "service". #logging.selectors: ["*"] +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.events: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/winlogbeat + + # The name of the files where the logs are written to. + #name: winlogbeat-events-data + # ============================= X-Pack Monitoring ============================== # Winlogbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/x-pack/auditbeat/auditbeat.reference.yml b/x-pack/auditbeat/auditbeat.reference.yml index 45d1c4af8510..3cc7dd140568 100644 --- a/x-pack/auditbeat/auditbeat.reference.yml +++ b/x-pack/auditbeat/auditbeat.reference.yml @@ -1600,6 +1600,42 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.events: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/auditbeat + + # The name of the files where the logs are written to. + #name: auditbeat-events-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 10485760 # = 10MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 7 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to true. + # rotateonstartup: true + # ============================= X-Pack Monitoring ============================== # Auditbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. 
The diff --git a/x-pack/auditbeat/auditbeat.yml b/x-pack/auditbeat/auditbeat.yml index 7bdea6578cc7..0e1dbb5c2c28 100644 --- a/x-pack/auditbeat/auditbeat.yml +++ b/x-pack/auditbeat/auditbeat.yml @@ -196,6 +196,20 @@ processors: # "publisher", "service". #logging.selectors: ["*"] +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.events: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/auditbeat + + # The name of the files where the logs are written to. + #name: auditbeat-events-data + # ============================= X-Pack Monitoring ============================== # Auditbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml index 14308c2cce15..8eae6f4c44a9 100644 --- a/x-pack/filebeat/filebeat.reference.yml +++ b/x-pack/filebeat/filebeat.reference.yml @@ -5016,6 +5016,42 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.events: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/filebeat + + # The name of the files where the logs are written to. + #name: filebeat-events-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 10485760 # = 10MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 7 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to true. + # rotateonstartup: true + # ============================= X-Pack Monitoring ============================== # Filebeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. 
The diff --git a/x-pack/filebeat/filebeat.yml b/x-pack/filebeat/filebeat.yml index aa50779b9221..3add6f54a11a 100644 --- a/x-pack/filebeat/filebeat.yml +++ b/x-pack/filebeat/filebeat.yml @@ -186,6 +186,20 @@ processors: # "publisher", "service". #logging.selectors: ["*"] +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.events: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/filebeat + + # The name of the files where the logs are written to. + #name: filebeat-events-data + # ============================= X-Pack Monitoring ============================== # Filebeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/x-pack/functionbeat/functionbeat.reference.yml b/x-pack/functionbeat/functionbeat.reference.yml index 4e939b686a60..c1dd7ba8e879 100644 --- a/x-pack/functionbeat/functionbeat.reference.yml +++ b/x-pack/functionbeat/functionbeat.reference.yml @@ -1264,6 +1264,42 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.events: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/functionbeat + + # The name of the files where the logs are written to. + #name: functionbeat-events-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 10485760 # = 10MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 7 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to true. + # rotateonstartup: true + # ============================= X-Pack Monitoring ============================== # Functionbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. 
The diff --git a/x-pack/functionbeat/functionbeat.yml b/x-pack/functionbeat/functionbeat.yml index 9a2627ca44f1..0544fec54fd4 100644 --- a/x-pack/functionbeat/functionbeat.yml +++ b/x-pack/functionbeat/functionbeat.yml @@ -365,6 +365,20 @@ processors: # "publisher", "service". #logging.selectors: ["*"] +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.events: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/functionbeat + + # The name of the files where the logs are written to. + #name: functionbeat-events-data + # ============================= X-Pack Monitoring ============================== # Functionbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/x-pack/heartbeat/heartbeat.reference.yml b/x-pack/heartbeat/heartbeat.reference.yml index 2b2f28382e91..37e20655fef2 100644 --- a/x-pack/heartbeat/heartbeat.reference.yml +++ b/x-pack/heartbeat/heartbeat.reference.yml @@ -1636,6 +1636,42 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.events: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/heartbeat + + # The name of the files where the logs are written to. + #name: heartbeat-events-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 10485760 # = 10MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 7 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to true. + # rotateonstartup: true + # ============================= X-Pack Monitoring ============================== # Heartbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. 
The diff --git a/x-pack/heartbeat/heartbeat.yml b/x-pack/heartbeat/heartbeat.yml index 8accb212db4b..0b28eec374e0 100644 --- a/x-pack/heartbeat/heartbeat.yml +++ b/x-pack/heartbeat/heartbeat.yml @@ -152,6 +152,20 @@ processors: # "publisher", "service". #logging.selectors: ["*"] +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.events: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/heartbeat + + # The name of the files where the logs are written to. + #name: heartbeat-events-data + # ============================= X-Pack Monitoring ============================== # Heartbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/x-pack/metricbeat/metricbeat.reference.yml b/x-pack/metricbeat/metricbeat.reference.yml index a22db4f7f8cf..643131dbba00 100644 --- a/x-pack/metricbeat/metricbeat.reference.yml +++ b/x-pack/metricbeat/metricbeat.reference.yml @@ -2955,6 +2955,42 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.events: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/metricbeat + + # The name of the files where the logs are written to. + #name: metricbeat-events-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 10485760 # = 10MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 7 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to true. + # rotateonstartup: true + # ============================= X-Pack Monitoring ============================== # Metricbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. 
The diff --git a/x-pack/metricbeat/metricbeat.yml b/x-pack/metricbeat/metricbeat.yml index a148cfb3b517..3925d12b82c8 100644 --- a/x-pack/metricbeat/metricbeat.yml +++ b/x-pack/metricbeat/metricbeat.yml @@ -142,6 +142,20 @@ processors: # "publisher", "service". #logging.selectors: ["*"] +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.events: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/metricbeat + + # The name of the files where the logs are written to. + #name: metricbeat-events-data + # ============================= X-Pack Monitoring ============================== # Metricbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/x-pack/osquerybeat/osquerybeat.reference.yml b/x-pack/osquerybeat/osquerybeat.reference.yml index 1de9a267ae5c..0a54ac67afba 100644 --- a/x-pack/osquerybeat/osquerybeat.reference.yml +++ b/x-pack/osquerybeat/osquerybeat.reference.yml @@ -983,6 +983,42 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.events: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/osquerybeat + + # The name of the files where the logs are written to. + #name: osquerybeat-events-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 10485760 # = 10MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 7 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to true. + # rotateonstartup: true + # ============================= X-Pack Monitoring ============================== # Osquerybeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. 
The diff --git a/x-pack/osquerybeat/osquerybeat.yml b/x-pack/osquerybeat/osquerybeat.yml index 5a3dcde51e97..e187ba70c1e6 100644 --- a/x-pack/osquerybeat/osquerybeat.yml +++ b/x-pack/osquerybeat/osquerybeat.yml @@ -128,6 +128,20 @@ processors: # "publisher", "service". #logging.selectors: ["*"] +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.events: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/osquerybeat + + # The name of the files where the logs are written to. + #name: osquerybeat-events-data + # ============================= X-Pack Monitoring ============================== # Osquerybeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/x-pack/packetbeat/packetbeat.reference.yml b/x-pack/packetbeat/packetbeat.reference.yml index 1e013fb081f5..242578aaf492 100644 --- a/x-pack/packetbeat/packetbeat.reference.yml +++ b/x-pack/packetbeat/packetbeat.reference.yml @@ -2010,6 +2010,42 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.events: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/packetbeat + + # The name of the files where the logs are written to. + #name: packetbeat-events-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 10485760 # = 10MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 7 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to true. + # rotateonstartup: true + # ============================= X-Pack Monitoring ============================== # Packetbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. 
The diff --git a/x-pack/packetbeat/packetbeat.yml b/x-pack/packetbeat/packetbeat.yml index fea1a2fb1153..a5026fdbb353 100644 --- a/x-pack/packetbeat/packetbeat.yml +++ b/x-pack/packetbeat/packetbeat.yml @@ -270,6 +270,20 @@ processors: # "publisher", "service". #logging.selectors: ["*"] +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.events: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/packetbeat + + # The name of the files where the logs are written to. + #name: packetbeat-events-data + # ============================= X-Pack Monitoring ============================== # Packetbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/x-pack/winlogbeat/winlogbeat.reference.yml b/x-pack/winlogbeat/winlogbeat.reference.yml index 528560748fb4..eb3ce116c243 100644 --- a/x-pack/winlogbeat/winlogbeat.reference.yml +++ b/x-pack/winlogbeat/winlogbeat.reference.yml @@ -1428,6 +1428,42 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.events: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/winlogbeat + + # The name of the files where the logs are written to. + #name: winlogbeat-events-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 10485760 # = 10MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 7 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to true. + # rotateonstartup: true + # ============================= X-Pack Monitoring ============================== # Winlogbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. 
The diff --git a/x-pack/winlogbeat/winlogbeat.yml b/x-pack/winlogbeat/winlogbeat.yml index bf7d2f819ebb..c88939331f7b 100644 --- a/x-pack/winlogbeat/winlogbeat.yml +++ b/x-pack/winlogbeat/winlogbeat.yml @@ -156,6 +156,20 @@ processors: # "publisher", "service". #logging.selectors: ["*"] +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events together with other +# log messages, a different log file, only for log entries containing raw events, +# is used. It will use the same level, selectors and all other configurations +# from the default logger, but it will have it's own file configuration. +#logging.events: + #files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/winlogbeat + + # The name of the files where the logs are written to. + #name: winlogbeat-events-data + # ============================= X-Pack Monitoring ============================== # Winlogbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The From 8f1277d2a2f7fdff051140d82acd7453e60c1980 Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Tue, 16 Jan 2024 19:34:21 +0100 Subject: [PATCH 07/21] Add integration test --- .../tests/integration/events_log_file_test.go | 131 ++++++++++++++++++ libbeat/tests/integration/framework.go | 6 +- 2 files changed, 136 insertions(+), 1 deletion(-) create mode 100644 filebeat/tests/integration/events_log_file_test.go diff --git a/filebeat/tests/integration/events_log_file_test.go b/filebeat/tests/integration/events_log_file_test.go new file mode 100644 index 000000000000..49d1ba227408 --- /dev/null +++ b/filebeat/tests/integration/events_log_file_test.go @@ -0,0 +1,131 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build integration + +package integration + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/elastic/beats/v7/libbeat/tests/integration" +) + +var eventsLogFileCfg = ` +filebeat.inputs: + - type: filestream + id: filestream-input-id + enabled: true + parsers: + - ndjson: + target: "" + overwrite_keys: true + expand_keys: true + add_error_key: true + ignore_decoding_error: false + paths: + - %s + +output: + elasticsearch: + hosts: + - localhost:9200 + protocol: http + username: admin + password: testing + +logging: + level: debug + files: + events: + files: + name: filebeat-events-data +` + +func TestEventsLoggerESOutput(t *testing.T) { + // First things first, ensure ES is running and we can connect to it. 
+ // If ES is not running, the test will timeout and the only way to know + // what caused it is going through Filebeat's logs. + integration.EnsureESIsRunning(t) + + filebeat := integration.NewBeat( + t, + "filebeat", + "../../filebeat.test", + ) + + logFilePath := filepath.Join(filebeat.TempDir(), "log.log") + filebeat.WriteConfigFile(fmt.Sprintf(eventsLogFileCfg, logFilePath)) + + logFile, err := os.Create(logFilePath) + if err != nil { + t.Fatalf("could not create file '%s': %s", logFilePath, err) + } + + logFile.WriteString(` +{"message":"foo bar","int":10,"string":"str"} +{"message":"another message","int":20,"string":"str2"} +{"message":"index failure","int":"not a number","string":10} +{"message":"second index failure","int":"not a number","string":10} +`) + if err := logFile.Sync(); err != nil { + t.Fatalf("could not sync log file '%s': %s", logFilePath, err) + } + if err := logFile.Close(); err != nil { + t.Fatalf("could not close log file '%s': %s", logFilePath, err) + } + + filebeat.Start() + + // Wait for a log entry that indicates an entry in the events + // logger file. + msg := "Cannot index event (status=400)" + require.Eventually(t, func() bool { + return filebeat.LogContains(msg) + }, time.Minute, 100*time.Millisecond, + fmt.Sprintf("String '%s' not found on Filebeat logs", msg)) + + glob := filepath.Join(filebeat.TempDir(), "filebeat-events-data*.ndjson") + files, err := filepath.Glob(glob) + if err != nil { + t.Fatalf("could not read files matching glob '%s': %s", glob, err) + } + if len(files) != 1 { + t.Fatalf("there must be only one file matching the glob '%s', found: %s", glob, files) + } + + eventsLogFile := files[0] + data, err := os.ReadFile(eventsLogFile) + if err != nil { + t.Fatalf("could not read '%s': %s", eventsLogFile, err) + } + + strData := string(data) + eventMsg := "not a number" + if !strings.Contains(strData, eventMsg) { + t.Errorf("expecting to find '%s' on '%s'", eventMsg, eventsLogFile) + t.Errorf("Contents:\n%s", strData) + t.FailNow() + } +} diff --git a/libbeat/tests/integration/framework.go b/libbeat/tests/integration/framework.go index 046c578d7cd7..583c348fa5d0 100644 --- a/libbeat/tests/integration/framework.go +++ b/libbeat/tests/integration/framework.go @@ -366,7 +366,11 @@ func (b *BeatProc) WriteConfigFile(cfg string) { // when the test ends. func (b *BeatProc) openLogFile() *os.File { t := b.t - glob := fmt.Sprintf("%s-*.ndjson", filepath.Join(b.tempDir, b.beatName)) + // Beats can produce two different log files, to make sure we're + // reading the normal one we add the year to the glob. The default + // log file name looks like: filebeat-20240116.ndjson + year := time.Now().Year() + glob := fmt.Sprintf("%s-%d*.ndjson", filepath.Join(b.tempDir, b.beatName), year) files, err := filepath.Glob(glob) if err != nil { t.Fatalf("could not expand log file glob: %s", err) From 3dd701da75c994f4dfdab2edfe182d2428447405 Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Wed, 17 Jan 2024 08:36:45 +0100 Subject: [PATCH 08/21] Add changelog --- CHANGELOG.next.asciidoc | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index b8c46fa18c59..58d2a41e5262 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -129,6 +129,7 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d - The Elasticsearch output can now configure performance presets with the `preset` configuration field. 
{pull}37259[37259] - Upgrade to elastic-agent-libs v0.7.3 and golang.org/x/crypto v0.17.0. {pull}37544[37544] - Make more selective the Pod autodiscovery upon node and namespace update events. {issue}37338[37338] {pull}37431[37431] +- Raw event data logged by outputs on error is now logged to a different log file {pull}37475[37475] *Auditbeat* From 5fda9977cc6c1967444c3ba70032c9c76b8a4af5 Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Wed, 17 Jan 2024 11:13:49 +0100 Subject: [PATCH 09/21] update documentation --- libbeat/docs/loggingconfig.asciidoc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/libbeat/docs/loggingconfig.asciidoc b/libbeat/docs/loggingconfig.asciidoc index b6f3eef9cbb9..46d143a146cb 100644 --- a/libbeat/docs/loggingconfig.asciidoc +++ b/libbeat/docs/loggingconfig.asciidoc @@ -304,6 +304,9 @@ log messages, a different log file, only for log entries containing raw events, is used. It will use the same level, selectors and all other configurations from the default logger, but it will have it's own file configuration. +IMPORTANT: No matter the default logger output configuration, raw events +will **always** be logged to a file configured by `logging.events.files`. + [float] ==== `logging.events.files.path` From a558c68ef123073e0f8419aa549c15bd24eed828 Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Wed, 17 Jan 2024 11:23:16 +0100 Subject: [PATCH 10/21] fix tests --- x-pack/filebeat/input/lumberjack/server_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/x-pack/filebeat/input/lumberjack/server_test.go b/x-pack/filebeat/input/lumberjack/server_test.go index c7db6abf0b54..d03bf1353b95 100644 --- a/x-pack/filebeat/input/lumberjack/server_test.go +++ b/x-pack/filebeat/input/lumberjack/server_test.go @@ -52,8 +52,8 @@ func TestServer(t *testing.T) { c := makeTestConfig() c.TLS = serverConf // Disable mTLS requirements in the server. - var clientAuth = tlscommon.TLSClientAuthNone - c.TLS.ClientAuth = &clientAuth + clientAuth := tlscommon.TLSClientAuthNone + c.TLS.ClientAuth = &clientAuth // tls.NoClientCert c.TLS.VerificationMode = tlscommon.VerifyNone testSendReceive(t, c, 10, clientConf) @@ -221,12 +221,12 @@ func tlsSetup(t *testing.T) (clientConfig *tls.Config, serverConfig *tlscommon.S MinVersion: tls.VersionTLS12, } - var clientAuth = tlscommon.TLSClientAuthRequired - + clientAuth := tlscommon.TLSClientAuthRequired serverConfig = &tlscommon.ServerConfig{ // NOTE: VerifyCertificate is ineffective unless ClientAuth is set to RequireAndVerifyClientCert. VerificationMode: tlscommon.VerifyCertificate, - ClientAuth: &clientAuth, // tls.RequireAndVerifyClientCert + // Unfortunately ServerConfig uses an unexported type in an exported field. + ClientAuth: &clientAuth, // tls.RequireAndVerifyClientCert CAs: []string{ string(certData.ca.CertPEM(t)), }, From 28eeaf59d0e7f51fb83e5744d7cb6de4bbdc1f7f Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Wed, 17 Jan 2024 11:58:45 +0100 Subject: [PATCH 11/21] Fix lint issues and small improvements This commit fixes the lint issues and adds small improvements. 
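For context before the diffs below: the outputs in this series that log raw events (elasticsearch, file, kafka, redis) all build their separate events logger the same way. The sketch that follows shows that pattern in isolation; it is illustrative only and not part of any hunk — `newLoggers` is a hypothetical helper name, and `logp.WithFileOutput` is the option provided by the elastic-agent-libs changes this series depends on.

    package example

    import (
        "go.uber.org/zap"

        "github.com/elastic/elastic-agent-libs/logp"
    )

    // newLoggers returns the normal logger plus an events logger that shares
    // the same selector (and therefore the configured level and selectors)
    // but writes to the dedicated events log file described by eventsLoggerCfg.
    func newLoggers(selector string, eventsLoggerCfg logp.Config) (*logp.Logger, *logp.Logger) {
        log := logp.NewLogger(selector)

        eventsLogger := logp.NewLogger(selector)
        // Swap the zap core so entries from this logger go to the events
        // log file instead of the default logging output.
        eventsLogger = eventsLogger.WithOptions(zap.WrapCore(logp.WithFileOutput(eventsLoggerCfg)))

        return log, eventsLogger
    }

Errors that embed a raw event are then written through the events logger (for example `c.eventsLogger.Debugf("failed event: %v", event)` in the kafka client), while the normal logger only records that an event was written to the events log file.
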
--- filebeat/tests/integration/events_log_file_test.go | 2 +- libbeat/cmd/instance/beat.go | 9 ++++++--- libbeat/outputs/elasticsearch/client_integration_test.go | 5 +++-- libbeat/outputs/kafka/client.go | 2 +- 4 files changed, 11 insertions(+), 7 deletions(-) diff --git a/filebeat/tests/integration/events_log_file_test.go b/filebeat/tests/integration/events_log_file_test.go index 49d1ba227408..ecc9e80a068d 100644 --- a/filebeat/tests/integration/events_log_file_test.go +++ b/filebeat/tests/integration/events_log_file_test.go @@ -83,7 +83,7 @@ func TestEventsLoggerESOutput(t *testing.T) { t.Fatalf("could not create file '%s': %s", logFilePath, err) } - logFile.WriteString(` + _, _ = logFile.WriteString(` {"message":"foo bar","int":10,"string":"str"} {"message":"another message","int":20,"string":"str2"} {"message":"index failure","int":"not a number","string":10} diff --git a/libbeat/cmd/instance/beat.go b/libbeat/cmd/instance/beat.go index cff6d7120333..9537aaef84ff 100644 --- a/libbeat/cmd/instance/beat.go +++ b/libbeat/cmd/instance/beat.go @@ -386,7 +386,7 @@ func (b *Beat) createBeater(bt beat.Creator) (beat.Beater, error) { eventsLoggerCfg := logp.DefaultConfig(configure.GetEnvironment()) // merge eventsLoggerCfg with b.Config.Logging, so logging.events.* only - // overwrites logging.* + // overwrites the files block. if err := b.Config.EventLogging.Unpack(&eventsLoggerCfg); err != nil { return nil, fmt.Errorf("error initialising events logger: %w", err) } @@ -807,11 +807,14 @@ func (b *Beat) configure(settings Settings) error { if b.Config.EventLogging == nil { b.Config.EventLogging = config.NewConfig() } - b.Config.EventLogging.Merge(b.Config.Logging) - if _, err := b.Config.EventLogging.Remove("events", -1); err != nil { + if err := b.Config.EventLogging.Merge(b.Config.Logging); err != nil { return fmt.Errorf("cannot merge logging and logging.events configuration: %w", err) } + if _, err := b.Config.EventLogging.Remove("events", -1); err != nil { + return fmt.Errorf("cannot update logging.events configuration: %w", err) + } + if err := promoteOutputQueueSettings(&b.Config); err != nil { return fmt.Errorf("could not promote output queue settings: %w", err) } diff --git a/libbeat/outputs/elasticsearch/client_integration_test.go b/libbeat/outputs/elasticsearch/client_integration_test.go index 4322ad13412e..7eebadc80098 100644 --- a/libbeat/outputs/elasticsearch/client_integration_test.go +++ b/libbeat/outputs/elasticsearch/client_integration_test.go @@ -423,8 +423,9 @@ func connectTestEs(t *testing.T, cfg interface{}, stats outputs.Observer) (outpu // disable ILM if using specified index name im, _ := idxmgmt.DefaultSupport(nil, info, conf.MustNewConfigFrom(map[string]interface{}{"setup.ilm.enabled": "false"})) - // Creates the events logger configuration for testing - // It used the default one but logs to stderr instead of a file + // Creates the events logger configuration for testing, + // it uses the default one but logs to stderr instead of a file. + // This prevents the test to leave log files behind. 
eventsLoggerCfg := logp.DefaultConfig(logp.DefaultEnvironment) eventsLoggerCfg.Level = logp.DebugLevel eventsLoggerCfg.ToStderr = true diff --git a/libbeat/outputs/kafka/client.go b/libbeat/outputs/kafka/client.go index 6acebdfab9a0..b1cd7dcecab5 100644 --- a/libbeat/outputs/kafka/client.go +++ b/libbeat/outputs/kafka/client.go @@ -236,8 +236,8 @@ func (c *client) getEventMessage(data *publisher.Event) (*message, error) { serializedEvent, err := c.codec.Encode(c.index, event) if err != nil { if c.log.IsDebug() { - c.eventsLogger.Debugf("failed event: %v", event) c.log.Debug("failed event logged to events logger file") + c.eventsLogger.Debugf("failed event: %v", event) } return nil, err } From 28aecd019b9a5cb2dd433538b415e3992b7c5fc1 Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Wed, 17 Jan 2024 20:21:27 +0100 Subject: [PATCH 12/21] Fix bug when merging logging configurations --- libbeat/cmd/instance/beat.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/libbeat/cmd/instance/beat.go b/libbeat/cmd/instance/beat.go index 9537aaef84ff..5c1727ff883a 100644 --- a/libbeat/cmd/instance/beat.go +++ b/libbeat/cmd/instance/beat.go @@ -804,9 +804,15 @@ func (b *Beat) configure(settings Settings) error { return fmt.Errorf("error unpacking config data: %w", err) } + // If either b.Config.EventLoggingor b.Config.Logging are nil + // merging them will fail, so in case any of them is nil, + // we set them to an empty config.C if b.Config.EventLogging == nil { b.Config.EventLogging = config.NewConfig() } + if b.Config.Logging == nil { + b.Config.Logging = config.NewConfig() + } if err := b.Config.EventLogging.Merge(b.Config.Logging); err != nil { return fmt.Errorf("cannot merge logging and logging.events configuration: %w", err) } From 4a829d9f5423a36b3f77582b335a5a674535607b Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Thu, 18 Jan 2024 17:37:45 +0100 Subject: [PATCH 13/21] Add diagnostics hook to collect events log files --- libbeat/cmd/instance/beat.go | 45 ++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/libbeat/cmd/instance/beat.go b/libbeat/cmd/instance/beat.go index 5c1727ff883a..1bbf713bb651 100644 --- a/libbeat/cmd/instance/beat.go +++ b/libbeat/cmd/instance/beat.go @@ -18,6 +18,7 @@ package instance import ( + "bytes" "context" cryptRand "crypto/rand" "encoding/json" @@ -31,6 +32,7 @@ import ( "net" "os" "os/user" + "path/filepath" "runtime" "runtime/debug" "strconv" @@ -398,6 +400,11 @@ func (b *Beat) createBeater(bt beat.Creator) (beat.Beater, error) { eventsLoggerCfg.Files.Name = eventsLoggerCfg.Files.Name + "-events-data" } + // Now that the events logger is configured, we can register it's diagnostic + // hook + b.Manager.RegisterDiagnosticHook("events log", + "log files containing raw events", "events_log.ndjson", + "application/x-ndjson", b.eventsLogDiagnosticsHook(eventsLoggerCfg)) outputFactory := b.makeOutputFactory(b.Config.Output, eventsLoggerCfg) settings := pipeline.Settings{ Processors: b.processors, @@ -423,6 +430,44 @@ func (b *Beat) createBeater(bt beat.Creator) (beat.Beater, error) { return beater, nil } +func (b *Beat) eventsLogDiagnosticsHook(logCfg logp.Config) func() []byte { + // Setup a no-op function to return in case of an error + data := []byte{} + fn := func() []byte { + return data + } + + glob := fmt.Sprintf("%s*.ndjson", + paths.Resolve( + paths.Logs, + filepath.Join( + logCfg.Files.Path, + logCfg.LogFilename(), + ))) + + files, err := filepath.Glob(glob) + if err != nil { + logp.Warn("could not get 'event log' 
files: %s", err) + return fn + } + + filesData := [][]byte{} + fn = func() []byte { + return bytes.Join(filesData, []byte{}) + } + + for _, f := range files { + logData, err := os.ReadFile(f) + if err != nil { + logp.Warn("could not read event log file '%s': %s", f, err) + return fn + } + filesData = append(filesData, logData) + } + + return fn +} + func (b *Beat) launch(settings Settings, bt beat.Creator) error { defer func() { _ = logp.Sync() From 6bd6ff1990385324437e0a4c7d9a3c1bdaf9b7cc Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Mon, 22 Jan 2024 12:06:23 +0100 Subject: [PATCH 14/21] Improve documentation --- auditbeat/auditbeat.reference.yml | 12 ++++++++---- filebeat/filebeat.reference.yml | 12 ++++++++---- heartbeat/heartbeat.reference.yml | 12 ++++++++---- libbeat/_meta/config/logging.reference.yml.tmpl | 12 ++++++++---- libbeat/docs/loggingconfig.asciidoc | 12 ++++++++---- metricbeat/metricbeat.reference.yml | 12 ++++++++---- packetbeat/packetbeat.reference.yml | 12 ++++++++---- winlogbeat/winlogbeat.reference.yml | 12 ++++++++---- x-pack/auditbeat/auditbeat.reference.yml | 12 ++++++++---- x-pack/filebeat/filebeat.reference.yml | 12 ++++++++---- x-pack/functionbeat/functionbeat.reference.yml | 12 ++++++++---- x-pack/heartbeat/heartbeat.reference.yml | 12 ++++++++---- x-pack/metricbeat/metricbeat.reference.yml | 12 ++++++++---- x-pack/osquerybeat/osquerybeat.reference.yml | 12 ++++++++---- x-pack/packetbeat/packetbeat.reference.yml | 12 ++++++++---- x-pack/winlogbeat/winlogbeat.reference.yml | 12 ++++++++---- 16 files changed, 128 insertions(+), 64 deletions(-) diff --git a/auditbeat/auditbeat.reference.yml b/auditbeat/auditbeat.reference.yml index 0e0530db6b0c..53b2d738eec6 100644 --- a/auditbeat/auditbeat.reference.yml +++ b/auditbeat/auditbeat.reference.yml @@ -1545,10 +1545,14 @@ logging.files: # rotateonstartup: true # Some outputs will log raw events on errors like indexing errors in the -# Elasticsearch output, to prevent logging raw events together with other -# log messages, a different log file, only for log entries containing raw events, -# is used. It will use the same level, selectors and all other configurations -# from the default logger, but it will have it's own file configuration. +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. + +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. #logging.events: #files: # Configure the path where the logs are written. The default is the logs directory diff --git a/filebeat/filebeat.reference.yml b/filebeat/filebeat.reference.yml index 7cf1f54d1616..f833cdca225d 100644 --- a/filebeat/filebeat.reference.yml +++ b/filebeat/filebeat.reference.yml @@ -2641,10 +2641,14 @@ logging.files: # rotateonstartup: true # Some outputs will log raw events on errors like indexing errors in the -# Elasticsearch output, to prevent logging raw events together with other -# log messages, a different log file, only for log entries containing raw events, -# is used. It will use the same level, selectors and all other configurations -# from the default logger, but it will have it's own file configuration. 
+# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. + +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. #logging.events: #files: # Configure the path where the logs are written. The default is the logs directory diff --git a/heartbeat/heartbeat.reference.yml b/heartbeat/heartbeat.reference.yml index 37e20655fef2..1bf1b84bd9c8 100644 --- a/heartbeat/heartbeat.reference.yml +++ b/heartbeat/heartbeat.reference.yml @@ -1637,10 +1637,14 @@ logging.files: # rotateonstartup: true # Some outputs will log raw events on errors like indexing errors in the -# Elasticsearch output, to prevent logging raw events together with other -# log messages, a different log file, only for log entries containing raw events, -# is used. It will use the same level, selectors and all other configurations -# from the default logger, but it will have it's own file configuration. +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. + +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. #logging.events: #files: # Configure the path where the logs are written. The default is the logs directory diff --git a/libbeat/_meta/config/logging.reference.yml.tmpl b/libbeat/_meta/config/logging.reference.yml.tmpl index d43818aa743c..2d0f3958ac4e 100644 --- a/libbeat/_meta/config/logging.reference.yml.tmpl +++ b/libbeat/_meta/config/logging.reference.yml.tmpl @@ -69,10 +69,14 @@ logging.files: # rotateonstartup: true # Some outputs will log raw events on errors like indexing errors in the -# Elasticsearch output, to prevent logging raw events together with other -# log messages, a different log file, only for log entries containing raw events, -# is used. It will use the same level, selectors and all other configurations -# from the default logger, but it will have it's own file configuration. +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. + +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. #logging.events: #files: # Configure the path where the logs are written. 
The default is the logs directory diff --git a/libbeat/docs/loggingconfig.asciidoc b/libbeat/docs/loggingconfig.asciidoc index 46d143a146cb..c17e2d4d9610 100644 --- a/libbeat/docs/loggingconfig.asciidoc +++ b/libbeat/docs/loggingconfig.asciidoc @@ -299,10 +299,14 @@ ifndef::serverless[] === Configuration options for events logger Some outputs will log raw events on errors like indexing errors in the -Elasticsearch output, to prevent logging raw events together with other -log messages, a different log file, only for log entries containing raw events, -is used. It will use the same level, selectors and all other configurations -from the default logger, but it will have it's own file configuration. +Elasticsearch output, to prevent logging raw events (that may contain +sensitive information) together with other log messages, a different +log file, only for log entries containing raw events, is used. It will +use the same level, selectors and all other configurations from the +default logger, but it will have it's own file configuration. + +Having a different log file for raw events also prevents event data +from drowning out the regular log files. IMPORTANT: No matter the default logger output configuration, raw events will **always** be logged to a file configured by `logging.events.files`. diff --git a/metricbeat/metricbeat.reference.yml b/metricbeat/metricbeat.reference.yml index a5c1ded2826d..ca6be3a672ab 100644 --- a/metricbeat/metricbeat.reference.yml +++ b/metricbeat/metricbeat.reference.yml @@ -2395,10 +2395,14 @@ logging.files: # rotateonstartup: true # Some outputs will log raw events on errors like indexing errors in the -# Elasticsearch output, to prevent logging raw events together with other -# log messages, a different log file, only for log entries containing raw events, -# is used. It will use the same level, selectors and all other configurations -# from the default logger, but it will have it's own file configuration. +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. + +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. #logging.events: #files: # Configure the path where the logs are written. The default is the logs directory diff --git a/packetbeat/packetbeat.reference.yml b/packetbeat/packetbeat.reference.yml index 242578aaf492..fa5116756f6c 100644 --- a/packetbeat/packetbeat.reference.yml +++ b/packetbeat/packetbeat.reference.yml @@ -2011,10 +2011,14 @@ logging.files: # rotateonstartup: true # Some outputs will log raw events on errors like indexing errors in the -# Elasticsearch output, to prevent logging raw events together with other -# log messages, a different log file, only for log entries containing raw events, -# is used. It will use the same level, selectors and all other configurations -# from the default logger, but it will have it's own file configuration. +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. 
It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. + +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. #logging.events: #files: # Configure the path where the logs are written. The default is the logs directory diff --git a/winlogbeat/winlogbeat.reference.yml b/winlogbeat/winlogbeat.reference.yml index d00eb7b6f0a1..39841c05bca8 100644 --- a/winlogbeat/winlogbeat.reference.yml +++ b/winlogbeat/winlogbeat.reference.yml @@ -1427,10 +1427,14 @@ logging.files: # rotateonstartup: true # Some outputs will log raw events on errors like indexing errors in the -# Elasticsearch output, to prevent logging raw events together with other -# log messages, a different log file, only for log entries containing raw events, -# is used. It will use the same level, selectors and all other configurations -# from the default logger, but it will have it's own file configuration. +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. + +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. #logging.events: #files: # Configure the path where the logs are written. The default is the logs directory diff --git a/x-pack/auditbeat/auditbeat.reference.yml b/x-pack/auditbeat/auditbeat.reference.yml index 3cc7dd140568..3b190d18bfcb 100644 --- a/x-pack/auditbeat/auditbeat.reference.yml +++ b/x-pack/auditbeat/auditbeat.reference.yml @@ -1601,10 +1601,14 @@ logging.files: # rotateonstartup: true # Some outputs will log raw events on errors like indexing errors in the -# Elasticsearch output, to prevent logging raw events together with other -# log messages, a different log file, only for log entries containing raw events, -# is used. It will use the same level, selectors and all other configurations -# from the default logger, but it will have it's own file configuration. +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. + +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. #logging.events: #files: # Configure the path where the logs are written. The default is the logs directory diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml index 8eae6f4c44a9..57f9c3445d0b 100644 --- a/x-pack/filebeat/filebeat.reference.yml +++ b/x-pack/filebeat/filebeat.reference.yml @@ -5017,10 +5017,14 @@ logging.files: # rotateonstartup: true # Some outputs will log raw events on errors like indexing errors in the -# Elasticsearch output, to prevent logging raw events together with other -# log messages, a different log file, only for log entries containing raw events, -# is used. It will use the same level, selectors and all other configurations -# from the default logger, but it will have it's own file configuration. 
+# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. + +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. #logging.events: #files: # Configure the path where the logs are written. The default is the logs directory diff --git a/x-pack/functionbeat/functionbeat.reference.yml b/x-pack/functionbeat/functionbeat.reference.yml index c1dd7ba8e879..49d507bb9bd3 100644 --- a/x-pack/functionbeat/functionbeat.reference.yml +++ b/x-pack/functionbeat/functionbeat.reference.yml @@ -1265,10 +1265,14 @@ logging.files: # rotateonstartup: true # Some outputs will log raw events on errors like indexing errors in the -# Elasticsearch output, to prevent logging raw events together with other -# log messages, a different log file, only for log entries containing raw events, -# is used. It will use the same level, selectors and all other configurations -# from the default logger, but it will have it's own file configuration. +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. + +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. #logging.events: #files: # Configure the path where the logs are written. The default is the logs directory diff --git a/x-pack/heartbeat/heartbeat.reference.yml b/x-pack/heartbeat/heartbeat.reference.yml index 37e20655fef2..1bf1b84bd9c8 100644 --- a/x-pack/heartbeat/heartbeat.reference.yml +++ b/x-pack/heartbeat/heartbeat.reference.yml @@ -1637,10 +1637,14 @@ logging.files: # rotateonstartup: true # Some outputs will log raw events on errors like indexing errors in the -# Elasticsearch output, to prevent logging raw events together with other -# log messages, a different log file, only for log entries containing raw events, -# is used. It will use the same level, selectors and all other configurations -# from the default logger, but it will have it's own file configuration. +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. + +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. #logging.events: #files: # Configure the path where the logs are written. 
The default is the logs directory diff --git a/x-pack/metricbeat/metricbeat.reference.yml b/x-pack/metricbeat/metricbeat.reference.yml index 643131dbba00..0d6533ffc1ba 100644 --- a/x-pack/metricbeat/metricbeat.reference.yml +++ b/x-pack/metricbeat/metricbeat.reference.yml @@ -2956,10 +2956,14 @@ logging.files: # rotateonstartup: true # Some outputs will log raw events on errors like indexing errors in the -# Elasticsearch output, to prevent logging raw events together with other -# log messages, a different log file, only for log entries containing raw events, -# is used. It will use the same level, selectors and all other configurations -# from the default logger, but it will have it's own file configuration. +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. + +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. #logging.events: #files: # Configure the path where the logs are written. The default is the logs directory diff --git a/x-pack/osquerybeat/osquerybeat.reference.yml b/x-pack/osquerybeat/osquerybeat.reference.yml index 0a54ac67afba..fc8e9cebeda5 100644 --- a/x-pack/osquerybeat/osquerybeat.reference.yml +++ b/x-pack/osquerybeat/osquerybeat.reference.yml @@ -984,10 +984,14 @@ logging.files: # rotateonstartup: true # Some outputs will log raw events on errors like indexing errors in the -# Elasticsearch output, to prevent logging raw events together with other -# log messages, a different log file, only for log entries containing raw events, -# is used. It will use the same level, selectors and all other configurations -# from the default logger, but it will have it's own file configuration. +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. + +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. #logging.events: #files: # Configure the path where the logs are written. The default is the logs directory diff --git a/x-pack/packetbeat/packetbeat.reference.yml b/x-pack/packetbeat/packetbeat.reference.yml index 242578aaf492..fa5116756f6c 100644 --- a/x-pack/packetbeat/packetbeat.reference.yml +++ b/x-pack/packetbeat/packetbeat.reference.yml @@ -2011,10 +2011,14 @@ logging.files: # rotateonstartup: true # Some outputs will log raw events on errors like indexing errors in the -# Elasticsearch output, to prevent logging raw events together with other -# log messages, a different log file, only for log entries containing raw events, -# is used. It will use the same level, selectors and all other configurations -# from the default logger, but it will have it's own file configuration. +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. 
It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. + +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. #logging.events: #files: # Configure the path where the logs are written. The default is the logs directory diff --git a/x-pack/winlogbeat/winlogbeat.reference.yml b/x-pack/winlogbeat/winlogbeat.reference.yml index eb3ce116c243..2c1cf02f13d5 100644 --- a/x-pack/winlogbeat/winlogbeat.reference.yml +++ b/x-pack/winlogbeat/winlogbeat.reference.yml @@ -1429,10 +1429,14 @@ logging.files: # rotateonstartup: true # Some outputs will log raw events on errors like indexing errors in the -# Elasticsearch output, to prevent logging raw events together with other -# log messages, a different log file, only for log entries containing raw events, -# is used. It will use the same level, selectors and all other configurations -# from the default logger, but it will have it's own file configuration. +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. + +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. #logging.events: #files: # Configure the path where the logs are written. The default is the logs directory From 3d6447e97c0a0544f970a03cbb0a140c19013eb1 Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Mon, 22 Jan 2024 15:35:08 +0100 Subject: [PATCH 15/21] Update defaults for events logger --- auditbeat/auditbeat.reference.yml | 4 ++-- filebeat/filebeat.reference.yml | 4 ++-- heartbeat/heartbeat.reference.yml | 4 ++-- libbeat/_meta/config/logging.reference.yml.tmpl | 4 ++-- libbeat/cmd/instance/beat.go | 5 ++++- libbeat/docs/loggingconfig.asciidoc | 4 ++-- metricbeat/metricbeat.reference.yml | 4 ++-- packetbeat/packetbeat.reference.yml | 4 ++-- winlogbeat/winlogbeat.reference.yml | 4 ++-- x-pack/auditbeat/auditbeat.reference.yml | 4 ++-- x-pack/filebeat/filebeat.reference.yml | 4 ++-- x-pack/functionbeat/functionbeat.reference.yml | 4 ++-- x-pack/heartbeat/heartbeat.reference.yml | 4 ++-- x-pack/metricbeat/metricbeat.reference.yml | 4 ++-- x-pack/osquerybeat/osquerybeat.reference.yml | 4 ++-- x-pack/packetbeat/packetbeat.reference.yml | 4 ++-- x-pack/winlogbeat/winlogbeat.reference.yml | 4 ++-- 17 files changed, 36 insertions(+), 33 deletions(-) diff --git a/auditbeat/auditbeat.reference.yml b/auditbeat/auditbeat.reference.yml index 53b2d738eec6..9f965c0c7616 100644 --- a/auditbeat/auditbeat.reference.yml +++ b/auditbeat/auditbeat.reference.yml @@ -1564,10 +1564,10 @@ logging.files: # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. - #rotateeverybytes: 10485760 # = 10MB + #rotateeverybytes: 5242880 # = 5MB # Number of rotated log files to keep. The oldest files will be deleted first. - #keepfiles: 7 + #keepfiles: 5 # The permissions mask to apply when rotating log files. The default value is 0600. # Must be a valid Unix-style file permissions mask expressed in octal notation. 
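With this change the events log defaults to rotating at 5 MB (5242880 bytes = 5 * 1024 * 1024) and keeping 5 rotated files, instead of the 10 MB / 7 files it previously inherited. An explicit override back to the old values would look roughly like the illustrative snippet below (not part of the patch; the level and selectors are still taken from `logging.*`, only the `files` block is specific to the events logger):

    logging.events:
      files:
        path: /var/log/auditbeat
        name: auditbeat-events-data
        rotateeverybytes: 10485760  # = 10MB
        keepfiles: 7
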
diff --git a/filebeat/filebeat.reference.yml b/filebeat/filebeat.reference.yml index f833cdca225d..272cff60d8ea 100644 --- a/filebeat/filebeat.reference.yml +++ b/filebeat/filebeat.reference.yml @@ -2660,10 +2660,10 @@ logging.files: # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. - #rotateeverybytes: 10485760 # = 10MB + #rotateeverybytes: 5242880 # = 5MB # Number of rotated log files to keep. The oldest files will be deleted first. - #keepfiles: 7 + #keepfiles: 5 # The permissions mask to apply when rotating log files. The default value is 0600. # Must be a valid Unix-style file permissions mask expressed in octal notation. diff --git a/heartbeat/heartbeat.reference.yml b/heartbeat/heartbeat.reference.yml index 1bf1b84bd9c8..1c7317042850 100644 --- a/heartbeat/heartbeat.reference.yml +++ b/heartbeat/heartbeat.reference.yml @@ -1656,10 +1656,10 @@ logging.files: # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. - #rotateeverybytes: 10485760 # = 10MB + #rotateeverybytes: 5242880 # = 5MB # Number of rotated log files to keep. The oldest files will be deleted first. - #keepfiles: 7 + #keepfiles: 5 # The permissions mask to apply when rotating log files. The default value is 0600. # Must be a valid Unix-style file permissions mask expressed in octal notation. diff --git a/libbeat/_meta/config/logging.reference.yml.tmpl b/libbeat/_meta/config/logging.reference.yml.tmpl index 2d0f3958ac4e..d8e91dfffdbb 100644 --- a/libbeat/_meta/config/logging.reference.yml.tmpl +++ b/libbeat/_meta/config/logging.reference.yml.tmpl @@ -88,10 +88,10 @@ logging.files: # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. - #rotateeverybytes: 10485760 # = 10MB + #rotateeverybytes: 5242880 # = 5MB # Number of rotated log files to keep. The oldest files will be deleted first. - #keepfiles: 7 + #keepfiles: 5 # The permissions mask to apply when rotating log files. The default value is 0600. # Must be a valid Unix-style file permissions mask expressed in octal notation. diff --git a/libbeat/cmd/instance/beat.go b/libbeat/cmd/instance/beat.go index 1bbf713bb651..4362e276b734 100644 --- a/libbeat/cmd/instance/beat.go +++ b/libbeat/cmd/instance/beat.go @@ -384,8 +384,11 @@ func (b *Beat) createBeater(bt beat.Creator) (beat.Beater, error) { // Get the default/current logging configuration // we need some defaults to be populates otherwise Unpack will - // fail + // fail. We also overwrite some defaults that are specific to the + // events logger. eventsLoggerCfg := logp.DefaultConfig(configure.GetEnvironment()) + eventsLoggerCfg.Files.MaxSize = 5242880 // 5MB + eventsLoggerCfg.Files.MaxBackups = 5 // merge eventsLoggerCfg with b.Config.Logging, so logging.events.* only // overwrites the files block. diff --git a/libbeat/docs/loggingconfig.asciidoc b/libbeat/docs/loggingconfig.asciidoc index c17e2d4d9610..f9cc07563eb2 100644 --- a/libbeat/docs/loggingconfig.asciidoc +++ b/libbeat/docs/loggingconfig.asciidoc @@ -326,13 +326,13 @@ The name of the file that logs are written to. The default is '{beatname_lc}'. ==== `logging.events.files.rotateeverybytes` The maximum size of a log file. If the limit is reached, a new log file is -generated. The default size limit is 10485760 (10 MB). +generated. The default size limit is 5242880 (5 MB). [float] ==== `logging.events.files.keepfiles` The number of most recent rotated log files to keep on disk. 
Older files are -deleted during log rotation. The default value is 7. The `keepfiles` options has +deleted during log rotation. The default value is 5. The `keepfiles` options has to be in the range of 2 to 1024 files. [float] diff --git a/metricbeat/metricbeat.reference.yml b/metricbeat/metricbeat.reference.yml index ca6be3a672ab..4420d4a2dc6e 100644 --- a/metricbeat/metricbeat.reference.yml +++ b/metricbeat/metricbeat.reference.yml @@ -2414,10 +2414,10 @@ logging.files: # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. - #rotateeverybytes: 10485760 # = 10MB + #rotateeverybytes: 5242880 # = 5MB # Number of rotated log files to keep. The oldest files will be deleted first. - #keepfiles: 7 + #keepfiles: 5 # The permissions mask to apply when rotating log files. The default value is 0600. # Must be a valid Unix-style file permissions mask expressed in octal notation. diff --git a/packetbeat/packetbeat.reference.yml b/packetbeat/packetbeat.reference.yml index fa5116756f6c..2c51bdb8c91b 100644 --- a/packetbeat/packetbeat.reference.yml +++ b/packetbeat/packetbeat.reference.yml @@ -2030,10 +2030,10 @@ logging.files: # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. - #rotateeverybytes: 10485760 # = 10MB + #rotateeverybytes: 5242880 # = 5MB # Number of rotated log files to keep. The oldest files will be deleted first. - #keepfiles: 7 + #keepfiles: 5 # The permissions mask to apply when rotating log files. The default value is 0600. # Must be a valid Unix-style file permissions mask expressed in octal notation. diff --git a/winlogbeat/winlogbeat.reference.yml b/winlogbeat/winlogbeat.reference.yml index 39841c05bca8..78189b2e3c12 100644 --- a/winlogbeat/winlogbeat.reference.yml +++ b/winlogbeat/winlogbeat.reference.yml @@ -1446,10 +1446,10 @@ logging.files: # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. - #rotateeverybytes: 10485760 # = 10MB + #rotateeverybytes: 5242880 # = 5MB # Number of rotated log files to keep. The oldest files will be deleted first. - #keepfiles: 7 + #keepfiles: 5 # The permissions mask to apply when rotating log files. The default value is 0600. # Must be a valid Unix-style file permissions mask expressed in octal notation. diff --git a/x-pack/auditbeat/auditbeat.reference.yml b/x-pack/auditbeat/auditbeat.reference.yml index 3b190d18bfcb..1fc28d0cb4c7 100644 --- a/x-pack/auditbeat/auditbeat.reference.yml +++ b/x-pack/auditbeat/auditbeat.reference.yml @@ -1620,10 +1620,10 @@ logging.files: # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. - #rotateeverybytes: 10485760 # = 10MB + #rotateeverybytes: 5242880 # = 5MB # Number of rotated log files to keep. The oldest files will be deleted first. - #keepfiles: 7 + #keepfiles: 5 # The permissions mask to apply when rotating log files. The default value is 0600. # Must be a valid Unix-style file permissions mask expressed in octal notation. diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml index 57f9c3445d0b..5bb862b98d05 100644 --- a/x-pack/filebeat/filebeat.reference.yml +++ b/x-pack/filebeat/filebeat.reference.yml @@ -5036,10 +5036,10 @@ logging.files: # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. - #rotateeverybytes: 10485760 # = 10MB + #rotateeverybytes: 5242880 # = 5MB # Number of rotated log files to keep. 
The oldest files will be deleted first. - #keepfiles: 7 + #keepfiles: 5 # The permissions mask to apply when rotating log files. The default value is 0600. # Must be a valid Unix-style file permissions mask expressed in octal notation. diff --git a/x-pack/functionbeat/functionbeat.reference.yml b/x-pack/functionbeat/functionbeat.reference.yml index 49d507bb9bd3..4e12aa9ace95 100644 --- a/x-pack/functionbeat/functionbeat.reference.yml +++ b/x-pack/functionbeat/functionbeat.reference.yml @@ -1284,10 +1284,10 @@ logging.files: # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. - #rotateeverybytes: 10485760 # = 10MB + #rotateeverybytes: 5242880 # = 5MB # Number of rotated log files to keep. The oldest files will be deleted first. - #keepfiles: 7 + #keepfiles: 5 # The permissions mask to apply when rotating log files. The default value is 0600. # Must be a valid Unix-style file permissions mask expressed in octal notation. diff --git a/x-pack/heartbeat/heartbeat.reference.yml b/x-pack/heartbeat/heartbeat.reference.yml index 1bf1b84bd9c8..1c7317042850 100644 --- a/x-pack/heartbeat/heartbeat.reference.yml +++ b/x-pack/heartbeat/heartbeat.reference.yml @@ -1656,10 +1656,10 @@ logging.files: # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. - #rotateeverybytes: 10485760 # = 10MB + #rotateeverybytes: 5242880 # = 5MB # Number of rotated log files to keep. The oldest files will be deleted first. - #keepfiles: 7 + #keepfiles: 5 # The permissions mask to apply when rotating log files. The default value is 0600. # Must be a valid Unix-style file permissions mask expressed in octal notation. diff --git a/x-pack/metricbeat/metricbeat.reference.yml b/x-pack/metricbeat/metricbeat.reference.yml index 0d6533ffc1ba..1bd277059d15 100644 --- a/x-pack/metricbeat/metricbeat.reference.yml +++ b/x-pack/metricbeat/metricbeat.reference.yml @@ -2975,10 +2975,10 @@ logging.files: # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. - #rotateeverybytes: 10485760 # = 10MB + #rotateeverybytes: 5242880 # = 5MB # Number of rotated log files to keep. The oldest files will be deleted first. - #keepfiles: 7 + #keepfiles: 5 # The permissions mask to apply when rotating log files. The default value is 0600. # Must be a valid Unix-style file permissions mask expressed in octal notation. diff --git a/x-pack/osquerybeat/osquerybeat.reference.yml b/x-pack/osquerybeat/osquerybeat.reference.yml index fc8e9cebeda5..9b0c50a1b0b0 100644 --- a/x-pack/osquerybeat/osquerybeat.reference.yml +++ b/x-pack/osquerybeat/osquerybeat.reference.yml @@ -1003,10 +1003,10 @@ logging.files: # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. - #rotateeverybytes: 10485760 # = 10MB + #rotateeverybytes: 5242880 # = 5MB # Number of rotated log files to keep. The oldest files will be deleted first. - #keepfiles: 7 + #keepfiles: 5 # The permissions mask to apply when rotating log files. The default value is 0600. # Must be a valid Unix-style file permissions mask expressed in octal notation. diff --git a/x-pack/packetbeat/packetbeat.reference.yml b/x-pack/packetbeat/packetbeat.reference.yml index fa5116756f6c..2c51bdb8c91b 100644 --- a/x-pack/packetbeat/packetbeat.reference.yml +++ b/x-pack/packetbeat/packetbeat.reference.yml @@ -2030,10 +2030,10 @@ logging.files: # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. 
- #rotateeverybytes: 10485760 # = 10MB + #rotateeverybytes: 5242880 # = 5MB # Number of rotated log files to keep. The oldest files will be deleted first. - #keepfiles: 7 + #keepfiles: 5 # The permissions mask to apply when rotating log files. The default value is 0600. # Must be a valid Unix-style file permissions mask expressed in octal notation. diff --git a/x-pack/winlogbeat/winlogbeat.reference.yml b/x-pack/winlogbeat/winlogbeat.reference.yml index 2c1cf02f13d5..b4980c973bc8 100644 --- a/x-pack/winlogbeat/winlogbeat.reference.yml +++ b/x-pack/winlogbeat/winlogbeat.reference.yml @@ -1448,10 +1448,10 @@ logging.files: # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. - #rotateeverybytes: 10485760 # = 10MB + #rotateeverybytes: 5242880 # = 5MB # Number of rotated log files to keep. The oldest files will be deleted first. - #keepfiles: 7 + #keepfiles: 5 # The permissions mask to apply when rotating log files. The default value is 0600. # Must be a valid Unix-style file permissions mask expressed in octal notation. From e892d1e0b99fea89094b905fd217af8cd5f514ba Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Mon, 22 Jan 2024 18:11:08 +0100 Subject: [PATCH 16/21] add logger.type to events logger --- libbeat/outputs/elasticsearch/elasticsearch.go | 3 ++- libbeat/outputs/fileout/file.go | 4 +++- libbeat/outputs/kafka/client.go | 3 ++- libbeat/outputs/redis/client.go | 3 ++- 4 files changed, 9 insertions(+), 4 deletions(-) diff --git a/libbeat/outputs/elasticsearch/elasticsearch.go b/libbeat/outputs/elasticsearch/elasticsearch.go index 3fd89bd7c581..975c78a13161 100644 --- a/libbeat/outputs/elasticsearch/elasticsearch.go +++ b/libbeat/outputs/elasticsearch/elasticsearch.go @@ -45,7 +45,8 @@ func makeES( log := logp.NewLogger(logSelector) eventsLogger := logp.NewLogger(logSelector) // Set a new Output so it writes to a different file than `log` - eventsLogger = eventsLogger.WithOptions(zap.WrapCore(logp.WithFileOutput(eventsLoggerCfg))) + eventsLogger = eventsLogger.WithOptions(zap.WrapCore(logp.WithFileOrStderrOutput(eventsLoggerCfg))) + eventsLogger = eventsLogger.With("logger.type", "sensitive") if !cfg.HasField("bulk_max_size") { if err := cfg.SetInt("bulk_max_size", -1, defaultBulkSize); err != nil { diff --git a/libbeat/outputs/fileout/file.go b/libbeat/outputs/fileout/file.go index 884fa0a054b3..a0aadfd53e43 100644 --- a/libbeat/outputs/fileout/file.go +++ b/libbeat/outputs/fileout/file.go @@ -67,7 +67,9 @@ func makeFileout( logSelector := "file" eventsLogger := logp.NewLogger(logSelector) // Set a new Output so it writes to a different file than `log` - eventsLogger = eventsLogger.WithOptions(zap.WrapCore(logp.WithFileOutput(eventsLoggerCfg))) + eventsLogger = eventsLogger.WithOptions(zap.WrapCore(logp.WithFileOrStderrOutput(eventsLoggerCfg))) + eventsLogger = eventsLogger.With("logger.type", "sensitive") + fo := &fileOutput{ log: logp.NewLogger(logSelector), eventsLogger: eventsLogger, diff --git a/libbeat/outputs/kafka/client.go b/libbeat/outputs/kafka/client.go index b1cd7dcecab5..45f834888060 100644 --- a/libbeat/outputs/kafka/client.go +++ b/libbeat/outputs/kafka/client.go @@ -87,7 +87,8 @@ func newKafkaClient( ) (*client, error) { eventsLogger := logp.NewLogger(logSelector) // Set a new Output so it writes to a different file than `log` - eventsLogger = eventsLogger.WithOptions(zap.WrapCore(logp.WithFileOutput(eventsLoggerCfg))) + eventsLogger = eventsLogger.WithOptions(zap.WrapCore(logp.WithFileOrStderrOutput(eventsLoggerCfg))) + 
eventsLogger = eventsLogger.With("logger.type", "sensitive") c := &client{ log: logp.NewLogger(logSelector), diff --git a/libbeat/outputs/redis/client.go b/libbeat/outputs/redis/client.go index a25e2750b6a1..e6b40c63e047 100644 --- a/libbeat/outputs/redis/client.go +++ b/libbeat/outputs/redis/client.go @@ -81,7 +81,8 @@ func newClient( logSelector := "redis" eventsLogger := logp.NewLogger(logSelector) // Set a new Output so it writes to a different file than `log` - eventsLogger = eventsLogger.WithOptions(zap.WrapCore(logp.WithFileOutput(eventsLoggerCfg))) + eventsLogger = eventsLogger.WithOptions(zap.WrapCore(logp.WithFileOrStderrOutput(eventsLoggerCfg))) + eventsLogger = eventsLogger.With("logger.type", "sensitive") return &client{ log: logp.NewLogger(logSelector), From 8c29eb310b9aa7bceee921060a4f67d53b03f53d Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Mon, 22 Jan 2024 18:15:29 +0100 Subject: [PATCH 17/21] rename eventsLogger to sensitiveLogger --- auditbeat/auditbeat.reference.yml | 4 +- auditbeat/auditbeat.yml | 4 +- filebeat/filebeat.reference.yml | 4 +- filebeat/filebeat.yml | 4 +- go.mod | 2 +- go.sum | 4 +- heartbeat/heartbeat.reference.yml | 4 +- heartbeat/heartbeat.yml | 4 +- .../_meta/config/logging.reference.yml.tmpl | 4 +- libbeat/_meta/config/logging.yml.tmpl | 4 +- libbeat/cmd/instance/beat.go | 61 ++++++++++--------- libbeat/cmd/instance/beat_test.go | 4 +- libbeat/docs/loggingconfig.asciidoc | 20 +++--- libbeat/outputs/console/console.go | 2 +- libbeat/outputs/elasticsearch/client.go | 14 ++--- .../elasticsearch/client_integration_test.go | 10 +-- .../outputs/elasticsearch/elasticsearch.go | 10 +-- libbeat/outputs/fileout/file.go | 14 ++--- libbeat/outputs/kafka/client.go | 14 ++--- libbeat/outputs/kafka/kafka.go | 4 +- libbeat/outputs/logstash/logstash.go | 2 +- libbeat/outputs/output_reg.go | 6 +- libbeat/outputs/redis/client.go | 22 +++---- libbeat/outputs/redis/redis.go | 4 +- libbeat/outputs/shipper/shipper.go | 2 +- libbeat/publisher/pipeline/controller.go | 4 +- libbeat/publisher/pipeline/pipeline.go | 2 +- libbeat/publisher/pipeline/stress/out.go | 2 +- metricbeat/metricbeat.reference.yml | 4 +- metricbeat/metricbeat.yml | 4 +- packetbeat/packetbeat.reference.yml | 4 +- packetbeat/packetbeat.yml | 4 +- winlogbeat/winlogbeat.reference.yml | 4 +- winlogbeat/winlogbeat.yml | 4 +- x-pack/auditbeat/auditbeat.reference.yml | 4 +- x-pack/auditbeat/auditbeat.yml | 4 +- .../pipelinemanager/libbeattools.go | 8 +-- x-pack/filebeat/filebeat.reference.yml | 4 +- x-pack/filebeat/filebeat.yml | 4 +- .../functionbeat/functionbeat.reference.yml | 4 +- x-pack/functionbeat/functionbeat.yml | 4 +- x-pack/heartbeat/heartbeat.reference.yml | 4 +- x-pack/heartbeat/heartbeat.yml | 4 +- x-pack/metricbeat/metricbeat.reference.yml | 4 +- x-pack/metricbeat/metricbeat.yml | 4 +- x-pack/osquerybeat/osquerybeat.reference.yml | 4 +- x-pack/osquerybeat/osquerybeat.yml | 4 +- x-pack/packetbeat/packetbeat.reference.yml | 4 +- x-pack/packetbeat/packetbeat.yml | 4 +- x-pack/winlogbeat/winlogbeat.reference.yml | 4 +- x-pack/winlogbeat/winlogbeat.yml | 4 +- 51 files changed, 166 insertions(+), 165 deletions(-) diff --git a/auditbeat/auditbeat.reference.yml b/auditbeat/auditbeat.reference.yml index 9f965c0c7616..2483daa33c4f 100644 --- a/auditbeat/auditbeat.reference.yml +++ b/auditbeat/auditbeat.reference.yml @@ -1553,14 +1553,14 @@ logging.files: # Having a different log file for raw events also prevents event data # from drowning out the regular log files. 
-#logging.events: +#logging.sensitive: #files: # Configure the path where the logs are written. The default is the logs directory # under the home path (the binary location). #path: /var/log/auditbeat # The name of the files where the logs are written to. - #name: auditbeat-events-data + #name: auditbeat-sensitive # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. diff --git a/auditbeat/auditbeat.yml b/auditbeat/auditbeat.yml index e882ac93aaff..ba867c0f20fa 100644 --- a/auditbeat/auditbeat.yml +++ b/auditbeat/auditbeat.yml @@ -174,14 +174,14 @@ processors: # log messages, a different log file, only for log entries containing raw events, # is used. It will use the same level, selectors and all other configurations # from the default logger, but it will have it's own file configuration. -#logging.events: +#logging.sensitive: #files: # Configure the path where the logs are written. The default is the logs directory # under the home path (the binary location). #path: /var/log/auditbeat # The name of the files where the logs are written to. - #name: auditbeat-events-data + #name: auditbeat-sensitive # ============================= X-Pack Monitoring ============================== # Auditbeat can export internal metrics to a central Elasticsearch monitoring diff --git a/filebeat/filebeat.reference.yml b/filebeat/filebeat.reference.yml index 272cff60d8ea..2577942ed124 100644 --- a/filebeat/filebeat.reference.yml +++ b/filebeat/filebeat.reference.yml @@ -2649,14 +2649,14 @@ logging.files: # Having a different log file for raw events also prevents event data # from drowning out the regular log files. -#logging.events: +#logging.sensitive: #files: # Configure the path where the logs are written. The default is the logs directory # under the home path (the binary location). #path: /var/log/filebeat # The name of the files where the logs are written to. - #name: filebeat-events-data + #name: filebeat-sensitive # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. diff --git a/filebeat/filebeat.yml b/filebeat/filebeat.yml index 3add6f54a11a..863abb33bb7c 100644 --- a/filebeat/filebeat.yml +++ b/filebeat/filebeat.yml @@ -191,14 +191,14 @@ processors: # log messages, a different log file, only for log entries containing raw events, # is used. It will use the same level, selectors and all other configurations # from the default logger, but it will have it's own file configuration. -#logging.events: +#logging.sensitive: #files: # Configure the path where the logs are written. The default is the logs directory # under the home path (the binary location). #path: /var/log/filebeat # The name of the files where the logs are written to. - #name: filebeat-events-data + #name: filebeat-sensitive # ============================= X-Pack Monitoring ============================== # Filebeat can export internal metrics to a central Elasticsearch monitoring diff --git a/go.mod b/go.mod index 5c9e344d89e1..b1598cf2dc87 100644 --- a/go.mod +++ b/go.mod @@ -420,4 +420,4 @@ replace ( // Exclude this version because the version has an invalid checksum. 
exclude github.com/docker/distribution v2.8.0+incompatible -replace github.com/elastic/elastic-agent-libs => github.com/belimawr/elastic-agent-libs v0.2.9-0.20240116105334-25f61a14ad41 +replace github.com/elastic/elastic-agent-libs => github.com/belimawr/elastic-agent-libs v0.2.9-0.20240122163001-efb117578ab2 diff --git a/go.sum b/go.sum index e0c506e96c0a..3cfab73cd90e 100644 --- a/go.sum +++ b/go.sum @@ -373,8 +373,8 @@ github.com/awslabs/goformation/v4 v4.1.0 h1:JRxIW0IjhYpYDrIZOTJGMu2azXKI+OK5dP56 github.com/awslabs/goformation/v4 v4.1.0/go.mod h1:MBDN7u1lMNDoehbFuO4uPvgwPeolTMA2TzX1yO6KlxI= github.com/awslabs/kinesis-aggregation/go/v2 v2.0.0-20220623125934-28468a6701b5 h1:lxW5Q6K2IisyF5tlr6Ts0W4POGWQZco05MJjFmoeIHs= github.com/awslabs/kinesis-aggregation/go/v2 v2.0.0-20220623125934-28468a6701b5/go.mod h1:0Qr1uMHFmHsIYMcG4T7BJ9yrJtWadhOmpABCX69dwuc= -github.com/belimawr/elastic-agent-libs v0.2.9-0.20240116105334-25f61a14ad41 h1:4kwfzIBmNATT0es3HsgZP7W4p6OUo1TCOk5qchsUzTs= -github.com/belimawr/elastic-agent-libs v0.2.9-0.20240116105334-25f61a14ad41/go.mod h1:pGMj5myawdqu+xE+WKvM5FQzKQ/MonikkWOzoFTJxaU= +github.com/belimawr/elastic-agent-libs v0.2.9-0.20240122163001-efb117578ab2 h1:QOTo5kTJ8oqdrSOH8/OhSkEMA3mnRltGg52M9YyH7Zo= +github.com/belimawr/elastic-agent-libs v0.2.9-0.20240122163001-efb117578ab2/go.mod h1:pGMj5myawdqu+xE+WKvM5FQzKQ/MonikkWOzoFTJxaU= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/immutable v0.2.1/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI= github.com/benbjohnson/tmpl v1.0.0/go.mod h1:igT620JFIi44B6awvU9IsDhR77IXWtFigTLil/RPdps= diff --git a/heartbeat/heartbeat.reference.yml b/heartbeat/heartbeat.reference.yml index 1c7317042850..fceb17e7d777 100644 --- a/heartbeat/heartbeat.reference.yml +++ b/heartbeat/heartbeat.reference.yml @@ -1645,14 +1645,14 @@ logging.files: # Having a different log file for raw events also prevents event data # from drowning out the regular log files. -#logging.events: +#logging.sensitive: #files: # Configure the path where the logs are written. The default is the logs directory # under the home path (the binary location). #path: /var/log/heartbeat # The name of the files where the logs are written to. - #name: heartbeat-events-data + #name: heartbeat-sensitive # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. diff --git a/heartbeat/heartbeat.yml b/heartbeat/heartbeat.yml index 0b28eec374e0..be707ad671e4 100644 --- a/heartbeat/heartbeat.yml +++ b/heartbeat/heartbeat.yml @@ -157,14 +157,14 @@ processors: # log messages, a different log file, only for log entries containing raw events, # is used. It will use the same level, selectors and all other configurations # from the default logger, but it will have it's own file configuration. -#logging.events: +#logging.sensitive: #files: # Configure the path where the logs are written. The default is the logs directory # under the home path (the binary location). #path: /var/log/heartbeat # The name of the files where the logs are written to. 
- #name: heartbeat-events-data + #name: heartbeat-sensitive # ============================= X-Pack Monitoring ============================== # Heartbeat can export internal metrics to a central Elasticsearch monitoring diff --git a/libbeat/_meta/config/logging.reference.yml.tmpl b/libbeat/_meta/config/logging.reference.yml.tmpl index d8e91dfffdbb..1cc018bbf3e1 100644 --- a/libbeat/_meta/config/logging.reference.yml.tmpl +++ b/libbeat/_meta/config/logging.reference.yml.tmpl @@ -77,14 +77,14 @@ logging.files: # Having a different log file for raw events also prevents event data # from drowning out the regular log files. -#logging.events: +#logging.sensitive: #files: # Configure the path where the logs are written. The default is the logs directory # under the home path (the binary location). #path: /var/log/{{.BeatName}} # The name of the files where the logs are written to. - #name: {{.BeatName}}-events-data + #name: {{.BeatName}}-sensitive # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. diff --git a/libbeat/_meta/config/logging.yml.tmpl b/libbeat/_meta/config/logging.yml.tmpl index 7fe93c9fc0a1..8a1f5d18cc84 100644 --- a/libbeat/_meta/config/logging.yml.tmpl +++ b/libbeat/_meta/config/logging.yml.tmpl @@ -14,11 +14,11 @@ # log messages, a different log file, only for log entries containing raw events, # is used. It will use the same level, selectors and all other configurations # from the default logger, but it will have it's own file configuration. -#logging.events: +#logging.sensitive: #files: # Configure the path where the logs are written. The default is the logs directory # under the home path (the binary location). #path: /var/log/{{.BeatName}} # The name of the files where the logs are written to. - #name: {{.BeatName}}-events-data + #name: {{.BeatName}}-sensitive diff --git a/libbeat/cmd/instance/beat.go b/libbeat/cmd/instance/beat.go index 4362e276b734..0f9b1816b1a7 100644 --- a/libbeat/cmd/instance/beat.go +++ b/libbeat/cmd/instance/beat.go @@ -122,15 +122,15 @@ type beatConfig struct { Features *config.C `config:"features"` // beat internal components configurations - HTTP *config.C `config:"http"` - HTTPPprof *pprof.Config `config:"http.pprof"` - BufferConfig *config.C `config:"http.buffer"` - Path paths.Path `config:"path"` - Logging *config.C `config:"logging"` - EventLogging *config.C `config:"logging.events"` - MetricLogging *config.C `config:"logging.metrics"` - Keystore *config.C `config:"keystore"` - Instrumentation instrumentation.Config `config:"instrumentation"` + HTTP *config.C `config:"http"` + HTTPPprof *pprof.Config `config:"http.pprof"` + BufferConfig *config.C `config:"http.buffer"` + Path paths.Path `config:"path"` + Logging *config.C `config:"logging"` + SensitiveLogging *config.C `config:"logging.sensitive"` + MetricLogging *config.C `config:"logging.metrics"` + Keystore *config.C `config:"keystore"` + Instrumentation instrumentation.Config `config:"instrumentation"` // output/publishing related configurations Pipeline pipeline.Config `config:",inline"` @@ -386,29 +386,30 @@ func (b *Beat) createBeater(bt beat.Creator) (beat.Beater, error) { // we need some defaults to be populates otherwise Unpack will // fail. We also overwrite some defaults that are specific to the // events logger. 
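Consolidated outside the diff, the configuration handling that this comment describes ends up roughly as follows once the rename below is applied. This is a sketch only, assuming the packages beat.go already imports (`config`, `logp`, `logp/configure`, `fmt`); the real logic stays inline in `createBeater` and the helper name is invented for illustration:

// sensitiveLoggerConfig mirrors the inline logic: start from the regular
// logger defaults, force file output with a 5 MB / 5-backup rotation policy,
// let logging.sensitive.* override those values, and fall back to a per-Beat
// file name so the sensitive log never shares a file with the normal log.
func sensitiveLoggerConfig(raw *config.C, beatName string) (logp.Config, error) {
	cfg := logp.DefaultConfig(configure.GetEnvironment())
	cfg.ToFiles = true          // make the default explicit
	cfg.Files.MaxSize = 5242880 // 5 MB
	cfg.Files.MaxBackups = 5

	// logging.sensitive.* only overrides the defaults set above.
	if err := raw.Unpack(&cfg); err != nil {
		return logp.Config{}, fmt.Errorf("error initialising events logger: %w", err)
	}

	// Ensure a distinct default filename, e.g. "filebeat-events-data".
	if cfg.Files.Name == "" {
		cfg.Files.Name = beatName + "-events-data"
	}
	return cfg, nil
}
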
- eventsLoggerCfg := logp.DefaultConfig(configure.GetEnvironment()) - eventsLoggerCfg.Files.MaxSize = 5242880 // 5MB - eventsLoggerCfg.Files.MaxBackups = 5 + sensitiveLoggerCfg := logp.DefaultConfig(configure.GetEnvironment()) + sensitiveLoggerCfg.ToFiles = true // make the default explicit + sensitiveLoggerCfg.Files.MaxSize = 5242880 // 5MB + sensitiveLoggerCfg.Files.MaxBackups = 5 - // merge eventsLoggerCfg with b.Config.Logging, so logging.events.* only + // merge sensitiveLoggerCfg with b.Config.Logging, so logging.sensitive.* only // overwrites the files block. - if err := b.Config.EventLogging.Unpack(&eventsLoggerCfg); err != nil { + if err := b.Config.SensitiveLogging.Unpack(&sensitiveLoggerCfg); err != nil { return nil, fmt.Errorf("error initialising events logger: %w", err) } // Ensure the default filename is set - if eventsLoggerCfg.Files.Name == "" { - eventsLoggerCfg.Files.Name = b.Info.Beat + if sensitiveLoggerCfg.Files.Name == "" { + sensitiveLoggerCfg.Files.Name = b.Info.Beat // Append the name so the files do not overwrite themselves. - eventsLoggerCfg.Files.Name = eventsLoggerCfg.Files.Name + "-events-data" + sensitiveLoggerCfg.Files.Name = sensitiveLoggerCfg.Files.Name + "-events-data" } // Now that the events logger is configured, we can register it's diagnostic // hook b.Manager.RegisterDiagnosticHook("events log", "log files containing raw events", "events_log.ndjson", - "application/x-ndjson", b.eventsLogDiagnosticsHook(eventsLoggerCfg)) - outputFactory := b.makeOutputFactory(b.Config.Output, eventsLoggerCfg) + "application/x-ndjson", b.eventsLogDiagnosticsHook(sensitiveLoggerCfg)) + outputFactory := b.makeOutputFactory(b.Config.Output, sensitiveLoggerCfg) settings := pipeline.Settings{ Processors: b.processors, InputQueueSize: b.InputQueueSize, @@ -418,7 +419,7 @@ func (b *Beat) createBeater(bt beat.Creator) (beat.Beater, error) { return nil, fmt.Errorf("error initializing publisher: %w", err) } - reload.RegisterV2.MustRegisterOutput(b.makeOutputReloader(publisher.OutputReloader(), eventsLoggerCfg)) + reload.RegisterV2.MustRegisterOutput(b.makeOutputReloader(publisher.OutputReloader(), sensitiveLoggerCfg)) // TODO: some beats race on shutdown with publisher.Stop -> do not call Stop yet, // but refine publisher to disconnect clients on stop automatically @@ -855,18 +856,18 @@ func (b *Beat) configure(settings Settings) error { // If either b.Config.EventLoggingor b.Config.Logging are nil // merging them will fail, so in case any of them is nil, // we set them to an empty config.C - if b.Config.EventLogging == nil { - b.Config.EventLogging = config.NewConfig() + if b.Config.SensitiveLogging == nil { + b.Config.SensitiveLogging = config.NewConfig() } if b.Config.Logging == nil { b.Config.Logging = config.NewConfig() } - if err := b.Config.EventLogging.Merge(b.Config.Logging); err != nil { - return fmt.Errorf("cannot merge logging and logging.events configuration: %w", err) + if err := b.Config.SensitiveLogging.Merge(b.Config.Logging); err != nil { + return fmt.Errorf("cannot merge logging and logging.sensitive configuration: %w", err) } - if _, err := b.Config.EventLogging.Remove("events", -1); err != nil { - return fmt.Errorf("cannot update logging.events configuration: %w", err) + if _, err := b.Config.SensitiveLogging.Remove("events", -1); err != nil { + return fmt.Errorf("cannot update logging.sensitive configuration: %w", err) } if err := promoteOutputQueueSettings(&b.Config); err != nil { @@ -1176,7 +1177,7 @@ func (b *Beat) indexSetupCallback() 
elasticsearch.ConnectCallback { } } -func (b *Beat) makeOutputReloader(outReloader pipeline.OutputReloader, eventsLoggerCfg logp.Config) reload.Reloadable { +func (b *Beat) makeOutputReloader(outReloader pipeline.OutputReloader, sensitiveLoggerCfg logp.Config) reload.Reloadable { return reload.ReloadableFunc(func(update *reload.ConfigWithMeta) error { if update == nil { return nil @@ -1198,7 +1199,7 @@ func (b *Beat) makeOutputReloader(outReloader pipeline.OutputReloader, eventsLog } } - return outReloader.Reload(update, eventsLoggerCfg, b.createOutput) + return outReloader.Reload(update, sensitiveLoggerCfg, b.createOutput) }) } @@ -1303,7 +1304,7 @@ func (b *Beat) reloadOutputOnCertChange(cfg config.Namespace) error { return nil } -func (b *Beat) createOutput(stats outputs.Observer, cfg config.Namespace, eventsLoggerCfg logp.Config) (outputs.Group, error) { +func (b *Beat) createOutput(stats outputs.Observer, cfg config.Namespace, sensitiveLoggerCfg logp.Config) (outputs.Group, error) { if !cfg.IsSet() { return outputs.Group{}, nil } @@ -1312,7 +1313,7 @@ func (b *Beat) createOutput(stats outputs.Observer, cfg config.Namespace, events return outputs.Group{}, fmt.Errorf("could not setup output certificates reloader: %w", err) } - return outputs.Load(b.IdxSupporter, b.Info, stats, cfg.Name(), cfg.Config(), eventsLoggerCfg) + return outputs.Load(b.IdxSupporter, b.Info, stats, cfg.Name(), cfg.Config(), sensitiveLoggerCfg) } func (b *Beat) registerClusterUUIDFetching() { diff --git a/libbeat/cmd/instance/beat_test.go b/libbeat/cmd/instance/beat_test.go index 0ee30cdcc720..184797591b86 100644 --- a/libbeat/cmd/instance/beat_test.go +++ b/libbeat/cmd/instance/beat_test.go @@ -267,8 +267,8 @@ type outputReloaderMock struct { func (r *outputReloaderMock) Reload( cfg *reload.ConfigWithMeta, - eventsLoggerCfg logp.Config, - factory func(o outputs.Observer, cfg config.Namespace, eventsLoggerCfg logp.Config) (outputs.Group, error), + sensitiveLoggerCfg logp.Config, + factory func(o outputs.Observer, cfg config.Namespace, sensitiveLoggerCfg logp.Config) (outputs.Group, error), ) error { r.cfg = cfg return nil diff --git a/libbeat/docs/loggingconfig.asciidoc b/libbeat/docs/loggingconfig.asciidoc index f9cc07563eb2..89181dc69617 100644 --- a/libbeat/docs/loggingconfig.asciidoc +++ b/libbeat/docs/loggingconfig.asciidoc @@ -296,7 +296,7 @@ Below are some samples: ifndef::serverless[] [float] -=== Configuration options for events logger +=== Configuration options for sensitive logger Some outputs will log raw events on errors like indexing errors in the Elasticsearch output, to prevent logging raw events (that may contain @@ -309,34 +309,34 @@ Having a different log file for raw events also prevents event data from drowning out the regular log files. IMPORTANT: No matter the default logger output configuration, raw events -will **always** be logged to a file configured by `logging.events.files`. +will **always** be logged to a file configured by `logging.sensitive.files`. [float] -==== `logging.events.files.path` +==== `logging.sensitive.files.path` The directory that log files are written to. The default is the logs path. See the <> section for details. [float] -==== `logging.events.files.name` +==== `logging.sensitive.files.name` -The name of the file that logs are written to. The default is '{beatname_lc}'. +The name of the file that logs are written to. The default is '{beatname_lc}'-sensitive. 
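For example, to send the sensitive log to a dedicated directory with an explicit file name (an illustrative snippet; `mybeat` stands in for the Beat name, and the rotation options are described below):

[source,yaml]
----
logging.sensitive:
  files:
    path: /var/log/mybeat
    name: mybeat-sensitive
    rotateeverybytes: 5242880   # 5 MB, the default
    keepfiles: 5                # the default
----

The log level, selectors, and the other `logging.*` settings still come from the
main logger configuration; only the `files` block is specific to the sensitive log.
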
[float] -==== `logging.events.files.rotateeverybytes` +==== `logging.sensitive.files.rotateeverybytes` The maximum size of a log file. If the limit is reached, a new log file is generated. The default size limit is 5242880 (5 MB). [float] -==== `logging.events.files.keepfiles` +==== `logging.sensitive.files.keepfiles` The number of most recent rotated log files to keep on disk. Older files are deleted during log rotation. The default value is 5. The `keepfiles` options has to be in the range of 2 to 1024 files. [float] -==== `logging.events.files.permissions` +==== `logging.sensitive.files.permissions` The permissions mask to apply when rotating log files. The default value is 0600. The `permissions` option must be a valid Unix-style file permissions mask @@ -354,7 +354,7 @@ Examples: * 0600: give read and write access to the file owner, and no access to all others. [float] -==== `logging.events.files.interval` +==== `logging.sensitive.files.interval` Enable log file rotation on time intervals in addition to size-based rotation. Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h @@ -363,7 +363,7 @@ reported by the local system clock. All other intervals are calculated from the unix epoch. Defaults to disabled. [float] -==== `logging.events.files.rotateonstartup` +==== `logging.sensitive.files.rotateonstartup` If the log file already exists on startup, immediately rotate it and start writing to a new file instead of appending to the existing one. Defaults to diff --git a/libbeat/outputs/console/console.go b/libbeat/outputs/console/console.go index a7cc4a69e6ad..753fe0b4b30d 100644 --- a/libbeat/outputs/console/console.go +++ b/libbeat/outputs/console/console.go @@ -51,7 +51,7 @@ func makeConsole( beat beat.Info, observer outputs.Observer, cfg *config.C, - eventsLoggerCfg logp.Config, + sensitiveLoggerCfg logp.Config, ) (outputs.Group, error) { config := defaultConfig err := cfg.Unpack(&config) diff --git a/libbeat/outputs/elasticsearch/client.go b/libbeat/outputs/elasticsearch/client.go index b9728fc7bf88..22df451600e4 100644 --- a/libbeat/outputs/elasticsearch/client.go +++ b/libbeat/outputs/elasticsearch/client.go @@ -56,7 +56,7 @@ type Client struct { NonIndexableAction string log *logp.Logger - eventsLogger *logp.Logger + sensitiveLogger *logp.Logger } // ClientSettings contains the settings for a client. @@ -83,7 +83,7 @@ const ( // NewClient instantiates a new client. func NewClient( logger *logp.Logger, - eventsLogger *logp.Logger, + sensitiveLogger *logp.Logger, s ClientSettings, onConnect *callbacksRegistry, ) (*Client, error) { @@ -144,7 +144,7 @@ func NewClient( NonIndexableAction: s.NonIndexableAction, log: logger, - eventsLogger: eventsLogger, + sensitiveLogger: sensitiveLogger, } return client, nil @@ -179,7 +179,7 @@ func (client *Client) Clone() *Client { c, _ := NewClient( client.log, - client.eventsLogger, + client.sensitiveLogger, ClientSettings{ ConnectionSettings: connection, Index: client.index, @@ -438,11 +438,11 @@ func (client *Client) bulkCollectPublishFails(result eslegclient.BulkResult, dat if result { stats.nonIndexable++ client.log.Errorf("Can't deliver to dead letter index event (status=%v). 
Look for events-data log file to view the event and cause.", status) - client.eventsLogger.Errorf("Can't deliver to dead letter index event %#v (status=%v): %s", data[i], status, msg) + client.sensitiveLogger.Errorf("Can't deliver to dead letter index event %#v (status=%v): %s", data[i], status, msg) // poison pill - this will clog the pipeline if the underlying failure is non transient. } else if client.NonIndexableAction == dead_letter_index { client.log.Warnf("Cannot index event (status=%v), trying dead letter index. Look for events-data log file to view the event and cause.", status) - client.eventsLogger.Warnf("Cannot index event %#v (status=%v): %s, trying dead letter index", data[i], status, msg) + client.sensitiveLogger.Warnf("Cannot index event %#v (status=%v): %s, trying dead letter index", data[i], status, msg) if data[i].Content.Meta == nil { data[i].Content.Meta = mapstr.M{ dead_letter_marker_field: true, @@ -458,7 +458,7 @@ func (client *Client) bulkCollectPublishFails(result eslegclient.BulkResult, dat } else { // drop stats.nonIndexable++ client.log.Warnf("Cannot index event (status=%v): dropping event! Look for events-data log file to view the event and cause.", status) - client.eventsLogger.Warnf("Cannot index event %#v (status=%v): %s, dropping event!", data[i], status, msg) + client.sensitiveLogger.Warnf("Cannot index event %#v (status=%v): %s, dropping event!", data[i], status, msg) continue } } diff --git a/libbeat/outputs/elasticsearch/client_integration_test.go b/libbeat/outputs/elasticsearch/client_integration_test.go index 7eebadc80098..2ecafac5f1d8 100644 --- a/libbeat/outputs/elasticsearch/client_integration_test.go +++ b/libbeat/outputs/elasticsearch/client_integration_test.go @@ -426,12 +426,12 @@ func connectTestEs(t *testing.T, cfg interface{}, stats outputs.Observer) (outpu // Creates the events logger configuration for testing, // it uses the default one but logs to stderr instead of a file. // This prevents the test to leave log files behind. 
- eventsLoggerCfg := logp.DefaultConfig(logp.DefaultEnvironment) - eventsLoggerCfg.Level = logp.DebugLevel - eventsLoggerCfg.ToStderr = true - eventsLoggerCfg.ToFiles = false + sensitiveLoggerCfg := logp.DefaultConfig(logp.DefaultEnvironment) + sensitiveLoggerCfg.Level = logp.DebugLevel + sensitiveLoggerCfg.ToStderr = true + sensitiveLoggerCfg.ToFiles = false - output, err := makeES(im, info, stats, config, eventsLoggerCfg) + output, err := makeES(im, info, stats, config, sensitiveLoggerCfg) if err != nil { t.Fatal(err) } diff --git a/libbeat/outputs/elasticsearch/elasticsearch.go b/libbeat/outputs/elasticsearch/elasticsearch.go index 975c78a13161..10d41279e50c 100644 --- a/libbeat/outputs/elasticsearch/elasticsearch.go +++ b/libbeat/outputs/elasticsearch/elasticsearch.go @@ -40,13 +40,13 @@ func makeES( beat beat.Info, observer outputs.Observer, cfg *config.C, - eventsLoggerCfg logp.Config, + sensitiveLoggerCfg logp.Config, ) (outputs.Group, error) { log := logp.NewLogger(logSelector) - eventsLogger := logp.NewLogger(logSelector) + sensitiveLogger := logp.NewLogger(logSelector) // Set a new Output so it writes to a different file than `log` - eventsLogger = eventsLogger.WithOptions(zap.WrapCore(logp.WithFileOrStderrOutput(eventsLoggerCfg))) - eventsLogger = eventsLogger.With("logger.type", "sensitive") + sensitiveLogger = sensitiveLogger.WithOptions(zap.WrapCore(logp.WithFileOrStderrOutput(sensitiveLoggerCfg))) + sensitiveLogger = sensitiveLogger.With("logger.type", "sensitive") if !cfg.HasField("bulk_max_size") { if err := cfg.SetInt("bulk_max_size", -1, defaultBulkSize); err != nil { @@ -120,7 +120,7 @@ func makeES( var client outputs.NetworkClient client, err = NewClient( log, - eventsLogger, + sensitiveLogger, ClientSettings{ ConnectionSettings: eslegclient.ConnectionSettings{ URL: esURL, diff --git a/libbeat/outputs/fileout/file.go b/libbeat/outputs/fileout/file.go index a0aadfd53e43..d003849b3025 100644 --- a/libbeat/outputs/fileout/file.go +++ b/libbeat/outputs/fileout/file.go @@ -40,7 +40,7 @@ func init() { type fileOutput struct { log *logp.Logger - eventsLogger *logp.Logger + sensitiveLogger *logp.Logger filePath string beat beat.Info observer outputs.Observer @@ -54,7 +54,7 @@ func makeFileout( beat beat.Info, observer outputs.Observer, cfg *c.C, - eventsLoggerCfg logp.Config, + sensitiveLoggerCfg logp.Config, ) (outputs.Group, error) { foConfig := defaultConfig() if err := cfg.Unpack(&foConfig); err != nil { @@ -65,14 +65,14 @@ func makeFileout( _ = cfg.SetInt("bulk_max_size", -1, -1) logSelector := "file" - eventsLogger := logp.NewLogger(logSelector) + sensitiveLogger := logp.NewLogger(logSelector) // Set a new Output so it writes to a different file than `log` - eventsLogger = eventsLogger.WithOptions(zap.WrapCore(logp.WithFileOrStderrOutput(eventsLoggerCfg))) - eventsLogger = eventsLogger.With("logger.type", "sensitive") + sensitiveLogger = sensitiveLogger.WithOptions(zap.WrapCore(logp.WithFileOrStderrOutput(sensitiveLoggerCfg))) + sensitiveLogger = sensitiveLogger.With("logger.type", "sensitive") fo := &fileOutput{ log: logp.NewLogger(logSelector), - eventsLogger: eventsLogger, + sensitiveLogger: sensitiveLogger, beat: beat, observer: observer, } @@ -143,7 +143,7 @@ func (out *fileOutput) Publish(_ context.Context, batch publisher.Batch) error { out.log.Warnf("Failed to serialize the event: %+v", err) } out.log.Debug("Event logged to events-data log file") - out.eventsLogger.Debugf("Failed event: %v", event) + out.sensitiveLogger.Debugf("Failed event: %v", event) dropped++ 
continue diff --git a/libbeat/outputs/kafka/client.go b/libbeat/outputs/kafka/client.go index 45f834888060..ae6c5455f0e1 100644 --- a/libbeat/outputs/kafka/client.go +++ b/libbeat/outputs/kafka/client.go @@ -42,7 +42,7 @@ import ( type client struct { log *logp.Logger - eventsLogger *logp.Logger + sensitiveLogger *logp.Logger observer outputs.Observer hosts []string topic outil.Selector @@ -83,16 +83,16 @@ func newKafkaClient( headers []header, writer codec.Codec, cfg *sarama.Config, - eventsLoggerCfg logp.Config, + sensitiveLoggerCfg logp.Config, ) (*client, error) { - eventsLogger := logp.NewLogger(logSelector) + sensitiveLogger := logp.NewLogger(logSelector) // Set a new Output so it writes to a different file than `log` - eventsLogger = eventsLogger.WithOptions(zap.WrapCore(logp.WithFileOrStderrOutput(eventsLoggerCfg))) - eventsLogger = eventsLogger.With("logger.type", "sensitive") + sensitiveLogger = sensitiveLogger.WithOptions(zap.WrapCore(logp.WithFileOrStderrOutput(sensitiveLoggerCfg))) + sensitiveLogger = sensitiveLogger.With("logger.type", "sensitive") c := &client{ log: logp.NewLogger(logSelector), - eventsLogger: eventsLogger, + sensitiveLogger: sensitiveLogger, observer: observer, hosts: hosts, topic: topic, @@ -238,7 +238,7 @@ func (c *client) getEventMessage(data *publisher.Event) (*message, error) { if err != nil { if c.log.IsDebug() { c.log.Debug("failed event logged to events logger file") - c.eventsLogger.Debugf("failed event: %v", event) + c.sensitiveLogger.Debugf("failed event: %v", event) } return nil, err } diff --git a/libbeat/outputs/kafka/kafka.go b/libbeat/outputs/kafka/kafka.go index 524a51bafe2d..93b3edfa2fb4 100644 --- a/libbeat/outputs/kafka/kafka.go +++ b/libbeat/outputs/kafka/kafka.go @@ -43,7 +43,7 @@ func makeKafka( beat beat.Info, observer outputs.Observer, cfg *config.C, - eventsLoggerCfg logp.Config, + sensitiveLoggerCfg logp.Config, ) (outputs.Group, error) { log := logp.NewLogger(logSelector) log.Debug("initialize kafka output") @@ -73,7 +73,7 @@ func makeKafka( return outputs.Fail(err) } - client, err := newKafkaClient(observer, hosts, beat.IndexPrefix, kConfig.Key, topic, kConfig.Headers, codec, libCfg, eventsLoggerCfg) + client, err := newKafkaClient(observer, hosts, beat.IndexPrefix, kConfig.Key, topic, kConfig.Headers, codec, libCfg, sensitiveLoggerCfg) if err != nil { return outputs.Fail(err) } diff --git a/libbeat/outputs/logstash/logstash.go b/libbeat/outputs/logstash/logstash.go index 466f6b742f9f..cc5a50251648 100644 --- a/libbeat/outputs/logstash/logstash.go +++ b/libbeat/outputs/logstash/logstash.go @@ -41,7 +41,7 @@ func makeLogstash( beat beat.Info, observer outputs.Observer, cfg *conf.C, - eventsLoggerCfg logp.Config, + sensitiveLoggerCfg logp.Config, ) (outputs.Group, error) { lsConfig, err := readConfig(cfg, beat) if err != nil { diff --git a/libbeat/outputs/output_reg.go b/libbeat/outputs/output_reg.go index 213daf0298ad..de0d5a2714db 100644 --- a/libbeat/outputs/output_reg.go +++ b/libbeat/outputs/output_reg.go @@ -34,7 +34,7 @@ type Factory func( beat beat.Info, stats Observer, cfg *config.C, - eventsLogger logp.Config) (Group, error) + sensitiveLoggerCfg logp.Config) (Group, error) // IndexManager provides additional index related services to the outputs. 
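Each output that now receives this extra `logp.Config` argument turns it into a second logger in the same way. Condensed from the elasticsearch, file, kafka, and redis hunks in this patch, the pattern is roughly the sketch below (the `newSensitiveLogger` name is invented for illustration, and `logp.WithFileOrStderrOutput` comes from the elastic-agent-libs fork pinned in go.mod):

import (
	"go.uber.org/zap"

	"github.com/elastic/elastic-agent-libs/logp"
)

// newSensitiveLogger builds a logger that shares the selector of the output's
// regular logger but writes to the file (or stderr) described by the
// sensitive logger configuration, and tags its entries for easy filtering.
func newSensitiveLogger(selector string, cfg logp.Config) *logp.Logger {
	logger := logp.NewLogger(selector)
	// Replace the zap core's output with one built from cfg.
	logger = logger.WithOptions(zap.WrapCore(logp.WithFileOrStderrOutput(cfg)))
	// At this point in the series the marker field is "logger.type";
	// a later commit in this set renames it to "log.type".
	return logger.With("logger.type", "sensitive")
}

The regular logger then carries only a terse message pointing at the events-data file, while the full event goes through this second logger, as in `bulkCollectPublishFails` above.
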
type IndexManager interface { @@ -83,7 +83,7 @@ func Load( stats Observer, name string, config *config.C, - eventsLoggerCfg logp.Config, + sensitiveLoggerCfg logp.Config, ) (Group, error) { factory := FindFactory(name) if factory == nil { @@ -93,5 +93,5 @@ func Load( if stats == nil { stats = NewNilObserver() } - return factory(im, info, stats, config, eventsLoggerCfg) + return factory(im, info, stats, config, sensitiveLoggerCfg) } diff --git a/libbeat/outputs/redis/client.go b/libbeat/outputs/redis/client.go index e6b40c63e047..5accd79ff1ef 100644 --- a/libbeat/outputs/redis/client.go +++ b/libbeat/outputs/redis/client.go @@ -49,7 +49,7 @@ type publishFn func( type client struct { log *logp.Logger - eventsLogger *logp.Logger + sensitiveLogger *logp.Logger *transport.Client observer outputs.Observer index string @@ -76,17 +76,17 @@ func newClient( pass string, db int, key outil.Selector, dt redisDataType, index string, codec codec.Codec, - eventsLoggerCfg logp.Config, + sensitiveLoggerCfg logp.Config, ) *client { logSelector := "redis" - eventsLogger := logp.NewLogger(logSelector) + sensitiveLogger := logp.NewLogger(logSelector) // Set a new Output so it writes to a different file than `log` - eventsLogger = eventsLogger.WithOptions(zap.WrapCore(logp.WithFileOrStderrOutput(eventsLoggerCfg))) - eventsLogger = eventsLogger.With("logger.type", "sensitive") + sensitiveLogger = sensitiveLogger.WithOptions(zap.WrapCore(logp.WithFileOrStderrOutput(sensitiveLoggerCfg))) + sensitiveLogger = sensitiveLogger.With("logger.type", "sensitive") return &client{ log: logp.NewLogger(logSelector), - eventsLogger: eventsLogger, + sensitiveLogger: sensitiveLogger, Client: tc, observer: observer, timeout: timeout, @@ -237,7 +237,7 @@ func (c *client) publishEventsBulk(conn redis.Conn, command string) publishFn { args := make([]interface{}, 1, len(data)+1) args[0] = dest - okEvents, args := serializeEvents(c.log, c.eventsLogger, args, 1, data, c.index, c.codec) + okEvents, args := serializeEvents(c.log, c.sensitiveLogger, args, 1, data, c.index, c.codec) c.observer.Dropped(len(data) - len(okEvents)) if (len(args) - 1) == 0 { return nil, nil @@ -263,7 +263,7 @@ func (c *client) publishEventsPipeline(conn redis.Conn, command string) publishF return func(key outil.Selector, data []publisher.Event) ([]publisher.Event, error) { var okEvents []publisher.Event serialized := make([]interface{}, 0, len(data)) - okEvents, serialized = serializeEvents(c.log, c.eventsLogger, serialized, 0, data, c.index, c.codec) + okEvents, serialized = serializeEvents(c.log, c.sensitiveLogger, serialized, 0, data, c.index, c.codec) c.observer.Dropped(len(data) - len(okEvents)) if len(serialized) == 0 { return nil, nil @@ -318,7 +318,7 @@ func (c *client) publishEventsPipeline(conn redis.Conn, command string) publishF func serializeEvents( log *logp.Logger, - eventsLogger *logp.Logger, + sensitiveLogger *logp.Logger, to []interface{}, i int, data []publisher.Event, @@ -331,7 +331,7 @@ func serializeEvents( serializedEvent, err := codec.Encode(index, &d.Content) if err != nil { log.Errorf("Encoding event failed with error: %+v. Look for events-data log file to view the event", err) - eventsLogger.Debugf("Failed event: %v", d.Content) + sensitiveLogger.Debugf("Failed event: %v", d.Content) goto failLoop } @@ -349,7 +349,7 @@ failLoop: serializedEvent, err := codec.Encode(index, &d.Content) if err != nil { log.Errorf("Encoding event failed with error: %+v. 
Look for events-data log file to view the event", err) - eventsLogger.Debugf("Failed event: %v", d.Content) + sensitiveLogger.Debugf("Failed event: %v", d.Content) i++ continue } diff --git a/libbeat/outputs/redis/redis.go b/libbeat/outputs/redis/redis.go index 5f902620a2ab..8b80cfc52060 100644 --- a/libbeat/outputs/redis/redis.go +++ b/libbeat/outputs/redis/redis.go @@ -52,7 +52,7 @@ func makeRedis( beat beat.Info, observer outputs.Observer, cfg *config.C, - eventsLoggerCfg logp.Config, + sensitiveLoggerCfg logp.Config, ) (outputs.Group, error) { if !cfg.HasField("index") { @@ -163,7 +163,7 @@ func makeRedis( } client := newClient(conn, observer, rConfig.Timeout, - pass, rConfig.Db, key, dataType, rConfig.Index, enc, eventsLoggerCfg) + pass, rConfig.Db, key, dataType, rConfig.Index, enc, sensitiveLoggerCfg) clients[i] = newBackoffClient(client, rConfig.Backoff.Init, rConfig.Backoff.Max) } diff --git a/libbeat/outputs/shipper/shipper.go b/libbeat/outputs/shipper/shipper.go index bf0e77691f9e..8caa97b25f28 100644 --- a/libbeat/outputs/shipper/shipper.go +++ b/libbeat/outputs/shipper/shipper.go @@ -92,7 +92,7 @@ func makeShipper( beat beat.Info, observer outputs.Observer, cfg *conf.C, - eventsLoggerCfg logp.Config, + sensitiveLoggerCfg logp.Config, ) (outputs.Group, error) { config := defaultConfig() diff --git a/libbeat/publisher/pipeline/controller.go b/libbeat/publisher/pipeline/controller.go index bcaaca438fd7..5b49bccfb2d4 100644 --- a/libbeat/publisher/pipeline/controller.go +++ b/libbeat/publisher/pipeline/controller.go @@ -180,7 +180,7 @@ func (c *outputController) Set(outGrp outputs.Group) { // Reload the output func (c *outputController) Reload( cfg *reload.ConfigWithMeta, - eventsLoggerCfg logp.Config, + sensitiveLoggerCfg logp.Config, outFactory func(outputs.Observer, conf.Namespace, logp.Config) (outputs.Group, error), ) error { outCfg := conf.Namespace{} @@ -192,7 +192,7 @@ func (c *outputController) Reload( output, err := loadOutput(c.monitors, func(stats outputs.Observer) (string, outputs.Group, error) { name := outCfg.Name() - out, err := outFactory(stats, outCfg, eventsLoggerCfg) + out, err := outFactory(stats, outCfg, sensitiveLoggerCfg) return name, out, err }) if err != nil { diff --git a/libbeat/publisher/pipeline/pipeline.go b/libbeat/publisher/pipeline/pipeline.go index 7076b379fe58..3414950538fc 100644 --- a/libbeat/publisher/pipeline/pipeline.go +++ b/libbeat/publisher/pipeline/pipeline.go @@ -111,7 +111,7 @@ const ( type OutputReloader interface { Reload( cfg *reload.ConfigWithMeta, - eventsLoggerCfg logp.Config, + sensitiveLoggerCfg logp.Config, factory func(outputs.Observer, conf.Namespace, logp.Config) (outputs.Group, error), ) error } diff --git a/libbeat/publisher/pipeline/stress/out.go b/libbeat/publisher/pipeline/stress/out.go index fc51f24e7d57..5fa5260861a6 100644 --- a/libbeat/publisher/pipeline/stress/out.go +++ b/libbeat/publisher/pipeline/stress/out.go @@ -56,7 +56,7 @@ func init() { outputs.RegisterType("test", makeTestOutput) } -func makeTestOutput(_ outputs.IndexManager, beat beat.Info, observer outputs.Observer, cfg *conf.C, eventsLoggerCfg logp.Config) (outputs.Group, error) { +func makeTestOutput(_ outputs.IndexManager, beat beat.Info, observer outputs.Observer, cfg *conf.C, sensitiveLoggerCfg logp.Config) (outputs.Group, error) { config := defaultTestOutputConfig if err := cfg.Unpack(&config); err != nil { return outputs.Fail(err) diff --git a/metricbeat/metricbeat.reference.yml b/metricbeat/metricbeat.reference.yml index 
4420d4a2dc6e..474656912e73 100644 --- a/metricbeat/metricbeat.reference.yml +++ b/metricbeat/metricbeat.reference.yml @@ -2403,14 +2403,14 @@ logging.files: # Having a different log file for raw events also prevents event data # from drowning out the regular log files. -#logging.events: +#logging.sensitive: #files: # Configure the path where the logs are written. The default is the logs directory # under the home path (the binary location). #path: /var/log/metricbeat # The name of the files where the logs are written to. - #name: metricbeat-events-data + #name: metricbeat-sensitive # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. diff --git a/metricbeat/metricbeat.yml b/metricbeat/metricbeat.yml index 3925d12b82c8..84fd87726aca 100644 --- a/metricbeat/metricbeat.yml +++ b/metricbeat/metricbeat.yml @@ -147,14 +147,14 @@ processors: # log messages, a different log file, only for log entries containing raw events, # is used. It will use the same level, selectors and all other configurations # from the default logger, but it will have it's own file configuration. -#logging.events: +#logging.sensitive: #files: # Configure the path where the logs are written. The default is the logs directory # under the home path (the binary location). #path: /var/log/metricbeat # The name of the files where the logs are written to. - #name: metricbeat-events-data + #name: metricbeat-sensitive # ============================= X-Pack Monitoring ============================== # Metricbeat can export internal metrics to a central Elasticsearch monitoring diff --git a/packetbeat/packetbeat.reference.yml b/packetbeat/packetbeat.reference.yml index 2c51bdb8c91b..0d0edfe891ae 100644 --- a/packetbeat/packetbeat.reference.yml +++ b/packetbeat/packetbeat.reference.yml @@ -2019,14 +2019,14 @@ logging.files: # Having a different log file for raw events also prevents event data # from drowning out the regular log files. -#logging.events: +#logging.sensitive: #files: # Configure the path where the logs are written. The default is the logs directory # under the home path (the binary location). #path: /var/log/packetbeat # The name of the files where the logs are written to. - #name: packetbeat-events-data + #name: packetbeat-sensitive # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. diff --git a/packetbeat/packetbeat.yml b/packetbeat/packetbeat.yml index a5026fdbb353..0590a19bae8e 100644 --- a/packetbeat/packetbeat.yml +++ b/packetbeat/packetbeat.yml @@ -275,14 +275,14 @@ processors: # log messages, a different log file, only for log entries containing raw events, # is used. It will use the same level, selectors and all other configurations # from the default logger, but it will have it's own file configuration. -#logging.events: +#logging.sensitive: #files: # Configure the path where the logs are written. The default is the logs directory # under the home path (the binary location). #path: /var/log/packetbeat # The name of the files where the logs are written to. 
- #name: packetbeat-events-data + #name: packetbeat-sensitive # ============================= X-Pack Monitoring ============================== # Packetbeat can export internal metrics to a central Elasticsearch monitoring diff --git a/winlogbeat/winlogbeat.reference.yml b/winlogbeat/winlogbeat.reference.yml index 78189b2e3c12..10a0a06b3763 100644 --- a/winlogbeat/winlogbeat.reference.yml +++ b/winlogbeat/winlogbeat.reference.yml @@ -1435,14 +1435,14 @@ logging.files: # Having a different log file for raw events also prevents event data # from drowning out the regular log files. -#logging.events: +#logging.sensitive: #files: # Configure the path where the logs are written. The default is the logs directory # under the home path (the binary location). #path: /var/log/winlogbeat # The name of the files where the logs are written to. - #name: winlogbeat-events-data + #name: winlogbeat-sensitive # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. diff --git a/winlogbeat/winlogbeat.yml b/winlogbeat/winlogbeat.yml index 012bee36190b..4de803e9d067 100644 --- a/winlogbeat/winlogbeat.yml +++ b/winlogbeat/winlogbeat.yml @@ -160,14 +160,14 @@ processors: # log messages, a different log file, only for log entries containing raw events, # is used. It will use the same level, selectors and all other configurations # from the default logger, but it will have it's own file configuration. -#logging.events: +#logging.sensitive: #files: # Configure the path where the logs are written. The default is the logs directory # under the home path (the binary location). #path: /var/log/winlogbeat # The name of the files where the logs are written to. - #name: winlogbeat-events-data + #name: winlogbeat-sensitive # ============================= X-Pack Monitoring ============================== # Winlogbeat can export internal metrics to a central Elasticsearch monitoring diff --git a/x-pack/auditbeat/auditbeat.reference.yml b/x-pack/auditbeat/auditbeat.reference.yml index 1fc28d0cb4c7..b821841d121f 100644 --- a/x-pack/auditbeat/auditbeat.reference.yml +++ b/x-pack/auditbeat/auditbeat.reference.yml @@ -1609,14 +1609,14 @@ logging.files: # Having a different log file for raw events also prevents event data # from drowning out the regular log files. -#logging.events: +#logging.sensitive: #files: # Configure the path where the logs are written. The default is the logs directory # under the home path (the binary location). #path: /var/log/auditbeat # The name of the files where the logs are written to. - #name: auditbeat-events-data + #name: auditbeat-sensitive # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. diff --git a/x-pack/auditbeat/auditbeat.yml b/x-pack/auditbeat/auditbeat.yml index 0e1dbb5c2c28..d6511d734245 100644 --- a/x-pack/auditbeat/auditbeat.yml +++ b/x-pack/auditbeat/auditbeat.yml @@ -201,14 +201,14 @@ processors: # log messages, a different log file, only for log entries containing raw events, # is used. It will use the same level, selectors and all other configurations # from the default logger, but it will have it's own file configuration. -#logging.events: +#logging.sensitive: #files: # Configure the path where the logs are written. The default is the logs directory # under the home path (the binary location). #path: /var/log/auditbeat # The name of the files where the logs are written to. 
- #name: auditbeat-events-data + #name: auditbeat-sensitive # ============================= X-Pack Monitoring ============================== # Auditbeat can export internal metrics to a central Elasticsearch monitoring diff --git a/x-pack/dockerlogbeat/pipelinemanager/libbeattools.go b/x-pack/dockerlogbeat/pipelinemanager/libbeattools.go index 774feda6765c..a2c183616fb2 100644 --- a/x-pack/dockerlogbeat/pipelinemanager/libbeattools.go +++ b/x-pack/dockerlogbeat/pipelinemanager/libbeattools.go @@ -70,11 +70,11 @@ func loadNewPipeline(logOptsConfig ContainerOutputConfig, hostname string, log * // Get the default/current logging configuration // we need some defaults to be populates otherwise Unpack will // fail - eventsLoggerCfg := logp.DefaultConfig(configure.GetEnvironment()) + sensitiveLoggerCfg := logp.DefaultConfig(configure.GetEnvironment()) // Ensure the default filename is set - if eventsLoggerCfg.Files.Name == "" { - eventsLoggerCfg.Files.Name = "dockerlogbeat-events-data" + if sensitiveLoggerCfg.Files.Name == "" { + sensitiveLoggerCfg.Files.Name = "dockerlogbeat-events-data" } pipeline, err := pipeline.LoadWithSettings( @@ -87,7 +87,7 @@ func loadNewPipeline(logOptsConfig ContainerOutputConfig, hostname string, log * pipelineCfg, func(stat outputs.Observer) (string, outputs.Group, error) { cfg := config.Output - out, err := outputs.Load(idxMgr, info, stat, cfg.Name(), cfg.Config(), eventsLoggerCfg) + out, err := outputs.Load(idxMgr, info, stat, cfg.Name(), cfg.Config(), sensitiveLoggerCfg) return cfg.Name(), out, err }, settings, diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml index 5bb862b98d05..7b5a6b0b1806 100644 --- a/x-pack/filebeat/filebeat.reference.yml +++ b/x-pack/filebeat/filebeat.reference.yml @@ -5025,14 +5025,14 @@ logging.files: # Having a different log file for raw events also prevents event data # from drowning out the regular log files. -#logging.events: +#logging.sensitive: #files: # Configure the path where the logs are written. The default is the logs directory # under the home path (the binary location). #path: /var/log/filebeat # The name of the files where the logs are written to. - #name: filebeat-events-data + #name: filebeat-sensitive # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. diff --git a/x-pack/filebeat/filebeat.yml b/x-pack/filebeat/filebeat.yml index 3add6f54a11a..863abb33bb7c 100644 --- a/x-pack/filebeat/filebeat.yml +++ b/x-pack/filebeat/filebeat.yml @@ -191,14 +191,14 @@ processors: # log messages, a different log file, only for log entries containing raw events, # is used. It will use the same level, selectors and all other configurations # from the default logger, but it will have it's own file configuration. -#logging.events: +#logging.sensitive: #files: # Configure the path where the logs are written. The default is the logs directory # under the home path (the binary location). #path: /var/log/filebeat # The name of the files where the logs are written to. 
- #name: filebeat-events-data + #name: filebeat-sensitive # ============================= X-Pack Monitoring ============================== # Filebeat can export internal metrics to a central Elasticsearch monitoring diff --git a/x-pack/functionbeat/functionbeat.reference.yml b/x-pack/functionbeat/functionbeat.reference.yml index 4e12aa9ace95..4394c1c3b126 100644 --- a/x-pack/functionbeat/functionbeat.reference.yml +++ b/x-pack/functionbeat/functionbeat.reference.yml @@ -1273,14 +1273,14 @@ logging.files: # Having a different log file for raw events also prevents event data # from drowning out the regular log files. -#logging.events: +#logging.sensitive: #files: # Configure the path where the logs are written. The default is the logs directory # under the home path (the binary location). #path: /var/log/functionbeat # The name of the files where the logs are written to. - #name: functionbeat-events-data + #name: functionbeat-sensitive # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. diff --git a/x-pack/functionbeat/functionbeat.yml b/x-pack/functionbeat/functionbeat.yml index 0544fec54fd4..52aec30c5417 100644 --- a/x-pack/functionbeat/functionbeat.yml +++ b/x-pack/functionbeat/functionbeat.yml @@ -370,14 +370,14 @@ processors: # log messages, a different log file, only for log entries containing raw events, # is used. It will use the same level, selectors and all other configurations # from the default logger, but it will have it's own file configuration. -#logging.events: +#logging.sensitive: #files: # Configure the path where the logs are written. The default is the logs directory # under the home path (the binary location). #path: /var/log/functionbeat # The name of the files where the logs are written to. - #name: functionbeat-events-data + #name: functionbeat-sensitive # ============================= X-Pack Monitoring ============================== # Functionbeat can export internal metrics to a central Elasticsearch monitoring diff --git a/x-pack/heartbeat/heartbeat.reference.yml b/x-pack/heartbeat/heartbeat.reference.yml index 1c7317042850..fceb17e7d777 100644 --- a/x-pack/heartbeat/heartbeat.reference.yml +++ b/x-pack/heartbeat/heartbeat.reference.yml @@ -1645,14 +1645,14 @@ logging.files: # Having a different log file for raw events also prevents event data # from drowning out the regular log files. -#logging.events: +#logging.sensitive: #files: # Configure the path where the logs are written. The default is the logs directory # under the home path (the binary location). #path: /var/log/heartbeat # The name of the files where the logs are written to. - #name: heartbeat-events-data + #name: heartbeat-sensitive # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. diff --git a/x-pack/heartbeat/heartbeat.yml b/x-pack/heartbeat/heartbeat.yml index 0b28eec374e0..be707ad671e4 100644 --- a/x-pack/heartbeat/heartbeat.yml +++ b/x-pack/heartbeat/heartbeat.yml @@ -157,14 +157,14 @@ processors: # log messages, a different log file, only for log entries containing raw events, # is used. It will use the same level, selectors and all other configurations # from the default logger, but it will have it's own file configuration. -#logging.events: +#logging.sensitive: #files: # Configure the path where the logs are written. The default is the logs directory # under the home path (the binary location). #path: /var/log/heartbeat # The name of the files where the logs are written to. 
- #name: heartbeat-events-data + #name: heartbeat-sensitive # ============================= X-Pack Monitoring ============================== # Heartbeat can export internal metrics to a central Elasticsearch monitoring diff --git a/x-pack/metricbeat/metricbeat.reference.yml b/x-pack/metricbeat/metricbeat.reference.yml index 1bd277059d15..de24f183a73f 100644 --- a/x-pack/metricbeat/metricbeat.reference.yml +++ b/x-pack/metricbeat/metricbeat.reference.yml @@ -2964,14 +2964,14 @@ logging.files: # Having a different log file for raw events also prevents event data # from drowning out the regular log files. -#logging.events: +#logging.sensitive: #files: # Configure the path where the logs are written. The default is the logs directory # under the home path (the binary location). #path: /var/log/metricbeat # The name of the files where the logs are written to. - #name: metricbeat-events-data + #name: metricbeat-sensitive # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. diff --git a/x-pack/metricbeat/metricbeat.yml b/x-pack/metricbeat/metricbeat.yml index 3925d12b82c8..84fd87726aca 100644 --- a/x-pack/metricbeat/metricbeat.yml +++ b/x-pack/metricbeat/metricbeat.yml @@ -147,14 +147,14 @@ processors: # log messages, a different log file, only for log entries containing raw events, # is used. It will use the same level, selectors and all other configurations # from the default logger, but it will have it's own file configuration. -#logging.events: +#logging.sensitive: #files: # Configure the path where the logs are written. The default is the logs directory # under the home path (the binary location). #path: /var/log/metricbeat # The name of the files where the logs are written to. - #name: metricbeat-events-data + #name: metricbeat-sensitive # ============================= X-Pack Monitoring ============================== # Metricbeat can export internal metrics to a central Elasticsearch monitoring diff --git a/x-pack/osquerybeat/osquerybeat.reference.yml b/x-pack/osquerybeat/osquerybeat.reference.yml index 9b0c50a1b0b0..a66d1947783d 100644 --- a/x-pack/osquerybeat/osquerybeat.reference.yml +++ b/x-pack/osquerybeat/osquerybeat.reference.yml @@ -992,14 +992,14 @@ logging.files: # Having a different log file for raw events also prevents event data # from drowning out the regular log files. -#logging.events: +#logging.sensitive: #files: # Configure the path where the logs are written. The default is the logs directory # under the home path (the binary location). #path: /var/log/osquerybeat # The name of the files where the logs are written to. - #name: osquerybeat-events-data + #name: osquerybeat-sensitive # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. diff --git a/x-pack/osquerybeat/osquerybeat.yml b/x-pack/osquerybeat/osquerybeat.yml index e187ba70c1e6..ac8e69d61191 100644 --- a/x-pack/osquerybeat/osquerybeat.yml +++ b/x-pack/osquerybeat/osquerybeat.yml @@ -133,14 +133,14 @@ processors: # log messages, a different log file, only for log entries containing raw events, # is used. It will use the same level, selectors and all other configurations # from the default logger, but it will have it's own file configuration. -#logging.events: +#logging.sensitive: #files: # Configure the path where the logs are written. The default is the logs directory # under the home path (the binary location). #path: /var/log/osquerybeat # The name of the files where the logs are written to. 
- #name: osquerybeat-events-data + #name: osquerybeat-sensitive # ============================= X-Pack Monitoring ============================== # Osquerybeat can export internal metrics to a central Elasticsearch monitoring diff --git a/x-pack/packetbeat/packetbeat.reference.yml b/x-pack/packetbeat/packetbeat.reference.yml index 2c51bdb8c91b..0d0edfe891ae 100644 --- a/x-pack/packetbeat/packetbeat.reference.yml +++ b/x-pack/packetbeat/packetbeat.reference.yml @@ -2019,14 +2019,14 @@ logging.files: # Having a different log file for raw events also prevents event data # from drowning out the regular log files. -#logging.events: +#logging.sensitive: #files: # Configure the path where the logs are written. The default is the logs directory # under the home path (the binary location). #path: /var/log/packetbeat # The name of the files where the logs are written to. - #name: packetbeat-events-data + #name: packetbeat-sensitive # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. diff --git a/x-pack/packetbeat/packetbeat.yml b/x-pack/packetbeat/packetbeat.yml index a5026fdbb353..0590a19bae8e 100644 --- a/x-pack/packetbeat/packetbeat.yml +++ b/x-pack/packetbeat/packetbeat.yml @@ -275,14 +275,14 @@ processors: # log messages, a different log file, only for log entries containing raw events, # is used. It will use the same level, selectors and all other configurations # from the default logger, but it will have it's own file configuration. -#logging.events: +#logging.sensitive: #files: # Configure the path where the logs are written. The default is the logs directory # under the home path (the binary location). #path: /var/log/packetbeat # The name of the files where the logs are written to. - #name: packetbeat-events-data + #name: packetbeat-sensitive # ============================= X-Pack Monitoring ============================== # Packetbeat can export internal metrics to a central Elasticsearch monitoring diff --git a/x-pack/winlogbeat/winlogbeat.reference.yml b/x-pack/winlogbeat/winlogbeat.reference.yml index b4980c973bc8..0c0910b30d03 100644 --- a/x-pack/winlogbeat/winlogbeat.reference.yml +++ b/x-pack/winlogbeat/winlogbeat.reference.yml @@ -1437,14 +1437,14 @@ logging.files: # Having a different log file for raw events also prevents event data # from drowning out the regular log files. -#logging.events: +#logging.sensitive: #files: # Configure the path where the logs are written. The default is the logs directory # under the home path (the binary location). #path: /var/log/winlogbeat # The name of the files where the logs are written to. - #name: winlogbeat-events-data + #name: winlogbeat-sensitive # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. diff --git a/x-pack/winlogbeat/winlogbeat.yml b/x-pack/winlogbeat/winlogbeat.yml index c88939331f7b..5a43d23d9091 100644 --- a/x-pack/winlogbeat/winlogbeat.yml +++ b/x-pack/winlogbeat/winlogbeat.yml @@ -161,14 +161,14 @@ processors: # log messages, a different log file, only for log entries containing raw events, # is used. It will use the same level, selectors and all other configurations # from the default logger, but it will have it's own file configuration. -#logging.events: +#logging.sensitive: #files: # Configure the path where the logs are written. The default is the logs directory # under the home path (the binary location). #path: /var/log/winlogbeat # The name of the files where the logs are written to. 
- #name: winlogbeat-events-data + #name: winlogbeat-sensitive # ============================= X-Pack Monitoring ============================== # Winlogbeat can export internal metrics to a central Elasticsearch monitoring From 8b611971201a6f290cd9004ef0e8247e2a8a8b24 Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Mon, 22 Jan 2024 19:16:37 +0100 Subject: [PATCH 18/21] remove diagnostics hook --- libbeat/cmd/instance/beat.go | 45 ------------------------------------ 1 file changed, 45 deletions(-) diff --git a/libbeat/cmd/instance/beat.go b/libbeat/cmd/instance/beat.go index 0f9b1816b1a7..6a9a8492331e 100644 --- a/libbeat/cmd/instance/beat.go +++ b/libbeat/cmd/instance/beat.go @@ -18,7 +18,6 @@ package instance import ( - "bytes" "context" cryptRand "crypto/rand" "encoding/json" @@ -32,7 +31,6 @@ import ( "net" "os" "os/user" - "path/filepath" "runtime" "runtime/debug" "strconv" @@ -404,11 +402,6 @@ func (b *Beat) createBeater(bt beat.Creator) (beat.Beater, error) { sensitiveLoggerCfg.Files.Name = sensitiveLoggerCfg.Files.Name + "-events-data" } - // Now that the events logger is configured, we can register it's diagnostic - // hook - b.Manager.RegisterDiagnosticHook("events log", - "log files containing raw events", "events_log.ndjson", - "application/x-ndjson", b.eventsLogDiagnosticsHook(sensitiveLoggerCfg)) outputFactory := b.makeOutputFactory(b.Config.Output, sensitiveLoggerCfg) settings := pipeline.Settings{ Processors: b.processors, @@ -434,44 +427,6 @@ func (b *Beat) createBeater(bt beat.Creator) (beat.Beater, error) { return beater, nil } -func (b *Beat) eventsLogDiagnosticsHook(logCfg logp.Config) func() []byte { - // Setup a no-op function to return in case of an error - data := []byte{} - fn := func() []byte { - return data - } - - glob := fmt.Sprintf("%s*.ndjson", - paths.Resolve( - paths.Logs, - filepath.Join( - logCfg.Files.Path, - logCfg.LogFilename(), - ))) - - files, err := filepath.Glob(glob) - if err != nil { - logp.Warn("could not get 'event log' files: %s", err) - return fn - } - - filesData := [][]byte{} - fn = func() []byte { - return bytes.Join(filesData, []byte{}) - } - - for _, f := range files { - logData, err := os.ReadFile(f) - if err != nil { - logp.Warn("could not read event log file '%s': %s", f, err) - return fn - } - filesData = append(filesData, logData) - } - - return fn -} - func (b *Beat) launch(settings Settings, bt beat.Creator) error { defer func() { _ = logp.Sync() From e97f5888cc1ca9d188db744a43465f6daaec214b Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Wed, 24 Jan 2024 10:31:15 +0100 Subject: [PATCH 19/21] Update log fields and replace events-data by sensitive-data --- .../{events_log_file_test.go => sensitive_log_file_test.go} | 4 ++-- libbeat/_meta/config/logging.reference.yml.tmpl | 2 +- libbeat/_meta/config/logging.yml.tmpl | 2 +- libbeat/cmd/instance/beat.go | 2 +- libbeat/outputs/elasticsearch/client.go | 6 +++--- libbeat/outputs/elasticsearch/elasticsearch.go | 2 +- libbeat/outputs/fileout/file.go | 4 ++-- libbeat/outputs/kafka/client.go | 2 +- libbeat/outputs/redis/client.go | 6 +++--- x-pack/dockerlogbeat/pipelinemanager/libbeattools.go | 2 +- 10 files changed, 16 insertions(+), 16 deletions(-) rename filebeat/tests/integration/{events_log_file_test.go => sensitive_log_file_test.go} (96%) diff --git a/filebeat/tests/integration/events_log_file_test.go b/filebeat/tests/integration/sensitive_log_file_test.go similarity index 96% rename from filebeat/tests/integration/events_log_file_test.go rename to 
filebeat/tests/integration/sensitive_log_file_test.go index ecc9e80a068d..9ddc504dd6ef 100644 --- a/filebeat/tests/integration/events_log_file_test.go +++ b/filebeat/tests/integration/sensitive_log_file_test.go @@ -60,7 +60,7 @@ logging: files: events: files: - name: filebeat-events-data + name: filebeat-sensitive-data ` func TestEventsLoggerESOutput(t *testing.T) { @@ -106,7 +106,7 @@ func TestEventsLoggerESOutput(t *testing.T) { }, time.Minute, 100*time.Millisecond, fmt.Sprintf("String '%s' not found on Filebeat logs", msg)) - glob := filepath.Join(filebeat.TempDir(), "filebeat-events-data*.ndjson") + glob := filepath.Join(filebeat.TempDir(), "filebeat-sensitive-data*.ndjson") files, err := filepath.Glob(glob) if err != nil { t.Fatalf("could not read files matching glob '%s': %s", glob, err) diff --git a/libbeat/_meta/config/logging.reference.yml.tmpl b/libbeat/_meta/config/logging.reference.yml.tmpl index 1cc018bbf3e1..2b65512d9998 100644 --- a/libbeat/_meta/config/logging.reference.yml.tmpl +++ b/libbeat/_meta/config/logging.reference.yml.tmpl @@ -84,7 +84,7 @@ logging.files: #path: /var/log/{{.BeatName}} # The name of the files where the logs are written to. - #name: {{.BeatName}}-sensitive + #name: {{.BeatName}}-sensitive-data # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. diff --git a/libbeat/_meta/config/logging.yml.tmpl b/libbeat/_meta/config/logging.yml.tmpl index 8a1f5d18cc84..d56a3c8fd0a6 100644 --- a/libbeat/_meta/config/logging.yml.tmpl +++ b/libbeat/_meta/config/logging.yml.tmpl @@ -21,4 +21,4 @@ #path: /var/log/{{.BeatName}} # The name of the files where the logs are written to. - #name: {{.BeatName}}-sensitive + #name: {{.BeatName}}-sensitive-data diff --git a/libbeat/cmd/instance/beat.go b/libbeat/cmd/instance/beat.go index 6a9a8492331e..54e0fb8a6c52 100644 --- a/libbeat/cmd/instance/beat.go +++ b/libbeat/cmd/instance/beat.go @@ -399,7 +399,7 @@ func (b *Beat) createBeater(bt beat.Creator) (beat.Beater, error) { if sensitiveLoggerCfg.Files.Name == "" { sensitiveLoggerCfg.Files.Name = b.Info.Beat // Append the name so the files do not overwrite themselves. - sensitiveLoggerCfg.Files.Name = sensitiveLoggerCfg.Files.Name + "-events-data" + sensitiveLoggerCfg.Files.Name = sensitiveLoggerCfg.Files.Name + "-sensitive-data" } outputFactory := b.makeOutputFactory(b.Config.Output, sensitiveLoggerCfg) diff --git a/libbeat/outputs/elasticsearch/client.go b/libbeat/outputs/elasticsearch/client.go index 22df451600e4..048b90e487ac 100644 --- a/libbeat/outputs/elasticsearch/client.go +++ b/libbeat/outputs/elasticsearch/client.go @@ -437,11 +437,11 @@ func (client *Client) bulkCollectPublishFails(result eslegclient.BulkResult, dat result, _ := data[i].Content.Meta.HasKey(dead_letter_marker_field) if result { stats.nonIndexable++ - client.log.Errorf("Can't deliver to dead letter index event (status=%v). Look for events-data log file to view the event and cause.", status) + client.log.Errorf("Can't deliver to dead letter index event (status=%v). Look for sensitive-data log file to view the event and cause.", status) client.sensitiveLogger.Errorf("Can't deliver to dead letter index event %#v (status=%v): %s", data[i], status, msg) // poison pill - this will clog the pipeline if the underlying failure is non transient. } else if client.NonIndexableAction == dead_letter_index { - client.log.Warnf("Cannot index event (status=%v), trying dead letter index. 
Look for events-data log file to view the event and cause.", status) + client.log.Warnf("Cannot index event (status=%v), trying dead letter index. Look for sensitive-data log file to view the event and cause.", status) client.sensitiveLogger.Warnf("Cannot index event %#v (status=%v): %s, trying dead letter index", data[i], status, msg) if data[i].Content.Meta == nil { data[i].Content.Meta = mapstr.M{ @@ -457,7 +457,7 @@ func (client *Client) bulkCollectPublishFails(result eslegclient.BulkResult, dat } } else { // drop stats.nonIndexable++ - client.log.Warnf("Cannot index event (status=%v): dropping event! Look for events-data log file to view the event and cause.", status) + client.log.Warnf("Cannot index event (status=%v): dropping event! Look for sensitive-data log file to view the event and cause.", status) client.sensitiveLogger.Warnf("Cannot index event %#v (status=%v): %s, dropping event!", data[i], status, msg) continue } diff --git a/libbeat/outputs/elasticsearch/elasticsearch.go b/libbeat/outputs/elasticsearch/elasticsearch.go index 10d41279e50c..e44548c826bb 100644 --- a/libbeat/outputs/elasticsearch/elasticsearch.go +++ b/libbeat/outputs/elasticsearch/elasticsearch.go @@ -46,7 +46,7 @@ func makeES( sensitiveLogger := logp.NewLogger(logSelector) // Set a new Output so it writes to a different file than `log` sensitiveLogger = sensitiveLogger.WithOptions(zap.WrapCore(logp.WithFileOrStderrOutput(sensitiveLoggerCfg))) - sensitiveLogger = sensitiveLogger.With("logger.type", "sensitive") + sensitiveLogger = sensitiveLogger.With("log.type", "sensitive") if !cfg.HasField("bulk_max_size") { if err := cfg.SetInt("bulk_max_size", -1, defaultBulkSize); err != nil { diff --git a/libbeat/outputs/fileout/file.go b/libbeat/outputs/fileout/file.go index d003849b3025..2cd92b423556 100644 --- a/libbeat/outputs/fileout/file.go +++ b/libbeat/outputs/fileout/file.go @@ -68,7 +68,7 @@ func makeFileout( sensitiveLogger := logp.NewLogger(logSelector) // Set a new Output so it writes to a different file than `log` sensitiveLogger = sensitiveLogger.WithOptions(zap.WrapCore(logp.WithFileOrStderrOutput(sensitiveLoggerCfg))) - sensitiveLogger = sensitiveLogger.With("logger.type", "sensitive") + sensitiveLogger = sensitiveLogger.With("log.type", "sensitive") fo := &fileOutput{ log: logp.NewLogger(logSelector), @@ -142,7 +142,7 @@ func (out *fileOutput) Publish(_ context.Context, batch publisher.Batch) error { } else { out.log.Warnf("Failed to serialize the event: %+v", err) } - out.log.Debug("Event logged to events-data log file") + out.log.Debug("Event logged to sensitive-data log file") out.sensitiveLogger.Debugf("Failed event: %v", event) dropped++ diff --git a/libbeat/outputs/kafka/client.go b/libbeat/outputs/kafka/client.go index ae6c5455f0e1..6d1baf194527 100644 --- a/libbeat/outputs/kafka/client.go +++ b/libbeat/outputs/kafka/client.go @@ -88,7 +88,7 @@ func newKafkaClient( sensitiveLogger := logp.NewLogger(logSelector) // Set a new Output so it writes to a different file than `log` sensitiveLogger = sensitiveLogger.WithOptions(zap.WrapCore(logp.WithFileOrStderrOutput(sensitiveLoggerCfg))) - sensitiveLogger = sensitiveLogger.With("logger.type", "sensitive") + sensitiveLogger = sensitiveLogger.With("log.type", "sensitive") c := &client{ log: logp.NewLogger(logSelector), diff --git a/libbeat/outputs/redis/client.go b/libbeat/outputs/redis/client.go index 5accd79ff1ef..476ba9f8cd9a 100644 --- a/libbeat/outputs/redis/client.go +++ b/libbeat/outputs/redis/client.go @@ -82,7 +82,7 @@ func newClient( 
sensitiveLogger := logp.NewLogger(logSelector) // Set a new Output so it writes to a different file than `log` sensitiveLogger = sensitiveLogger.WithOptions(zap.WrapCore(logp.WithFileOrStderrOutput(sensitiveLoggerCfg))) - sensitiveLogger = sensitiveLogger.With("logger.type", "sensitive") + sensitiveLogger = sensitiveLogger.With("log.type", "sensitive") return &client{ log: logp.NewLogger(logSelector), @@ -330,7 +330,7 @@ func serializeEvents( for _, d := range data { serializedEvent, err := codec.Encode(index, &d.Content) if err != nil { - log.Errorf("Encoding event failed with error: %+v. Look for events-data log file to view the event", err) + log.Errorf("Encoding event failed with error: %+v. Look for sensitive-data log file to view the event", err) sensitiveLogger.Debugf("Failed event: %v", d.Content) goto failLoop } @@ -348,7 +348,7 @@ failLoop: for _, d := range rest { serializedEvent, err := codec.Encode(index, &d.Content) if err != nil { - log.Errorf("Encoding event failed with error: %+v. Look for events-data log file to view the event", err) + log.Errorf("Encoding event failed with error: %+v. Look for sensitive-data log file to view the event", err) sensitiveLogger.Debugf("Failed event: %v", d.Content) i++ continue diff --git a/x-pack/dockerlogbeat/pipelinemanager/libbeattools.go b/x-pack/dockerlogbeat/pipelinemanager/libbeattools.go index a2c183616fb2..5fd4c8eb83bd 100644 --- a/x-pack/dockerlogbeat/pipelinemanager/libbeattools.go +++ b/x-pack/dockerlogbeat/pipelinemanager/libbeattools.go @@ -74,7 +74,7 @@ func loadNewPipeline(logOptsConfig ContainerOutputConfig, hostname string, log * // Ensure the default filename is set if sensitiveLoggerCfg.Files.Name == "" { - sensitiveLoggerCfg.Files.Name = "dockerlogbeat-events-data" + sensitiveLoggerCfg.Files.Name = "dockerlogbeat-sensitive-data" } pipeline, err := pipeline.LoadWithSettings( From f573aca9cec29d49572bb5d2c0ee49453f6f17ea Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Wed, 24 Jan 2024 10:51:34 +0100 Subject: [PATCH 20/21] update protobuf to 3.19.5 There was a dependency conflict between Python protobuf 3.19.4 and 3.19.5 when running `make update`. To solve the conflict, I pinned all usages to 3.19.5 and dropped the now-stale comments that referenced the temporary pin.
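For anyone reproducing this locally, a quick sanity check after `make update` is to confirm that the installed package matches the pin. A minimal sketch (illustrative only; it assumes the Python environment used for the system tests is active):

    python -c "import google.protobuf; print(google.protobuf.__version__)"  # expected output: 3.19.5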
--- NOTICE.txt | 4 ++-- auditbeat/tests/system/requirements.txt | 2 +- dev-tools/requirements.txt | 2 +- heartbeat/tests/system/requirements.txt | 2 +- .../module/kubernetes/_meta/terraform/eks/requirements.txt | 2 +- metricbeat/tests/system/requirements.txt | 2 +- packetbeat/tests/system/gen/memcache/requirements.txt | 2 +- x-pack/functionbeat/tests/system/requirements.txt | 2 +- 8 files changed, 9 insertions(+), 9 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index fdd11f31b1cc..5b7d35e78c22 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -12701,11 +12701,11 @@ SOFTWARE -------------------------------------------------------------------------------- Dependency : github.com/belimawr/elastic-agent-libs -Version: v0.2.9-0.20240116105334-25f61a14ad41 +Version: v0.2.9-0.20240122163001-efb117578ab2 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/belimawr/elastic-agent-libs@v0.2.9-0.20240116105334-25f61a14ad41/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/belimawr/elastic-agent-libs@v0.2.9-0.20240122163001-efb117578ab2/LICENSE: Apache License Version 2.0, January 2004 diff --git a/auditbeat/tests/system/requirements.txt b/auditbeat/tests/system/requirements.txt index c2399b66f80b..a6da4ed167de 100644 --- a/auditbeat/tests/system/requirements.txt +++ b/auditbeat/tests/system/requirements.txt @@ -1 +1 @@ -protobuf==3.19.5 #Temporary change because of protobuf new version bug: https://github.com/protocolbuffers/protobuf/issues/10051 +protobuf==3.19.5 diff --git a/dev-tools/requirements.txt b/dev-tools/requirements.txt index f69927dbe3c6..abd269465e96 100644 --- a/dev-tools/requirements.txt +++ b/dev-tools/requirements.txt @@ -1,3 +1,3 @@ elasticsearch requests -protobuf==3.19.5 #Temporary change because of protobuf new version bug: https://github.com/protocolbuffers/protobuf/issues/10051 +protobuf==3.19.5 diff --git a/heartbeat/tests/system/requirements.txt b/heartbeat/tests/system/requirements.txt index c2399b66f80b..a6da4ed167de 100644 --- a/heartbeat/tests/system/requirements.txt +++ b/heartbeat/tests/system/requirements.txt @@ -1 +1 @@ -protobuf==3.19.5 #Temporary change because of protobuf new version bug: https://github.com/protocolbuffers/protobuf/issues/10051 +protobuf==3.19.5 diff --git a/metricbeat/module/kubernetes/_meta/terraform/eks/requirements.txt b/metricbeat/module/kubernetes/_meta/terraform/eks/requirements.txt index 7402ff16caa1..409d7406632c 100644 --- a/metricbeat/module/kubernetes/_meta/terraform/eks/requirements.txt +++ b/metricbeat/module/kubernetes/_meta/terraform/eks/requirements.txt @@ -10,4 +10,4 @@ rsa==4.7.2 s3transfer==0.3.3 six==1.14.0 urllib3==1.26.5 -protobuf==3.19.5 #Temporary change because of protobuf new version bug: https://github.com/protocolbuffers/protobuf/issues/10051 +protobuf==3.19.5 diff --git a/metricbeat/tests/system/requirements.txt b/metricbeat/tests/system/requirements.txt index 98713863fc0d..e58c701d9db2 100644 --- a/metricbeat/tests/system/requirements.txt +++ b/metricbeat/tests/system/requirements.txt @@ -1,4 +1,4 @@ kafka-python==1.4.3 elasticsearch==7.1.0 semver==2.8.1 -protobuf==3.19.5 #Temporary change because of protobuf new version bug: https://github.com/protocolbuffers/protobuf/issues/10051 +protobuf==3.19.5 diff --git a/packetbeat/tests/system/gen/memcache/requirements.txt b/packetbeat/tests/system/gen/memcache/requirements.txt index 1666df74b640..a1dbb5b952c4 100644 --- 
a/packetbeat/tests/system/gen/memcache/requirements.txt +++ b/packetbeat/tests/system/gen/memcache/requirements.txt @@ -1,2 +1,2 @@ pylibmc -protobuf==3.19.5 #Temporary change because of protobuf new version bug: https://github.com/protocolbuffers/protobuf/issues/10051 +protobuf==3.19.5 diff --git a/x-pack/functionbeat/tests/system/requirements.txt b/x-pack/functionbeat/tests/system/requirements.txt index c2399b66f80b..a6da4ed167de 100644 --- a/x-pack/functionbeat/tests/system/requirements.txt +++ b/x-pack/functionbeat/tests/system/requirements.txt @@ -1 +1 @@ -protobuf==3.19.5 #Temporary change because of protobuf new version bug: https://github.com/protocolbuffers/protobuf/issues/10051 +protobuf==3.19.5 From 3fc66fc7cf6a530b48134f89ad4ba00a1071f635 Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Wed, 24 Jan 2024 10:53:03 +0100 Subject: [PATCH 21/21] update default sensitive log file name on configuration files --- auditbeat/auditbeat.reference.yml | 2 +- auditbeat/auditbeat.yml | 2 +- filebeat/filebeat.reference.yml | 2 +- filebeat/filebeat.yml | 2 +- heartbeat/heartbeat.reference.yml | 2 +- heartbeat/heartbeat.yml | 2 +- metricbeat/metricbeat.reference.yml | 2 +- metricbeat/metricbeat.yml | 2 +- packetbeat/packetbeat.reference.yml | 2 +- packetbeat/packetbeat.yml | 2 +- winlogbeat/winlogbeat.reference.yml | 2 +- winlogbeat/winlogbeat.yml | 2 +- x-pack/auditbeat/auditbeat.reference.yml | 2 +- x-pack/auditbeat/auditbeat.yml | 2 +- x-pack/filebeat/filebeat.reference.yml | 2 +- x-pack/filebeat/filebeat.yml | 2 +- x-pack/functionbeat/functionbeat.reference.yml | 2 +- x-pack/functionbeat/functionbeat.yml | 2 +- x-pack/heartbeat/heartbeat.reference.yml | 2 +- x-pack/heartbeat/heartbeat.yml | 2 +- x-pack/metricbeat/metricbeat.reference.yml | 2 +- x-pack/metricbeat/metricbeat.yml | 2 +- x-pack/osquerybeat/osquerybeat.reference.yml | 2 +- x-pack/osquerybeat/osquerybeat.yml | 2 +- x-pack/packetbeat/packetbeat.reference.yml | 2 +- x-pack/packetbeat/packetbeat.yml | 2 +- x-pack/winlogbeat/winlogbeat.reference.yml | 2 +- x-pack/winlogbeat/winlogbeat.yml | 2 +- 28 files changed, 28 insertions(+), 28 deletions(-) diff --git a/auditbeat/auditbeat.reference.yml b/auditbeat/auditbeat.reference.yml index 2483daa33c4f..83d5cf61ab06 100644 --- a/auditbeat/auditbeat.reference.yml +++ b/auditbeat/auditbeat.reference.yml @@ -1560,7 +1560,7 @@ logging.files: #path: /var/log/auditbeat # The name of the files where the logs are written to. - #name: auditbeat-sensitive + #name: auditbeat-sensitive-data # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. diff --git a/auditbeat/auditbeat.yml b/auditbeat/auditbeat.yml index ba867c0f20fa..dc63f2ab1485 100644 --- a/auditbeat/auditbeat.yml +++ b/auditbeat/auditbeat.yml @@ -181,7 +181,7 @@ processors: #path: /var/log/auditbeat # The name of the files where the logs are written to. - #name: auditbeat-sensitive + #name: auditbeat-sensitive-data # ============================= X-Pack Monitoring ============================== # Auditbeat can export internal metrics to a central Elasticsearch monitoring diff --git a/filebeat/filebeat.reference.yml b/filebeat/filebeat.reference.yml index 2577942ed124..dc39590777e0 100644 --- a/filebeat/filebeat.reference.yml +++ b/filebeat/filebeat.reference.yml @@ -2656,7 +2656,7 @@ logging.files: #path: /var/log/filebeat # The name of the files where the logs are written to. - #name: filebeat-sensitive + #name: filebeat-sensitive-data # Configure log file size limit. 
If the limit is reached, log file will be # automatically rotated. diff --git a/filebeat/filebeat.yml b/filebeat/filebeat.yml index 863abb33bb7c..810604dbe1dd 100644 --- a/filebeat/filebeat.yml +++ b/filebeat/filebeat.yml @@ -198,7 +198,7 @@ processors: #path: /var/log/filebeat # The name of the files where the logs are written to. - #name: filebeat-sensitive + #name: filebeat-sensitive-data # ============================= X-Pack Monitoring ============================== # Filebeat can export internal metrics to a central Elasticsearch monitoring diff --git a/heartbeat/heartbeat.reference.yml b/heartbeat/heartbeat.reference.yml index fceb17e7d777..da19a7c7db3a 100644 --- a/heartbeat/heartbeat.reference.yml +++ b/heartbeat/heartbeat.reference.yml @@ -1652,7 +1652,7 @@ logging.files: #path: /var/log/heartbeat # The name of the files where the logs are written to. - #name: heartbeat-sensitive + #name: heartbeat-sensitive-data # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. diff --git a/heartbeat/heartbeat.yml b/heartbeat/heartbeat.yml index be707ad671e4..3c1f3756420b 100644 --- a/heartbeat/heartbeat.yml +++ b/heartbeat/heartbeat.yml @@ -164,7 +164,7 @@ processors: #path: /var/log/heartbeat # The name of the files where the logs are written to. - #name: heartbeat-sensitive + #name: heartbeat-sensitive-data # ============================= X-Pack Monitoring ============================== # Heartbeat can export internal metrics to a central Elasticsearch monitoring diff --git a/metricbeat/metricbeat.reference.yml b/metricbeat/metricbeat.reference.yml index 474656912e73..53208c0730a7 100644 --- a/metricbeat/metricbeat.reference.yml +++ b/metricbeat/metricbeat.reference.yml @@ -2410,7 +2410,7 @@ logging.files: #path: /var/log/metricbeat # The name of the files where the logs are written to. - #name: metricbeat-sensitive + #name: metricbeat-sensitive-data # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. diff --git a/metricbeat/metricbeat.yml b/metricbeat/metricbeat.yml index 84fd87726aca..2884d7aa617b 100644 --- a/metricbeat/metricbeat.yml +++ b/metricbeat/metricbeat.yml @@ -154,7 +154,7 @@ processors: #path: /var/log/metricbeat # The name of the files where the logs are written to. - #name: metricbeat-sensitive + #name: metricbeat-sensitive-data # ============================= X-Pack Monitoring ============================== # Metricbeat can export internal metrics to a central Elasticsearch monitoring diff --git a/packetbeat/packetbeat.reference.yml b/packetbeat/packetbeat.reference.yml index 0d0edfe891ae..4346ef2ea5e1 100644 --- a/packetbeat/packetbeat.reference.yml +++ b/packetbeat/packetbeat.reference.yml @@ -2026,7 +2026,7 @@ logging.files: #path: /var/log/packetbeat # The name of the files where the logs are written to. - #name: packetbeat-sensitive + #name: packetbeat-sensitive-data # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. diff --git a/packetbeat/packetbeat.yml b/packetbeat/packetbeat.yml index 0590a19bae8e..bd111eddab6e 100644 --- a/packetbeat/packetbeat.yml +++ b/packetbeat/packetbeat.yml @@ -282,7 +282,7 @@ processors: #path: /var/log/packetbeat # The name of the files where the logs are written to. 
- #name: packetbeat-sensitive + #name: packetbeat-sensitive-data # ============================= X-Pack Monitoring ============================== # Packetbeat can export internal metrics to a central Elasticsearch monitoring diff --git a/winlogbeat/winlogbeat.reference.yml b/winlogbeat/winlogbeat.reference.yml index 10a0a06b3763..d5f8b5383a0c 100644 --- a/winlogbeat/winlogbeat.reference.yml +++ b/winlogbeat/winlogbeat.reference.yml @@ -1442,7 +1442,7 @@ logging.files: #path: /var/log/winlogbeat # The name of the files where the logs are written to. - #name: winlogbeat-sensitive + #name: winlogbeat-sensitive-data # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. diff --git a/winlogbeat/winlogbeat.yml b/winlogbeat/winlogbeat.yml index 4de803e9d067..791fdd2a78c7 100644 --- a/winlogbeat/winlogbeat.yml +++ b/winlogbeat/winlogbeat.yml @@ -167,7 +167,7 @@ processors: #path: /var/log/winlogbeat # The name of the files where the logs are written to. - #name: winlogbeat-sensitive + #name: winlogbeat-sensitive-data # ============================= X-Pack Monitoring ============================== # Winlogbeat can export internal metrics to a central Elasticsearch monitoring diff --git a/x-pack/auditbeat/auditbeat.reference.yml b/x-pack/auditbeat/auditbeat.reference.yml index b821841d121f..23279e9f077f 100644 --- a/x-pack/auditbeat/auditbeat.reference.yml +++ b/x-pack/auditbeat/auditbeat.reference.yml @@ -1616,7 +1616,7 @@ logging.files: #path: /var/log/auditbeat # The name of the files where the logs are written to. - #name: auditbeat-sensitive + #name: auditbeat-sensitive-data # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. diff --git a/x-pack/auditbeat/auditbeat.yml b/x-pack/auditbeat/auditbeat.yml index d6511d734245..ca5a0f6b6715 100644 --- a/x-pack/auditbeat/auditbeat.yml +++ b/x-pack/auditbeat/auditbeat.yml @@ -208,7 +208,7 @@ processors: #path: /var/log/auditbeat # The name of the files where the logs are written to. - #name: auditbeat-sensitive + #name: auditbeat-sensitive-data # ============================= X-Pack Monitoring ============================== # Auditbeat can export internal metrics to a central Elasticsearch monitoring diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml index 7b5a6b0b1806..5f97731ecd58 100644 --- a/x-pack/filebeat/filebeat.reference.yml +++ b/x-pack/filebeat/filebeat.reference.yml @@ -5032,7 +5032,7 @@ logging.files: #path: /var/log/filebeat # The name of the files where the logs are written to. - #name: filebeat-sensitive + #name: filebeat-sensitive-data # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. diff --git a/x-pack/filebeat/filebeat.yml b/x-pack/filebeat/filebeat.yml index 863abb33bb7c..810604dbe1dd 100644 --- a/x-pack/filebeat/filebeat.yml +++ b/x-pack/filebeat/filebeat.yml @@ -198,7 +198,7 @@ processors: #path: /var/log/filebeat # The name of the files where the logs are written to. 
- #name: filebeat-sensitive + #name: filebeat-sensitive-data # ============================= X-Pack Monitoring ============================== # Filebeat can export internal metrics to a central Elasticsearch monitoring diff --git a/x-pack/functionbeat/functionbeat.reference.yml b/x-pack/functionbeat/functionbeat.reference.yml index 4394c1c3b126..1a563d5be0d7 100644 --- a/x-pack/functionbeat/functionbeat.reference.yml +++ b/x-pack/functionbeat/functionbeat.reference.yml @@ -1280,7 +1280,7 @@ logging.files: #path: /var/log/functionbeat # The name of the files where the logs are written to. - #name: functionbeat-sensitive + #name: functionbeat-sensitive-data # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. diff --git a/x-pack/functionbeat/functionbeat.yml b/x-pack/functionbeat/functionbeat.yml index 52aec30c5417..39034b2496db 100644 --- a/x-pack/functionbeat/functionbeat.yml +++ b/x-pack/functionbeat/functionbeat.yml @@ -377,7 +377,7 @@ processors: #path: /var/log/functionbeat # The name of the files where the logs are written to. - #name: functionbeat-sensitive + #name: functionbeat-sensitive-data # ============================= X-Pack Monitoring ============================== # Functionbeat can export internal metrics to a central Elasticsearch monitoring diff --git a/x-pack/heartbeat/heartbeat.reference.yml b/x-pack/heartbeat/heartbeat.reference.yml index fceb17e7d777..da19a7c7db3a 100644 --- a/x-pack/heartbeat/heartbeat.reference.yml +++ b/x-pack/heartbeat/heartbeat.reference.yml @@ -1652,7 +1652,7 @@ logging.files: #path: /var/log/heartbeat # The name of the files where the logs are written to. - #name: heartbeat-sensitive + #name: heartbeat-sensitive-data # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. diff --git a/x-pack/heartbeat/heartbeat.yml b/x-pack/heartbeat/heartbeat.yml index be707ad671e4..3c1f3756420b 100644 --- a/x-pack/heartbeat/heartbeat.yml +++ b/x-pack/heartbeat/heartbeat.yml @@ -164,7 +164,7 @@ processors: #path: /var/log/heartbeat # The name of the files where the logs are written to. - #name: heartbeat-sensitive + #name: heartbeat-sensitive-data # ============================= X-Pack Monitoring ============================== # Heartbeat can export internal metrics to a central Elasticsearch monitoring diff --git a/x-pack/metricbeat/metricbeat.reference.yml b/x-pack/metricbeat/metricbeat.reference.yml index de24f183a73f..95b2f101bcb6 100644 --- a/x-pack/metricbeat/metricbeat.reference.yml +++ b/x-pack/metricbeat/metricbeat.reference.yml @@ -2971,7 +2971,7 @@ logging.files: #path: /var/log/metricbeat # The name of the files where the logs are written to. - #name: metricbeat-sensitive + #name: metricbeat-sensitive-data # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. diff --git a/x-pack/metricbeat/metricbeat.yml b/x-pack/metricbeat/metricbeat.yml index 84fd87726aca..2884d7aa617b 100644 --- a/x-pack/metricbeat/metricbeat.yml +++ b/x-pack/metricbeat/metricbeat.yml @@ -154,7 +154,7 @@ processors: #path: /var/log/metricbeat # The name of the files where the logs are written to. 
- #name: metricbeat-sensitive + #name: metricbeat-sensitive-data # ============================= X-Pack Monitoring ============================== # Metricbeat can export internal metrics to a central Elasticsearch monitoring diff --git a/x-pack/osquerybeat/osquerybeat.reference.yml b/x-pack/osquerybeat/osquerybeat.reference.yml index a66d1947783d..78690d219ef8 100644 --- a/x-pack/osquerybeat/osquerybeat.reference.yml +++ b/x-pack/osquerybeat/osquerybeat.reference.yml @@ -999,7 +999,7 @@ logging.files: #path: /var/log/osquerybeat # The name of the files where the logs are written to. - #name: osquerybeat-sensitive + #name: osquerybeat-sensitive-data # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. diff --git a/x-pack/osquerybeat/osquerybeat.yml b/x-pack/osquerybeat/osquerybeat.yml index ac8e69d61191..1e5fa364f908 100644 --- a/x-pack/osquerybeat/osquerybeat.yml +++ b/x-pack/osquerybeat/osquerybeat.yml @@ -140,7 +140,7 @@ processors: #path: /var/log/osquerybeat # The name of the files where the logs are written to. - #name: osquerybeat-sensitive + #name: osquerybeat-sensitive-data # ============================= X-Pack Monitoring ============================== # Osquerybeat can export internal metrics to a central Elasticsearch monitoring diff --git a/x-pack/packetbeat/packetbeat.reference.yml b/x-pack/packetbeat/packetbeat.reference.yml index 0d0edfe891ae..4346ef2ea5e1 100644 --- a/x-pack/packetbeat/packetbeat.reference.yml +++ b/x-pack/packetbeat/packetbeat.reference.yml @@ -2026,7 +2026,7 @@ logging.files: #path: /var/log/packetbeat # The name of the files where the logs are written to. - #name: packetbeat-sensitive + #name: packetbeat-sensitive-data # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. diff --git a/x-pack/packetbeat/packetbeat.yml b/x-pack/packetbeat/packetbeat.yml index 0590a19bae8e..bd111eddab6e 100644 --- a/x-pack/packetbeat/packetbeat.yml +++ b/x-pack/packetbeat/packetbeat.yml @@ -282,7 +282,7 @@ processors: #path: /var/log/packetbeat # The name of the files where the logs are written to. - #name: packetbeat-sensitive + #name: packetbeat-sensitive-data # ============================= X-Pack Monitoring ============================== # Packetbeat can export internal metrics to a central Elasticsearch monitoring diff --git a/x-pack/winlogbeat/winlogbeat.reference.yml b/x-pack/winlogbeat/winlogbeat.reference.yml index 0c0910b30d03..3c14ac0e3854 100644 --- a/x-pack/winlogbeat/winlogbeat.reference.yml +++ b/x-pack/winlogbeat/winlogbeat.reference.yml @@ -1444,7 +1444,7 @@ logging.files: #path: /var/log/winlogbeat # The name of the files where the logs are written to. - #name: winlogbeat-sensitive + #name: winlogbeat-sensitive-data # Configure log file size limit. If the limit is reached, log file will be # automatically rotated. diff --git a/x-pack/winlogbeat/winlogbeat.yml b/x-pack/winlogbeat/winlogbeat.yml index 5a43d23d9091..efafa909a617 100644 --- a/x-pack/winlogbeat/winlogbeat.yml +++ b/x-pack/winlogbeat/winlogbeat.yml @@ -168,7 +168,7 @@ processors: #path: /var/log/winlogbeat # The name of the files where the logs are written to. - #name: winlogbeat-sensitive + #name: winlogbeat-sensitive-data # ============================= X-Pack Monitoring ============================== # Winlogbeat can export internal metrics to a central Elasticsearch monitoring