From 69e761234990162e1ffb73f293793b1557d62d7f Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Tue, 28 Jan 2025 09:35:50 -0500 Subject: [PATCH 01/17] Add filesource provider (#6587) The provider watches for changes of the files and updates the values of the variables when the content of the file changes. --- .../1737666699-Add-filesource-provider.yaml | 34 +++ internal/pkg/agent/cmd/include.go | 1 + .../providers/filesource/filesource.go | 286 ++++++++++++++++++ .../providers/filesource/filesource_test.go | 183 +++++++++++ 4 files changed, 504 insertions(+) create mode 100644 changelog/fragments/1737666699-Add-filesource-provider.yaml create mode 100644 internal/pkg/composable/providers/filesource/filesource.go create mode 100644 internal/pkg/composable/providers/filesource/filesource_test.go diff --git a/changelog/fragments/1737666699-Add-filesource-provider.yaml b/changelog/fragments/1737666699-Add-filesource-provider.yaml new file mode 100644 index 00000000000..6ee40c0e8db --- /dev/null +++ b/changelog/fragments/1737666699-Add-filesource-provider.yaml @@ -0,0 +1,34 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: feature + +# Change summary; a 80ish characters long description of the change. +summary: Add filesource provider + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. 
+# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment. +description: | + filesource provider watches for changes of the files and updates the values of the variables + when the content of the file changes. + +# Affected component; usually one of "elastic-agent", "fleet-server", "filebeat", "metricbeat", "auditbeat", "all", etc. +component: + +# PR URL; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: https://github.com/elastic/elastic-agent/pull/6587 + +# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. 
+issue: https://github.com/elastic/elastic-agent/issues/6362 diff --git a/internal/pkg/agent/cmd/include.go b/internal/pkg/agent/cmd/include.go index 6750727626a..ab2ce160682 100644 --- a/internal/pkg/agent/cmd/include.go +++ b/internal/pkg/agent/cmd/include.go @@ -9,6 +9,7 @@ import ( _ "github.com/elastic/elastic-agent/internal/pkg/composable/providers/agent" _ "github.com/elastic/elastic-agent/internal/pkg/composable/providers/docker" _ "github.com/elastic/elastic-agent/internal/pkg/composable/providers/env" + _ "github.com/elastic/elastic-agent/internal/pkg/composable/providers/filesource" _ "github.com/elastic/elastic-agent/internal/pkg/composable/providers/host" _ "github.com/elastic/elastic-agent/internal/pkg/composable/providers/kubernetes" _ "github.com/elastic/elastic-agent/internal/pkg/composable/providers/kubernetesleaderelection" diff --git a/internal/pkg/composable/providers/filesource/filesource.go b/internal/pkg/composable/providers/filesource/filesource.go new file mode 100644 index 00000000000..dbda0298c46 --- /dev/null +++ b/internal/pkg/composable/providers/filesource/filesource.go @@ -0,0 +1,286 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package filesource + +import ( + "context" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "slices" + "strings" + + "github.com/fsnotify/fsnotify" + + "github.com/elastic/elastic-agent/internal/pkg/composable" + "github.com/elastic/elastic-agent/internal/pkg/config" + corecomp "github.com/elastic/elastic-agent/internal/pkg/core/composable" + "github.com/elastic/elastic-agent/pkg/core/logger" +) + +func init() { + // filesource provider reads and watches for changes on files that are defined in the provider configuration. 
+ // + // To be notified when a file is change the provider will watch the parent directory of the file so if the file + // is replaced that it will read the new contents. If a file doesn't exist or the provider is unable to read + // the file then it will report the value as an empty string. + // + // If the provided path happens to be a directory then it just report the value as an empty string. + composable.Providers.MustAddContextProvider("filesource", ContextProviderBuilder) +} + +const ( + DefaultMaxSize = 4 * 1024 // 4KiB +) + +type fileSourceConfig struct { + Type string `config:"type"` + Path string `config:"path"` +} + +type providerConfig struct { + Enabled bool `config:"enabled"` // handled by composable manager (but here to show that it is part of the config) + Sources map[string]*fileSourceConfig `config:"sources"` + MaxSize int `config:"max_size"` +} + +type contextProvider struct { + logger *logger.Logger + + cfg providerConfig +} + +// Run runs the filesource context provider. 
+func (c *contextProvider) Run(ctx context.Context, comm corecomp.ContextProviderComm) error { + watcher, err := fsnotify.NewWatcher() + if err != nil { + return fmt.Errorf("failed to create watcher: %w", err) + } + defer watcher.Close() + + // invert the mapping to map paths to source names + inverted := make(map[string][]string, len(c.cfg.Sources)) + for sourceName, sourceCfg := range c.cfg.Sources { + sources, ok := inverted[sourceCfg.Path] + if !ok { + sources = []string{sourceName} + } else { + sources = append(sources, sourceName) + } + inverted[sourceCfg.Path] = sources + } + + // determine the paths to watch (watch is performed on the directories that contain the file) + // + // you cannot register the same directory multiple times so this ensures its only registered once + paths := make([]string, 0, len(c.cfg.Sources)) + for _, cfg := range c.cfg.Sources { + parent := filepath.Dir(cfg.Path) + if !slices.Contains(paths, parent) { + paths = append(paths, parent) + } + } + for _, path := range paths { + err = watcher.Add(path) + if err != nil { + return fmt.Errorf("failed to watch path %q: %w", path, err) + } + } + + // read the initial values after the watch has started + // this ensures that if the value changed between this code and the loop below + // the updated file changes will not be missed + current := make(map[string]interface{}, len(c.cfg.Sources)) + readAll := func() error { + for path, sources := range inverted { + value := c.readContents(path) + for _, source := range sources { + current[source] = value + } + } + err = comm.Set(current) + if err != nil { + return fmt.Errorf("failed to set current context: %w", err) + } + return nil + } + err = readAll() + if err != nil { + // context for the error already added + return err + } + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case err, ok := <-watcher.Errors: + if ok { + c.logger.Errorf("file watcher errored: %s", err) + if errors.Is(err, fsnotify.ErrEventOverflow) { + // the queue 
is full and some events have been dropped + // at this point we don't know what has changed + // clear the queue of events and read all again + c.logger.Debug("draining file watcher queue") + drainQueue(watcher.Events) + c.logger.Infof("reading all sources to handle overflow") + err = readAll() + if err != nil { + // context for the error already added + c.logger.Error(err) + } + } + } + case e, ok := <-watcher.Events: + if ok { + path := filepath.Clean(e.Name) + // Windows paths are case-insensitive + if runtime.GOOS == "windows" { + path = strings.ToLower(path) + } + sources, ok := inverted[path] + if !ok { + // watching the directory, it can contain files that we are not watching + // ignore these events unless we are actively watching this file + continue + } + + switch { + case e.Op&(fsnotify.Create|fsnotify.Write|fsnotify.Remove) != 0: + // file was created, updated, or deleted (update the value) + changed := false + value := c.readContents(path) + for _, source := range sources { + previous := current[source] + if previous != value { + current[source] = value + changed = true + } + } + if changed { + err = comm.Set(current) + if err != nil { + return fmt.Errorf("failed to set current context from notify event: %w", err) + } + } + } + } + } + } +} + +// readContents reads the contents of the file but places a cap on the size of the data that +// is allowed to be read. If the file is larger than the max size then it will only read up to +// the maximum size. 
+func (c *contextProvider) readContents(path string) string { + maxSize := c.cfg.MaxSize + if maxSize <= 0 { + maxSize = DefaultMaxSize + } + + f, err := os.Open(path) + if err != nil { + c.logger.Errorf("failed to open file %q: %s", path, err) + } + defer f.Close() + + // determine the size needed in the buffer to read + var size int + if info, err := f.Stat(); err == nil { + size64 := info.Size() + if int64(int(size64)) == size64 { + size = int(size64) + } + } + size++ // one byte for final read at EOF + + // don't allow more than maxSize + if size > maxSize { + size = maxSize + } + + // If a file claims a small size, read at least 512 bytes. + // In particular, files in Linux's /proc claim size 0 but + // then do not work right if read in small pieces, + // so an initial read of 1 byte would not work correctly. + if size < 512 { + size = 512 + } + + data := make([]byte, 0, size) + for { + n, err := f.Read(data[len(data):cap(data)]) + data = data[:len(data)+n] + if err != nil { + if err == io.EOF { + err = nil + } + if err != nil { + c.logger.Errorf("failed to read file %q: %s", path, err) + return "" + } + return string(data) + } + if len(data) >= cap(data) { + d := append(data[:cap(data)], 0) + data = d[:len(data)] + } + } +} + +// ContextProviderBuilder builds the context provider. 
+func ContextProviderBuilder(log *logger.Logger, c *config.Config, _ bool) (corecomp.ContextProvider, error) { + p := &contextProvider{ + logger: log, + } + if c != nil { + err := c.UnpackTo(&p.cfg) + if err != nil { + return nil, fmt.Errorf("failed to unpack config: %w", err) + } + } + for sourceName, sourceCfg := range p.cfg.Sources { + if sourceCfg.Type != "" && sourceCfg.Type != "raw" { + return nil, fmt.Errorf("%q defined an unsupported type %q", sourceName, sourceCfg.Type) + } + if sourceCfg.Path == "" { + return nil, fmt.Errorf("%q is missing a defined path", sourceName) + } + // only use an absolute path (convert from relative) + if !filepath.IsAbs(sourceCfg.Path) { + path, err := filepath.Abs(sourceCfg.Path) + if err != nil { + return nil, fmt.Errorf("%q failed to determine absolute path for %q: %w", sourceName, sourceCfg.Path, err) + } + sourceCfg.Path = path + } + path := filepath.Dir(sourceCfg.Path) + if path == "" || path == "." { + return nil, fmt.Errorf("%q has a path %q that is invalid", sourceName, sourceCfg.Path) + } + // Windows paths are case-insensitive, force lower here to simplify the implementation + if runtime.GOOS == "windows" { + sourceCfg.Path = strings.ToLower(sourceCfg.Path) + } + p.cfg.Sources[sourceName] = sourceCfg + } + return p, nil +} + +func drainQueue(e <-chan fsnotify.Event) { + for { + select { + case _, ok := <-e: + if !ok { + return + } + default: + return + } + } +} diff --git a/internal/pkg/composable/providers/filesource/filesource_test.go b/internal/pkg/composable/providers/filesource/filesource_test.go new file mode 100644 index 00000000000..12ab839a9b0 --- /dev/null +++ b/internal/pkg/composable/providers/filesource/filesource_test.go @@ -0,0 +1,183 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
+ +package filesource + +import ( + "context" + "errors" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/elastic/elastic-agent/internal/pkg/composable" + ctesting "github.com/elastic/elastic-agent/internal/pkg/composable/testing" + "github.com/elastic/elastic-agent/internal/pkg/config" + "github.com/elastic/elastic-agent/pkg/core/logger" +) + +func TestContextProvider_Config(t *testing.T) { + scenarios := []struct { + Name string + Config *config.Config + Err error + }{ + { + Name: "no path", + Config: config.MustNewConfigFrom(map[string]interface{}{ + "sources": map[string]interface{}{ + "one": map[string]interface{}{}, + }, + }), + Err: errors.New(`"one" is missing a defined path`), + }, + { + Name: "invalid type", + Config: config.MustNewConfigFrom(map[string]interface{}{ + "sources": map[string]interface{}{ + "one": map[string]interface{}{ + "type": "json", + "path": "/etc/agent/content", + }, + }, + }), + Err: errors.New(`"one" defined an unsupported type "json"`), + }, + // other errors in the config validation are hard to validate in a test + // they are just very defensive + { + Name: "valid path", + Config: config.MustNewConfigFrom(map[string]interface{}{ + "sources": map[string]interface{}{ + "one": map[string]interface{}{ + "path": "/etc/agent/content1", + }, + "two": map[string]interface{}{ + "path": "/etc/agent/content2", + }, + }, + }), + }, + } + for _, s := range scenarios { + t.Run(s.Name, func(t *testing.T) { + log, err := logger.New("filesource_test", false) + require.NoError(t, err) + + builder, _ := composable.Providers.GetContextProvider("filesource") + _, err = builder(log, s.Config, true) + if s.Err != nil { + require.Equal(t, s.Err, err) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestContextProvider(t *testing.T) { + const testTimeout = 3 * time.Second + + tmpDir := t.TempDir() + value1 := "value1" + file1 := filepath.Join(tmpDir, 
"vAlUe1_path") + require.NoError(t, os.WriteFile(file1, []byte(value1), 0o644)) + value2 := "value2" + file2 := filepath.Join(tmpDir, "vAlUe2_path") + require.NoError(t, os.WriteFile(file2, []byte(value2), 0o644)) + + log, err := logger.New("filesource_test", false) + require.NoError(t, err) + + osPath := func(path string) string { + return path + } + if runtime.GOOS == "windows" { + // on Windows configure the path as lower case even though it + // is written as non-lower case to ensure that on Windows the + // filewatcher observes the correct path + osPath = func(path string) string { + return strings.ToLower(path) + } + } + c, err := config.NewConfigFrom(map[string]interface{}{ + "sources": map[string]interface{}{ + "one": map[string]interface{}{ + "path": osPath(file1), + }, + "two": map[string]interface{}{ + "path": osPath(file2), + }, + }, + }) + require.NoError(t, err) + builder, _ := composable.Providers.GetContextProvider("filesource") + provider, err := builder(log, c, true) + require.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + comm := ctesting.NewContextComm(ctx) + setChan := make(chan map[string]interface{}) + comm.CallOnSet(func(value map[string]interface{}) { + // Forward Set's input to the test channel + setChan <- value + }) + + go func() { + _ = provider.Run(ctx, comm) + }() + + // wait for it to be called once + var current map[string]interface{} + select { + case current = <-setChan: + case <-time.After(testTimeout): + require.FailNow(t, "timeout waiting for provider to call Set") + } + + require.Equal(t, value1, current["one"]) + require.Equal(t, value2, current["two"]) + + // update the value in one + value1 = "update1" + require.NoError(t, os.WriteFile(file1, []byte(value1), 0o644)) + + // wait for file1 to be updated + for { + var oneUpdated map[string]interface{} + select { + case oneUpdated = <-setChan: + case <-time.After(testTimeout): + require.FailNow(t, "timeout waiting for provider to 
call Set") + } + + if value1 == oneUpdated["one"] && value2 == oneUpdated["two"] { + break + } + } + + // update the value in two + value2 = "update2" + require.NoError(t, os.WriteFile(file2, []byte(value2), 0o644)) + + for { + // wait for file2 to be updated + var twoUpdated map[string]interface{} + select { + case twoUpdated = <-setChan: + case <-time.After(testTimeout): + require.FailNow(t, "timeout waiting for provider to call Set") + } + + if value1 == twoUpdated["one"] && value2 == twoUpdated["two"] { + break + } + } +} From 17814cc2d17c42dd644f8e7325fc3c66d5dcecb2 Mon Sep 17 00:00:00 2001 From: Panos Koutsovasilis Date: Tue, 28 Jan 2025 20:46:07 +0200 Subject: [PATCH 02/17] Fix Fleet Enrollment Handling for Containerized Agent (#6568) * feat: add k8s integration test to check fleet enrollment * fix: container correct fleet enrollment when token changes or the agent is unenrolled * fix: update TestDiagnosticLocalConfig to include enrollment_token_hash * feat: add a simple retry logic while validate the stored agent api token * feat: add unit-test for shouldFleetEnroll * fix: improve unit-test explicitness and check for expected number of calls * fix: kind in changelog fragment * fix: split up ack-ing fleet in a separate function --- .mockery.yaml | 8 +- ...ken-change-or-the-agent-is-unenrolled.yaml | 32 ++ .../coordinator/diagnostics_test.go | 6 +- internal/pkg/agent/cmd/container.go | 188 ++++++++- internal/pkg/agent/cmd/container_test.go | 286 +++++++++++++ internal/pkg/agent/cmd/enroll_cmd.go | 13 +- internal/pkg/agent/configuration/fleet.go | 11 +- internal/pkg/crypto/hash.go | 64 +++ internal/pkg/crypto/hash_test.go | 47 +++ .../kubernetes_agent_standalone_test.go | 385 +++++++++++++++++- .../handlers/diagnostics_provider_mock.go | 4 +- .../actions/handlers/log_level_setter_mock.go | 2 +- .../actions/handlers/uploader_mock.go | 2 +- .../pkg/agent/application/info/agent_mock.go | 18 +- .../pkg/agent/storage/storage_mock.go | 198 +++++++++ 
.../pkg/fleetapi/acker/acker_mock.go | 2 +- .../pkg/fleetapi/client/sender_mock.go | 153 +++++++ .../pkg/control/v2/client/client_mock.go | 6 +- 18 files changed, 1376 insertions(+), 49 deletions(-) create mode 100644 changelog/fragments/1737552345-Fix-enrollment-for-containerised-agent-when-there-is-an-enrollement-token-change-or-the-agent-is-unenrolled.yaml create mode 100644 internal/pkg/crypto/hash.go create mode 100644 internal/pkg/crypto/hash_test.go create mode 100644 testing/mocks/internal_/pkg/agent/storage/storage_mock.go create mode 100644 testing/mocks/internal_/pkg/fleetapi/client/sender_mock.go diff --git a/.mockery.yaml b/.mockery.yaml index 42937061c85..c2445f42811 100644 --- a/.mockery.yaml +++ b/.mockery.yaml @@ -8,6 +8,12 @@ packages: github.com/elastic/elastic-agent/pkg/control/v2/client: interfaces: Client: + github.com/elastic/elastic-agent/internal/pkg/fleetapi/client: + interfaces: + Sender: + github.com/elastic/elastic-agent/internal/pkg/agent/storage: + interfaces: + Storage: github.com/elastic/elastic-agent/internal/pkg/agent/application/actions/handlers: interfaces: Uploader: @@ -22,4 +28,4 @@ packages: Acker: github.com/elastic/elastic-agent/internal/pkg/agent/application/info: interfaces: - Agent: \ No newline at end of file + Agent: diff --git a/changelog/fragments/1737552345-Fix-enrollment-for-containerised-agent-when-there-is-an-enrollement-token-change-or-the-agent-is-unenrolled.yaml b/changelog/fragments/1737552345-Fix-enrollment-for-containerised-agent-when-there-is-an-enrollement-token-change-or-the-agent-is-unenrolled.yaml new file mode 100644 index 00000000000..c7ad80de6b7 --- /dev/null +++ b/changelog/fragments/1737552345-Fix-enrollment-for-containerised-agent-when-there-is-an-enrollement-token-change-or-the-agent-is-unenrolled.yaml @@ -0,0 +1,32 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: 
fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: bug-fix + +# Change summary; a 80ish characters long description of the change. +summary: Fix enrollment for containerised agent when enrollment token changes or the agent is unenrolled + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment. +#description: + +# Affected component; usually one of "elastic-agent", "fleet-server", "filebeat", "metricbeat", "auditbeat", "all", etc. +component: elastic-agent + +# PR URL; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: https://github.com/elastic/elastic-agent/pull/6568 + +# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. 
+issue: https://github.com/elastic/elastic-agent/issues/3586 diff --git a/internal/pkg/agent/application/coordinator/diagnostics_test.go b/internal/pkg/agent/application/coordinator/diagnostics_test.go index 307831c1b50..6e7f4309c85 100644 --- a/internal/pkg/agent/application/coordinator/diagnostics_test.go +++ b/internal/pkg/agent/application/coordinator/diagnostics_test.go @@ -57,8 +57,9 @@ func TestDiagnosticLocalConfig(t *testing.T) { // local-config hook correctly returns it. cfg := &configuration.Configuration{ Fleet: &configuration.FleetAgentConfig{ - Enabled: true, - AccessAPIKey: "test-key", + Enabled: true, + AccessAPIKey: "test-key", + EnrollmentTokenHash: "test-hash", Client: remote.Config{ Protocol: "test-protocol", }, @@ -119,6 +120,7 @@ agent: fleet: enabled: true access_api_key: "test-key" + enrollment_token_hash: "test-hash" agent: protocol: "test-protocol" ` diff --git a/internal/pkg/agent/cmd/container.go b/internal/pkg/agent/cmd/container.go index 1f5a62409b2..697709c90b6 100644 --- a/internal/pkg/agent/cmd/container.go +++ b/internal/pkg/agent/cmd/container.go @@ -6,6 +6,8 @@ package cmd import ( "bytes" + "context" + "encoding/base64" "encoding/json" "fmt" "io" @@ -15,12 +17,14 @@ import ( "os/exec" "path/filepath" "regexp" + "slices" "strconv" "strings" "sync" "syscall" "time" + "github.com/cenkalti/backoff/v4" "github.com/spf13/cobra" "gopkg.in/yaml.v2" @@ -31,8 +35,13 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" + "github.com/elastic/elastic-agent/internal/pkg/agent/storage" "github.com/elastic/elastic-agent/internal/pkg/cli" "github.com/elastic/elastic-agent/internal/pkg/config" + "github.com/elastic/elastic-agent/internal/pkg/crypto" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + fleetclient "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" + 
"github.com/elastic/elastic-agent/internal/pkg/remote" "github.com/elastic/elastic-agent/pkg/component" "github.com/elastic/elastic-agent/pkg/core/logger" "github.com/elastic/elastic-agent/pkg/core/process" @@ -270,20 +279,9 @@ func containerCmd(streams *cli.IOStreams) error { func runContainerCmd(streams *cli.IOStreams, cfg setupConfig) error { var err error - var client *kibana.Client - executable, err := os.Executable() - if err != nil { - return err - } initTimeout := envTimeout(fleetInitTimeoutName) - _, err = os.Stat(paths.AgentConfigFile()) - if !os.IsNotExist(err) && !cfg.Fleet.Force { - // already enrolled, just run the standard run - return run(containerCfgOverrides, false, initTimeout, isContainer) - } - if cfg.FleetServer.Enable { err = ensureServiceToken(streams, &cfg) if err != nil { @@ -291,15 +289,17 @@ func runContainerCmd(streams *cli.IOStreams, cfg setupConfig) error { } } - if cfg.Fleet.Enroll { + shouldEnroll, err := shouldFleetEnroll(cfg) + if err != nil { + return err + } + if shouldEnroll { var policy *kibanaPolicy token := cfg.Fleet.EnrollmentToken if token == "" && !cfg.FleetServer.Enable { - if client == nil { - client, err = kibanaClient(cfg.Kibana, cfg.Kibana.Headers) - if err != nil { - return err - } + client, err := kibanaClient(cfg.Kibana, cfg.Kibana.Headers) + if err != nil { + return err } policy, err = kibanaFetchPolicy(cfg, client, streams) if err != nil { @@ -318,6 +318,11 @@ func runContainerCmd(streams *cli.IOStreams, cfg setupConfig) error { logInfo(streams, "Policy selected for enrollment: ", policyID) } + executable, err := os.Executable() + if err != nil { + return err + } + cmdArgs, err := buildEnrollArgs(cfg, token, policyID) if err != nil { return err @@ -989,3 +994,152 @@ func isContainer(detail component.PlatformDetail) component.PlatformDetail { detail.OS = component.Container return detail } + +var ( + newFleetClient = func(log *logger.Logger, apiKey string, cfg remote.Config) (fleetclient.Sender, error) { + return 
fleetclient.NewAuthWithConfig(log, apiKey, cfg) + } + newEncryptedDiskStore = storage.NewEncryptedDiskStore + statAgentConfigFile = os.Stat +) + +// agentInfo implements the AgentInfo interface, and it used in shouldFleetEnroll. +type agentInfo struct { + id string +} + +func (a *agentInfo) AgentID() string { + return a.id +} + +// shouldFleetEnroll returns true if the elastic-agent should enroll to fleet. +func shouldFleetEnroll(setupCfg setupConfig) (bool, error) { + if !setupCfg.Fleet.Enroll { + // Enrollment is explicitly disabled in the setup configuration. + return false, nil + } + + if setupCfg.Fleet.Force { + // Enrollment is explicitly enforced by the setup configuration. + return true, nil + } + + agentCfgFilePath := paths.AgentConfigFile() + _, err := statAgentConfigFile(agentCfgFilePath) + if os.IsNotExist(err) { + // The agent configuration file does not exist, so enrollment is required. + return true, nil + } + + ctx := context.Background() + store, err := newEncryptedDiskStore(ctx, agentCfgFilePath) + if err != nil { + return false, fmt.Errorf("failed to instantiate encrypted disk store: %w", err) + } + + reader, err := store.Load() + if err != nil { + return false, fmt.Errorf("failed to load from disk store: %w", err) + } + + cfg, err := config.NewConfigFrom(reader) + if err != nil { + return false, fmt.Errorf("failed to read from disk store: %w", err) + } + + storedConfig, err := configuration.NewFromConfig(cfg) + if err != nil { + return false, fmt.Errorf("failed to read from disk store: %w", err) + } + + storedFleetHosts := storedConfig.Fleet.Client.GetHosts() + if len(storedFleetHosts) == 0 || !slices.Contains(storedFleetHosts, setupCfg.Fleet.URL) { + // The Fleet URL in the setup does not exist in the stored configuration, so enrollment is required. + return true, nil + } + + // Evaluate the stored enrollment token hash against the setup enrollment token if both are present. 
+ // Note that when "upgrading" from an older agent version the enrollment token hash will not exist + // in the stored configuration. + if len(storedConfig.Fleet.EnrollmentTokenHash) > 0 && len(setupCfg.Fleet.EnrollmentToken) > 0 { + enrollmentHashBytes, err := base64.StdEncoding.DecodeString(storedConfig.Fleet.EnrollmentTokenHash) + if err != nil { + return false, fmt.Errorf("failed to decode hash: %w", err) + } + + err = crypto.ComparePBKDF2HashAndPassword(enrollmentHashBytes, []byte(setupCfg.Fleet.EnrollmentToken)) + switch { + case errors.Is(err, crypto.ErrMismatchedHashAndPassword): + // The stored enrollment token hash does not match the new token, so enrollment is required. + return true, nil + case err != nil: + return false, fmt.Errorf("failed to compare hash: %w", err) + } + } + + // Validate the stored API token to check if the agent is still authorized with Fleet. + log, err := logger.New("fleet_client", false) + if err != nil { + return false, fmt.Errorf("failed to create logger: %w", err) + } + fc, err := newFleetClient(log, storedConfig.Fleet.AccessAPIKey, storedConfig.Fleet.Client) + if err != nil { + return false, fmt.Errorf("failed to create fleet client: %w", err) + } + + // Perform an ACK request with **empty events** to verify the validity of the API token. + // If the agent has been manually un-enrolled through the Kibana UI, the ACK request will fail due to an invalid API token. + // In such cases, the agent should automatically re-enroll and "recover" their enrollment status without manual intervention, + // maintaining seamless operation. + err = ackFleet(ctx, fc, storedConfig.Fleet.Info.ID) + switch { + case errors.Is(err, fleetclient.ErrInvalidAPIKey): + // The API key is invalid, so enrollment is required. 
+ return true, nil + case err != nil: + return false, fmt.Errorf("failed to validate api token: %w", err) + } + + // Update the stored enrollment token hash if there is no previous enrollment token hash + // (can happen when "upgrading" from an older version of the agent) and setup enrollment token is present. + if len(storedConfig.Fleet.EnrollmentTokenHash) == 0 && len(setupCfg.Fleet.EnrollmentToken) > 0 { + enrollmentHashBytes, err := crypto.GeneratePBKDF2FromPassword([]byte(setupCfg.Fleet.EnrollmentToken)) + if err != nil { + return false, errors.New("failed to generate enrollment hash") + } + enrollmentTokenHash := base64.StdEncoding.EncodeToString(enrollmentHashBytes) + storedConfig.Fleet.EnrollmentTokenHash = enrollmentTokenHash + + data, err := yaml.Marshal(storedConfig) + if err != nil { + return false, errors.New("could not marshal config") + } + + if err := safelyStoreAgentInfo(store, bytes.NewReader(data)); err != nil { + return false, fmt.Errorf("failed to store agent config: %w", err) + } + } + + return false, nil +} + +// ackFleet performs an ACK request to the fleet server with **empty events**. 
+func ackFleet(ctx context.Context, client fleetclient.Sender, agentID string) error { + const retryInterval = time.Second + const maxRetries = 3 + ackRequest := &fleetapi.AckRequest{Events: nil} + ackCMD := fleetapi.NewAckCmd(&agentInfo{agentID}, client) + retries := 0 + return backoff.Retry(func() error { + retries++ + _, err := ackCMD.Execute(ctx, ackRequest) + switch { + case err == nil: + return nil + case errors.Is(err, fleetclient.ErrInvalidAPIKey) || retries == maxRetries: + return backoff.Permanent(err) + default: + return err + } + }, &backoff.ConstantBackOff{Interval: retryInterval}) +} diff --git a/internal/pkg/agent/cmd/container_test.go b/internal/pkg/agent/cmd/container_test.go index 1136985a4e6..7a8ebb6a504 100644 --- a/internal/pkg/agent/cmd/container_test.go +++ b/internal/pkg/agent/cmd/container_test.go @@ -5,18 +5,34 @@ package cmd import ( + "context" + "encoding/base64" "encoding/json" + "errors" + "io" "net/http" "net/http/httptest" + "os" "strings" "testing" "time" + "gopkg.in/yaml.v2" + + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/elastic/elastic-agent-libs/kibana" + "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" + "github.com/elastic/elastic-agent/internal/pkg/agent/storage" "github.com/elastic/elastic-agent/internal/pkg/cli" "github.com/elastic/elastic-agent/internal/pkg/config" + "github.com/elastic/elastic-agent/internal/pkg/crypto" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" + "github.com/elastic/elastic-agent/internal/pkg/remote" + "github.com/elastic/elastic-agent/pkg/core/logger" + mockStorage "github.com/elastic/elastic-agent/testing/mocks/internal_/pkg/agent/storage" + mockFleetClient "github.com/elastic/elastic-agent/testing/mocks/internal_/pkg/fleetapi/client" ) func TestEnvWithDefault(t *testing.T) { @@ -241,3 +257,273 @@ func TestKibanaFetchToken(t *testing.T) { require.Equal(t, "apiKey", ak) }) } + +func TestShouldEnroll(t *testing.T) { + 
enrollmentToken := "test-token" + enrollmentTokenHash, err := crypto.GeneratePBKDF2FromPassword([]byte(enrollmentToken)) + require.NoError(t, err) + enrollmentTokenHashBase64 := base64.StdEncoding.EncodeToString(enrollmentTokenHash) + + enrollmentTokenOther := "test-token-other" + + fleetNetworkErr := errors.New("fleet network error") + for name, tc := range map[string]struct { + cfg setupConfig + statFn func(path string) (os.FileInfo, error) + encryptedDiskStoreFn func(t *testing.T, savedConfig *configuration.Configuration) storage.Storage + fleetClientFn func(t *testing.T) client.Sender + expectedSavedConfig func(t *testing.T, savedConfig *configuration.Configuration) + expectedShouldEnroll bool + expectedErr error + }{ + "should not enroll if fleet enroll is disabled": { + cfg: setupConfig{Fleet: fleetConfig{Enroll: false}}, + expectedShouldEnroll: false, + }, + "should enroll if fleet force is true": { + cfg: setupConfig{Fleet: fleetConfig{Enroll: true, Force: true}}, + expectedShouldEnroll: true, + }, + "should enroll if config file does not exist": { + statFn: func(path string) (os.FileInfo, error) { return nil, os.ErrNotExist }, + cfg: setupConfig{Fleet: fleetConfig{Enroll: true, Force: true}}, + expectedShouldEnroll: true, + }, + "should enroll on fleet url change": { + statFn: func(path string) (os.FileInfo, error) { return nil, nil }, + cfg: setupConfig{Fleet: fleetConfig{Enroll: true, URL: "host1"}}, + encryptedDiskStoreFn: func(t *testing.T, savedConfig *configuration.Configuration) storage.Storage { + m := mockStorage.NewStorage(t) + m.On("Load").Return(io.NopCloser(strings.NewReader(`fleet: + enabled: true + access_api_key: "test-key" + enrollment_token_hash: "test-hash" + hosts: + - host2 + - host3 + agent: + protocol: "https"`)), nil).Once() + return m + }, + expectedShouldEnroll: true, + }, + "should enroll on fleet token change": { + statFn: func(path string) (os.FileInfo, error) { return nil, nil }, + cfg: setupConfig{Fleet: fleetConfig{Enroll: 
true, URL: "host1", EnrollmentToken: enrollmentTokenOther}}, + encryptedDiskStoreFn: func(t *testing.T, savedConfig *configuration.Configuration) storage.Storage { + m := mockStorage.NewStorage(t) + m.On("Load").Return(io.NopCloser(strings.NewReader(`fleet: + enabled: true + access_api_key: "test-key" + enrollment_token_hash: "`+enrollmentTokenHashBase64+`" + hosts: + - host1 + - host2 + - host3 + agent: + protocol: "https"`)), nil).Once() + return m + }, + expectedShouldEnroll: true, + }, + "should enroll on unauthorized api": { + statFn: func(path string) (os.FileInfo, error) { return nil, nil }, + cfg: setupConfig{Fleet: fleetConfig{Enroll: true, URL: "host1", EnrollmentToken: enrollmentToken}}, + encryptedDiskStoreFn: func(t *testing.T, savedConfig *configuration.Configuration) storage.Storage { + m := mockStorage.NewStorage(t) + m.On("Load").Return(io.NopCloser(strings.NewReader(`fleet: + enabled: true + access_api_key: "test-key" + enrollment_token_hash: "`+enrollmentTokenHashBase64+`" + hosts: + - host1 + - host2 + - host3 + agent: + protocol: "https"`)), nil).Once() + return m + }, + fleetClientFn: func(t *testing.T) client.Sender { + tries := 0 + m := mockFleetClient.NewSender(t) + call := m.On("Send", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + call.Run(func(args mock.Arguments) { + if tries <= 1 { + call.Return(nil, fleetNetworkErr) + } else { + call.Return(nil, client.ErrInvalidAPIKey) + } + tries++ + }).Times(3) + return m + }, + expectedShouldEnroll: true, + }, + "should not enroll on no changes": { + statFn: func(path string) (os.FileInfo, error) { return nil, nil }, + cfg: setupConfig{Fleet: fleetConfig{Enroll: true, URL: "host1", EnrollmentToken: enrollmentToken}}, + encryptedDiskStoreFn: func(t *testing.T, savedConfig *configuration.Configuration) storage.Storage { + m := mockStorage.NewStorage(t) + m.On("Load").Return(io.NopCloser(strings.NewReader(`fleet: + enabled: true + access_api_key: 
"test-key" + enrollment_token_hash: "`+enrollmentTokenHashBase64+`" + hosts: + - host1 + - host2 + - host3 + agent: + protocol: "https"`)), nil).Once() + return m + }, + fleetClientFn: func(t *testing.T) client.Sender { + tries := 0 + m := mockFleetClient.NewSender(t) + call := m.On("Send", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + call.Run(func(args mock.Arguments) { + if tries <= 1 { + call.Return(nil, fleetNetworkErr) + } else { + call.Return(&http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(strings.NewReader(`{"action": "acks", "items":[]}`)), + }, nil) + } + tries++ + }).Times(3) + return m + }, + expectedShouldEnroll: false, + }, + "should fail on fleet network errors": { + statFn: func(path string) (os.FileInfo, error) { return nil, nil }, + cfg: setupConfig{Fleet: fleetConfig{Enroll: true, URL: "host1", EnrollmentToken: enrollmentToken}}, + encryptedDiskStoreFn: func(t *testing.T, savedConfig *configuration.Configuration) storage.Storage { + m := mockStorage.NewStorage(t) + m.On("Load").Return(io.NopCloser(strings.NewReader(`fleet: + enabled: true + access_api_key: "test-key" + enrollment_token_hash: "`+enrollmentTokenHashBase64+`" + hosts: + - host1 + - host2 + - host3 + agent: + protocol: "https"`)), nil).Once() + return m + }, + fleetClientFn: func(t *testing.T) client.Sender { + m := mockFleetClient.NewSender(t) + m.On("Send", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
+ Return(nil, fleetNetworkErr).Times(3) + return m + }, + expectedErr: fleetNetworkErr, + }, + "should not update the enrollment token hash if it does not exist in setup configuration": { + statFn: func(path string) (os.FileInfo, error) { return nil, nil }, + cfg: setupConfig{Fleet: fleetConfig{Enroll: true, URL: "host1", EnrollmentToken: ""}}, + encryptedDiskStoreFn: func(t *testing.T, savedConfig *configuration.Configuration) storage.Storage { + m := mockStorage.NewStorage(t) + m.On("Load").Return(io.NopCloser(strings.NewReader(`fleet: + enabled: true + access_api_key: "test-key" + hosts: + - host1 + - host2 + - host3 + agent: + protocol: "https"`)), nil).Once() + return m + }, + fleetClientFn: func(t *testing.T) client.Sender { + m := mockFleetClient.NewSender(t) + m.On("Send", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(&http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(strings.NewReader(`{"action": "acks", "items":[]}`)), + }, nil).Once() + return m + }, + expectedShouldEnroll: false, + }, + "should not enroll on no changes and update the stored enrollment token hash": { + statFn: func(path string) (os.FileInfo, error) { return nil, nil }, + cfg: setupConfig{Fleet: fleetConfig{Enroll: true, URL: "host1", EnrollmentToken: enrollmentToken}}, + encryptedDiskStoreFn: func(t *testing.T, savedConfig *configuration.Configuration) storage.Storage { + m := mockStorage.NewStorage(t) + m.On("Load").Return(io.NopCloser(strings.NewReader(`fleet: + enabled: true + access_api_key: "test-key" + hosts: + - host1 + - host2 + - host3 + agent: + protocol: "https"`)), nil).Once() + m.On("Save", mock.Anything).Run(func(args mock.Arguments) { + reader := args.Get(0).(io.Reader) + data, _ := io.ReadAll(reader) + _ = yaml.Unmarshal(data, savedConfig) + }).Return(nil).Times(0) + return m + }, + fleetClientFn: func(t *testing.T) client.Sender { + m := mockFleetClient.NewSender(t) + m.On("Send", mock.Anything, 
 mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(&http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(strings.NewReader(`{"action": "acks", "items":[]}`)), + }, nil).Once() + return m + }, + expectedSavedConfig: func(t *testing.T, savedConfig *configuration.Configuration) { + require.NotNil(t, savedConfig) + require.NotNil(t, savedConfig.Fleet) + enrollmentTokenHash, err := base64.StdEncoding.DecodeString(savedConfig.Fleet.EnrollmentTokenHash) + require.NoError(t, err) + require.NoError(t, crypto.ComparePBKDF2HashAndPassword(enrollmentTokenHash, []byte(enrollmentToken))) + }, + expectedShouldEnroll: false, + }, + } { + t.Run(name, func(t *testing.T) { + savedConfig := &configuration.Configuration{} + if tc.statFn != nil { + oldStatFn := statAgentConfigFile + statAgentConfigFile = tc.statFn + t.Cleanup(func() { + statAgentConfigFile = oldStatFn + }) + } + if tc.encryptedDiskStoreFn != nil { + oldEncryptedDiskStore := newEncryptedDiskStore + newEncryptedDiskStore = func(ctx context.Context, target string, opts ...storage.EncryptedOptionFunc) (storage.Storage, error) { + return tc.encryptedDiskStoreFn(t, savedConfig), nil + } + t.Cleanup(func() { + newEncryptedDiskStore = oldEncryptedDiskStore + }) + } + if tc.fleetClientFn != nil { + oldFleetClient := newFleetClient + newFleetClient = func(log *logger.Logger, apiKey string, cfg remote.Config) (client.Sender, error) { + return tc.fleetClientFn(t), nil + } + t.Cleanup(func() { + newFleetClient = oldFleetClient + }) + } + actualShouldEnroll, err := shouldFleetEnroll(tc.cfg) + if tc.expectedErr != nil { + require.ErrorIs(t, err, tc.expectedErr) + return + } + require.NoError(t, err) + require.Equal(t, tc.expectedShouldEnroll, actualShouldEnroll) + if tc.expectedSavedConfig != nil { + tc.expectedSavedConfig(t, savedConfig) + } + }) + } +} diff --git a/internal/pkg/agent/cmd/enroll_cmd.go b/internal/pkg/agent/cmd/enroll_cmd.go index 28f5c135794..51d87e0e0bf --- 
a/internal/pkg/agent/cmd/enroll_cmd.go +++ b/internal/pkg/agent/cmd/enroll_cmd.go @@ -7,6 +7,7 @@ package cmd import ( "bytes" "context" + "encoding/base64" "fmt" "io" "math/rand/v2" @@ -33,6 +34,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/core/authority" "github.com/elastic/elastic-agent/internal/pkg/core/backoff" monitoringConfig "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" + "github.com/elastic/elastic-agent/internal/pkg/crypto" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" fleetclient "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" "github.com/elastic/elastic-agent/internal/pkg/release" @@ -586,7 +588,7 @@ func (c *enrollCmd) enroll(ctx context.Context, persistentConfig map[string]inte errors.TypeNetwork) } - fleetConfig, err := createFleetConfigFromEnroll(resp.Item.AccessAPIKey, c.remoteConfig) + fleetConfig, err := createFleetConfigFromEnroll(resp.Item.AccessAPIKey, c.options.EnrollAPIKey, c.remoteConfig) if err != nil { return err } @@ -1021,12 +1023,17 @@ func createFleetServerBootstrapConfig( return cfg, nil } -func createFleetConfigFromEnroll(accessAPIKey string, cli remote.Config) (*configuration.FleetAgentConfig, error) { +func createFleetConfigFromEnroll(accessAPIKey string, enrollmentToken string, cli remote.Config) (*configuration.FleetAgentConfig, error) { cfg := configuration.DefaultFleetAgentConfig() cfg.Enabled = true cfg.AccessAPIKey = accessAPIKey cfg.Client = cli - + enrollmentHashBytes, err := crypto.GeneratePBKDF2FromPassword([]byte(enrollmentToken)) + if err != nil { + return nil, errors.New(err, "failed to generate enrollment hash", errors.TypeConfig) + } + enrollmentTokenHash := base64.StdEncoding.EncodeToString(enrollmentHashBytes) + cfg.EnrollmentTokenHash = enrollmentTokenHash if err := cfg.Valid(); err != nil { return nil, errors.New(err, "invalid enrollment options", errors.TypeConfig) } diff --git a/internal/pkg/agent/configuration/fleet.go 
b/internal/pkg/agent/configuration/fleet.go index dc1741b8694..0020018c20c 100644 --- a/internal/pkg/agent/configuration/fleet.go +++ b/internal/pkg/agent/configuration/fleet.go @@ -12,11 +12,12 @@ import ( // FleetAgentConfig is the internal configuration of the agent after the enrollment is done, // this configuration is not exposed in anyway in the elastic-agent.yml and is only internal configuration. type FleetAgentConfig struct { - Enabled bool `config:"enabled" yaml:"enabled"` - AccessAPIKey string `config:"access_api_key" yaml:"access_api_key"` - Client remote.Config `config:",inline" yaml:",inline"` - Info *AgentInfo `config:"agent" yaml:"agent"` - Server *FleetServerConfig `config:"server" yaml:"server,omitempty"` + Enabled bool `config:"enabled" yaml:"enabled"` + AccessAPIKey string `config:"access_api_key" yaml:"access_api_key"` + EnrollmentTokenHash string `config:"enrollment_token_hash" yaml:"enrollment_token_hash"` + Client remote.Config `config:",inline" yaml:",inline"` + Info *AgentInfo `config:"agent" yaml:"agent"` + Server *FleetServerConfig `config:"server" yaml:"server,omitempty"` } // Valid validates the required fields for accessing the API. diff --git a/internal/pkg/crypto/hash.go b/internal/pkg/crypto/hash.go new file mode 100644 index 00000000000..b88f0d47e34 --- /dev/null +++ b/internal/pkg/crypto/hash.go @@ -0,0 +1,64 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
+ +package crypto + +import ( + "bytes" + "crypto/hmac" + "errors" + "fmt" +) + +const ( + // https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html#pbkdf2 + hashIterations = 210_000 + hashKeyLength = 64 + hashSaltLength = 16 + hashTotalLength = hashSaltLength + hashKeyLength +) + +// ErrMismatchedHashAndPassword is the error returned from ComparePBKDF2HashAndPassword when a password and hash do +// not match. +var ErrMismatchedHashAndPassword = errors.New("hashedPassword is not the hash of the given password") + +// GeneratePBKDF2FromPassword hashes a password using PBKDF2. +func GeneratePBKDF2FromPassword(password []byte) ([]byte, error) { + // Generate a random salt + salt, err := randomBytes(hashSaltLength) + if err != nil { + return nil, fmt.Errorf("failed to generate salt: %w", err) + } + + // Write hash + // SALT|KEY + key := stretchPassword(password, salt, hashIterations, hashKeyLength) + hash := new(bytes.Buffer) + hash.Write(salt) + hash.Write(key) + + out := hash.Bytes() + if len(out) != hashTotalLength { + return nil, errors.New("written bytes do not match header size") + } + return out, nil +} + +// ComparePBKDF2HashAndPassword verifies if the hashed password matches the provided plain password. +func ComparePBKDF2HashAndPassword(hash []byte, password []byte) error { + if len(hash) != hashTotalLength { + return fmt.Errorf("hashedPassword is invalid") + } + + // Read from hash + // SALT|KEY + salt := hash[:hashSaltLength] + keyFromHash := hash[hashSaltLength:hashTotalLength] + keyFromPassword := stretchPassword(password, salt, hashIterations, hashKeyLength) + if !hmac.Equal(keyFromHash, keyFromPassword) { + return ErrMismatchedHashAndPassword + } + + return nil +} diff --git a/internal/pkg/crypto/hash_test.go b/internal/pkg/crypto/hash_test.go new file mode 100644 index 00000000000..3ef86b17ddb --- /dev/null +++ b/internal/pkg/crypto/hash_test.go @@ -0,0 +1,47 @@ +// Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package crypto + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +// Fuzzy test for GenerateFromPassword and ComparePBKDF2HashAndPassword +func FuzzGenerateAndCompare(f *testing.F) { + // Seed the fuzzer with a few example passwords + f.Add("password123") + f.Add("123456") + f.Add("!@#$%^&*()_+-=") + f.Add("longpasswordwith1234567890and@symbols") + f.Add("short") + f.Add("V2tzU2c1UUJka2Q5blFyUUJqY1c6V294dlEtWXVUV3FQajZBbzdSd0JWUQ==") + + f.Fuzz(func(t *testing.T, password string) { + if len(password) == 0 { + // Skip empty passwords to avoid unnecessary checks + return + } + + t.Log("Testing password:", password) + + // Generate hashed password + hash, err := GeneratePBKDF2FromPassword([]byte(password)) + if err != nil { + t.Errorf("Failed to generate hashed password: %v", err) + return + } + + // Verify the hashed password + err = ComparePBKDF2HashAndPassword(hash, []byte(password)) + require.NoError(t, err, "Password verification failed") + + // Negative test: modify the password slightly and check verification fails + modifiedPassword := password + "wrong" + err = ComparePBKDF2HashAndPassword(hash, []byte(modifiedPassword)) + require.ErrorIs(t, err, ErrMismatchedHashAndPassword, "Password verification succeeded") + }) +} diff --git a/testing/integration/kubernetes_agent_standalone_test.go b/testing/integration/kubernetes_agent_standalone_test.go index 3e35aa95c0f..280e25e4fbf 100644 --- a/testing/integration/kubernetes_agent_standalone_test.go +++ b/testing/integration/kubernetes_agent_standalone_test.go @@ -16,6 +16,7 @@ import ( "errors" "fmt" "io" + "net/http" "os" "path/filepath" "regexp" @@ -25,6 +26,7 @@ import ( "github.com/stretchr/testify/require" + "github.com/elastic/elastic-agent-libs/kibana" 
"github.com/elastic/go-elasticsearch/v8" appsv1 "k8s.io/api/apps/v1" @@ -369,6 +371,297 @@ func TestKubernetesAgentHelm(t *testing.T) { k8sStepRunInnerTests("name=agent-pernode-helm-agent", schedulableNodeCount, "agent"), }, }, + { + name: "helm managed agent unenrolled with different enrollment token", + steps: []k8sTestStep{ + k8sStepCreateNamespace(), + k8sStepHelmDeploy(agentK8SHelm, "helm-agent", map[string]any{ + "agent": map[string]any{ + "unprivileged": false, + "image": map[string]any{ + "repository": kCtx.agentImageRepo, + "tag": kCtx.agentImageTag, + "pullPolicy": "Never", + }, + "fleet": map[string]any{ + "enabled": true, + "url": kCtx.enrollParams.FleetURL, + "token": kCtx.enrollParams.EnrollmentToken, + "preset": "perNode", + }, + }, + }), + k8sStepCheckAgentStatus("name=agent-pernode-helm-agent", schedulableNodeCount, "agent", nil), + k8sStepRunInnerTests("name=agent-pernode-helm-agent", schedulableNodeCount, "agent"), + func(t *testing.T, ctx context.Context, kCtx k8sContext, namespace string) { + // unenroll all agents from fleet and keep track of their ids + unEnrolledIDs := map[string]struct{}{} + k8sStepForEachAgentID("name=agent-pernode-helm-agent", schedulableNodeCount, "agent", func(ctx context.Context, id string) error { + unEnrolledIDs[id] = struct{}{} + _, err = info.KibanaClient.UnEnrollAgent(ctx, kibana.UnEnrollAgentRequest{ + ID: id, + Revoke: true, + }) + return err + })(t, ctx, kCtx, namespace) + k8sStepHelmUninstall("helm-agent")(t, ctx, kCtx, namespace) + + // generate a new enrollment token and re-deploy, the helm chart since it is + // under the same release name and same namespace will have the same state + // as the previous deployment + enrollParams, err := fleettools.NewEnrollParams(ctx, info.KibanaClient) + require.NoError(t, err, "failed to create fleet enroll params") + require.NotEqual(t, kCtx.enrollParams.EnrollmentToken, enrollParams.EnrollmentToken, "enrollment token did not change") + k8sStepHelmDeploy(agentK8SHelm, 
"helm-agent", map[string]any{ + "agent": map[string]any{ + "unprivileged": false, + "image": map[string]any{ + "repository": kCtx.agentImageRepo, + "tag": kCtx.agentImageTag, + "pullPolicy": "Never", + }, + "fleet": map[string]any{ + "enabled": true, + "url": enrollParams.FleetURL, + "token": enrollParams.EnrollmentToken, + "preset": "perNode", + }, + }, + })(t, ctx, kCtx, namespace) + k8sStepCheckAgentStatus("name=agent-pernode-helm-agent", schedulableNodeCount, "agent", nil)(t, ctx, kCtx, namespace) + k8sStepRunInnerTests("name=agent-pernode-helm-agent", schedulableNodeCount, "agent")(t, ctx, kCtx, namespace) + enrolledIDs := map[string]time.Time{} + k8sStepForEachAgentID("name=agent-pernode-helm-agent", schedulableNodeCount, "agent", func(ctx context.Context, id string) error { + resp, err := kibanaGetAgent(ctx, info.KibanaClient, id) + if err != nil { + return err + } + // no ID should match the ones from the unenrolled ones + if _, exists := unEnrolledIDs[id]; exists { + return fmt.Errorf("agent with id %s found in unEnrolledIDs", id) + } + // keep track of the new enrolled ids and their enrollment time as reported by fleet + enrolledIDs[id] = resp.EnrolledAt + return nil + })(t, ctx, kCtx, namespace) + + // uninstall and reinstall but this time check that the elastic-agent is not re-enrolling + k8sStepHelmUninstall("helm-agent")(t, ctx, kCtx, namespace) + k8sStepHelmDeploy(agentK8SHelm, "helm-agent", map[string]any{ + "agent": map[string]any{ + "unprivileged": false, + "image": map[string]any{ + "repository": kCtx.agentImageRepo, + "tag": kCtx.agentImageTag, + "pullPolicy": "Never", + }, + "fleet": map[string]any{ + "enabled": true, + "url": enrollParams.FleetURL, + "token": enrollParams.EnrollmentToken, + "preset": "perNode", + }, + }, + })(t, ctx, kCtx, namespace) + k8sStepCheckAgentStatus("name=agent-pernode-helm-agent", schedulableNodeCount, "agent", nil)(t, ctx, kCtx, namespace) + k8sStepRunInnerTests("name=agent-pernode-helm-agent", 
schedulableNodeCount, "agent")(t, ctx, kCtx, namespace) + k8sStepForEachAgentID("name=agent-pernode-helm-agent", schedulableNodeCount, "agent", func(ctx context.Context, id string) error { + resp, err := kibanaGetAgent(ctx, info.KibanaClient, id) + if err != nil { + return err + } + // no ID should match the ones from the unenrolled ones + enrolledAt, exists := enrolledIDs[id] + if !exists { + return fmt.Errorf("agent with id %s not found in enrolledIDs", id) + } + + if !resp.EnrolledAt.Equal(enrolledAt) { + return fmt.Errorf("agent enrollment time is updated") + } + return nil + })(t, ctx, kCtx, namespace) + }, + }, + }, + { + name: "helm managed agent unenrolled", + steps: []k8sTestStep{ + k8sStepCreateNamespace(), + k8sStepHelmDeploy(agentK8SHelm, "helm-agent", map[string]any{ + "agent": map[string]any{ + "unprivileged": false, + "image": map[string]any{ + "repository": kCtx.agentImageRepo, + "tag": kCtx.agentImageTag, + "pullPolicy": "Never", + }, + "fleet": map[string]any{ + "enabled": true, + "url": kCtx.enrollParams.FleetURL, + "token": kCtx.enrollParams.EnrollmentToken, + "preset": "perNode", + }, + }, + }), + k8sStepCheckAgentStatus("name=agent-pernode-helm-agent", schedulableNodeCount, "agent", nil), + k8sStepRunInnerTests("name=agent-pernode-helm-agent", schedulableNodeCount, "agent"), + func(t *testing.T, ctx context.Context, kCtx k8sContext, namespace string) { + // unenroll all agents from fleet and keep track of their ids + unEnrolledIDs := map[string]struct{}{} + k8sStepForEachAgentID("name=agent-pernode-helm-agent", schedulableNodeCount, "agent", func(ctx context.Context, id string) error { + unEnrolledIDs[id] = struct{}{} + _, err = info.KibanaClient.UnEnrollAgent(ctx, kibana.UnEnrollAgentRequest{ + ID: id, + Revoke: true, + }) + return err + })(t, ctx, kCtx, namespace) + + // re-deploy with the same enrollment token, the helm chart since it is + // under the same release name and same namespace will have the same state + // as the previous 
deployment + k8sStepHelmUninstall("helm-agent")(t, ctx, kCtx, namespace) + k8sStepHelmDeploy(agentK8SHelm, "helm-agent", map[string]any{ + "agent": map[string]any{ + "unprivileged": false, + "image": map[string]any{ + "repository": kCtx.agentImageRepo, + "tag": kCtx.agentImageTag, + "pullPolicy": "Never", + }, + "fleet": map[string]any{ + "enabled": true, + "url": kCtx.enrollParams.FleetURL, + "token": kCtx.enrollParams.EnrollmentToken, + "preset": "perNode", + }, + }, + })(t, ctx, kCtx, namespace) + k8sStepCheckAgentStatus("name=agent-pernode-helm-agent", schedulableNodeCount, "agent", nil)(t, ctx, kCtx, namespace) + k8sStepRunInnerTests("name=agent-pernode-helm-agent", schedulableNodeCount, "agent")(t, ctx, kCtx, namespace) + enrolledIDs := map[string]time.Time{} + k8sStepForEachAgentID("name=agent-pernode-helm-agent", schedulableNodeCount, "agent", func(ctx context.Context, id string) error { + resp, err := kibanaGetAgent(ctx, info.KibanaClient, id) + if err != nil { + return err + } + // no ID should match the ones from the unenrolled ones + if _, exists := unEnrolledIDs[id]; exists { + return fmt.Errorf("agent with id %s found in unEnrolledIDs", id) + } + // keep track of the new enrolled ids and their enrollment time as reported by fleet + enrolledIDs[id] = resp.EnrolledAt + return nil + })(t, ctx, kCtx, namespace) + + // uninstall and reinstall but this time check that the elastic-agent is not re-enrolling + k8sStepHelmUninstall("helm-agent")(t, ctx, kCtx, namespace) + k8sStepHelmDeploy(agentK8SHelm, "helm-agent", map[string]any{ + "agent": map[string]any{ + "unprivileged": false, + "image": map[string]any{ + "repository": kCtx.agentImageRepo, + "tag": kCtx.agentImageTag, + "pullPolicy": "Never", + }, + "fleet": map[string]any{ + "enabled": true, + "url": kCtx.enrollParams.FleetURL, + "token": kCtx.enrollParams.EnrollmentToken, + "preset": "perNode", + }, + }, + })(t, ctx, kCtx, namespace) + k8sStepCheckAgentStatus("name=agent-pernode-helm-agent", 
schedulableNodeCount, "agent", nil)(t, ctx, kCtx, namespace) + k8sStepRunInnerTests("name=agent-pernode-helm-agent", schedulableNodeCount, "agent")(t, ctx, kCtx, namespace) + k8sStepForEachAgentID("name=agent-pernode-helm-agent", schedulableNodeCount, "agent", func(ctx context.Context, id string) error { + resp, err := kibanaGetAgent(ctx, info.KibanaClient, id) + if err != nil { + return err + } + // no ID should match the ones from the unenrolled ones + enrolledAt, exists := enrolledIDs[id] + if !exists { + return fmt.Errorf("agent with id %s not found in enrolledIDs", id) + } + + if !resp.EnrolledAt.Equal(enrolledAt) { + return fmt.Errorf("agent enrollment time is updated") + } + return nil + })(t, ctx, kCtx, namespace) + }, + }, + }, + { + name: "helm managed agent upgrade older version", + steps: []k8sTestStep{ + k8sStepCreateNamespace(), + k8sStepHelmDeploy(agentK8SHelm, "helm-agent", map[string]any{ + "agent": map[string]any{ + "unprivileged": false, + "image": map[string]any{ + "repository": kCtx.agentImageRepo, + "tag": "8.17.0", + "pullPolicy": "IfNotPresent", + }, + "fleet": map[string]any{ + "enabled": true, + "url": kCtx.enrollParams.FleetURL, + "token": kCtx.enrollParams.EnrollmentToken, + "preset": "perNode", + }, + }, + }), + k8sStepCheckAgentStatus("name=agent-pernode-helm-agent", schedulableNodeCount, "agent", nil), + func(t *testing.T, ctx context.Context, kCtx k8sContext, namespace string) { + enrolledIDs := map[string]time.Time{} + k8sStepForEachAgentID("name=agent-pernode-helm-agent", schedulableNodeCount, "agent", func(ctx context.Context, id string) error { + resp, err := kibanaGetAgent(ctx, info.KibanaClient, id) + if err != nil { + return err + } + // keep track of the new enrolled ids and their enrollment time as reported by fleet + enrolledIDs[id] = resp.EnrolledAt + return nil + })(t, ctx, kCtx, namespace) + k8sStepHelmUninstall("helm-agent")(t, ctx, kCtx, namespace) + k8sStepHelmDeploy(agentK8SHelm, "helm-agent", map[string]any{ + 
"agent": map[string]any{ + "unprivileged": false, + "image": map[string]any{ + "repository": kCtx.agentImageRepo, + "tag": kCtx.agentImageTag, + "pullPolicy": "Never", + }, + "fleet": map[string]any{ + "enabled": true, + "url": kCtx.enrollParams.FleetURL, + "token": kCtx.enrollParams.EnrollmentToken, + "preset": "perNode", + }, + }, + })(t, ctx, kCtx, namespace) + k8sStepCheckAgentStatus("name=agent-pernode-helm-agent", schedulableNodeCount, "agent", nil)(t, ctx, kCtx, namespace) + k8sStepRunInnerTests("name=agent-pernode-helm-agent", schedulableNodeCount, "agent")(t, ctx, kCtx, namespace) + k8sStepForEachAgentID("name=agent-pernode-helm-agent", schedulableNodeCount, "agent", func(ctx context.Context, id string) error { + resp, err := kibanaGetAgent(ctx, info.KibanaClient, id) + if err != nil { + return err + } + enrolledAt, exists := enrolledIDs[id] + if !exists { + return fmt.Errorf("agent with id %s not found in enrolledIDs", id) + } + if !resp.EnrolledAt.Equal(enrolledAt) { + return fmt.Errorf("agent enrollment time is updated") + } + return nil + })(t, ctx, kCtx, namespace) + }, + }, + }, { name: "helm managed agent default kubernetes unprivileged", steps: []k8sTestStep{ @@ -553,6 +846,25 @@ func k8sCheckAgentStatus(ctx context.Context, client klient.Client, stdout *byte } } +// k8sGetAgentID returns the agent ID for the given agent pod +func k8sGetAgentID(ctx context.Context, client klient.Client, stdout *bytes.Buffer, stderr *bytes.Buffer, + namespace string, agentPodName string, containerName string) (string, error) { + command := []string{"elastic-agent", "status", "--output=json"} + + status := atesting.AgentStatusOutput{} // clear status output + stdout.Reset() + stderr.Reset() + if err := client.Resources().ExecInPod(ctx, namespace, agentPodName, containerName, command, stdout, stderr); err != nil { + return "", err + } + + if err := json.Unmarshal(stdout.Bytes(), &status); err != nil { + return "", err + } + + return status.Info.ID, nil +} + // 
getAgentComponentState returns the component state for the given component name and a bool indicating if it exists. func getAgentComponentState(status atesting.AgentStatusOutput, componentName string) (int, bool) { for _, comp := range status.Components { @@ -1204,6 +1516,25 @@ func k8sStepCheckAgentStatus(agentPodLabelSelector string, expectedPodNumber int } } +func k8sStepForEachAgentID(agentPodLabelSelector string, expectedPodNumber int, containerName string, cb func(ctx context.Context, id string) error) k8sTestStep { + return func(t *testing.T, ctx context.Context, kCtx k8sContext, namespace string) { + perNodePodList := &corev1.PodList{} + err := kCtx.client.Resources(namespace).List(ctx, perNodePodList, func(opt *metav1.ListOptions) { + opt.LabelSelector = agentPodLabelSelector + }) + require.NoError(t, err, "failed to list pods with selector ", perNodePodList) + require.NotEmpty(t, perNodePodList.Items, "no pods found with selector ", perNodePodList) + require.Equal(t, expectedPodNumber, len(perNodePodList.Items), "unexpected number of pods found with selector ", perNodePodList) + var stdout, stderr bytes.Buffer + for _, pod := range perNodePodList.Items { + id, err := k8sGetAgentID(ctx, kCtx.client, &stdout, &stderr, namespace, pod.Name, containerName) + require.NoError(t, err, "failed to get agent id for pod %s", pod.Name) + require.NotEmpty(t, id, "agent id should not be empty") + require.NoError(t, cb(ctx, id), "callback for each agent id failed") + } + } +} + // k8sStepRunInnerTests invokes the k8s inner tests inside the pods returned by the selector. 
Note that this // step requires the agent image to be built with the testing framework as there is the point where the binary // for the inner tests is copied @@ -1231,6 +1562,24 @@ func k8sStepRunInnerTests(agentPodLabelSelector string, expectedPodNumber int, c } } +// k8sStepHelmUninstall uninstalls the helm chart with the given release name +func k8sStepHelmUninstall(releaseName string) k8sTestStep { + return func(t *testing.T, ctx context.Context, kCtx k8sContext, namespace string) { + settings := cli.New() + settings.SetNamespace(namespace) + actionConfig := &action.Configuration{} + + err := actionConfig.Init(settings.RESTClientGetter(), settings.Namespace(), "", + func(format string, v ...interface{}) {}) + require.NoError(t, err, "failed to init helm action config") + + uninstallAction := action.NewUninstall(actionConfig) + uninstallAction.Wait = true + _, err = uninstallAction.Run(releaseName) + require.NoError(t, err, "failed to uninstall helm chart") + } +} + // k8sStepHelmDeploy deploys a helm chart with the given values and the release name func k8sStepHelmDeploy(chartPath string, releaseName string, values map[string]any) k8sTestStep { return func(t *testing.T, ctx context.Context, kCtx k8sContext, namespace string) { @@ -1254,10 +1603,7 @@ func k8sStepHelmDeploy(chartPath string, releaseName string, values map[string]a uninstallAction := action.NewUninstall(actionConfig) uninstallAction.Wait = true - _, err = uninstallAction.Run(releaseName) - if err != nil { - t.Logf("failed to uninstall helm chart: %v", err) - } + _, _ = uninstallAction.Run(releaseName) }) installAction := action.NewInstall(actionConfig) @@ -1375,3 +1721,34 @@ func k8sStepCheckRestrictUpgrade(agentPodLabelSelector string, expectedPodNumber } } } + +// GetAgentResponse extends kibana.GetAgentResponse and includes the EnrolledAt field +type GetAgentResponse struct { + kibana.GetAgentResponse `json:",inline"` + EnrolledAt time.Time `json:"enrolled_at"` +} + +// kibanaGetAgent 
essentially re-implements kibana.GetAgent to extract also GetAgentResponse.EnrolledAt +func kibanaGetAgent(ctx context.Context, kc *kibana.Client, id string) (*GetAgentResponse, error) { + apiURL := fmt.Sprintf("/api/fleet/agents/%s", id) + r, err := kc.Connection.SendWithContext(ctx, http.MethodGet, apiURL, nil, nil, nil) + if err != nil { + return nil, fmt.Errorf("error calling get agent API: %w", err) + } + defer r.Body.Close() + var agentResp struct { + Item GetAgentResponse `json:"item"` + } + b, err := io.ReadAll(r.Body) + if err != nil { + return nil, fmt.Errorf("reading response body: %w", err) + } + if r.StatusCode != http.StatusOK { + return nil, fmt.Errorf("error calling get agent API: %s", string(b)) + } + err = json.Unmarshal(b, &agentResp) + if err != nil { + return nil, fmt.Errorf("unmarshalling response json: %w", err) + } + return &agentResp.Item, nil +} diff --git a/testing/mocks/internal_/pkg/agent/application/actions/handlers/diagnostics_provider_mock.go b/testing/mocks/internal_/pkg/agent/application/actions/handlers/diagnostics_provider_mock.go index 0004873c973..7a166506eaf 100644 --- a/testing/mocks/internal_/pkg/agent/application/actions/handlers/diagnostics_provider_mock.go +++ b/testing/mocks/internal_/pkg/agent/application/actions/handlers/diagnostics_provider_mock.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License 2.0; // you may not use this file except in compliance with the Elastic License 2.0. -// Code generated by mockery v2.42.2. DO NOT EDIT. +// Code generated by mockery v2.51.1. DO NOT EDIT. 
package handlers @@ -33,7 +33,7 @@ func (_m *DiagnosticsProvider) EXPECT() *DiagnosticsProvider_Expecter { return &DiagnosticsProvider_Expecter{mock: &_m.Mock} } -// DiagnosticHooks provides a mock function with given fields: +// DiagnosticHooks provides a mock function with no fields func (_m *DiagnosticsProvider) DiagnosticHooks() diagnostics.Hooks { ret := _m.Called() diff --git a/testing/mocks/internal_/pkg/agent/application/actions/handlers/log_level_setter_mock.go b/testing/mocks/internal_/pkg/agent/application/actions/handlers/log_level_setter_mock.go index 1842d03ceec..c937315d8a0 100644 --- a/testing/mocks/internal_/pkg/agent/application/actions/handlers/log_level_setter_mock.go +++ b/testing/mocks/internal_/pkg/agent/application/actions/handlers/log_level_setter_mock.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License 2.0; // you may not use this file except in compliance with the Elastic License 2.0. -// Code generated by mockery v2.42.2. DO NOT EDIT. +// Code generated by mockery v2.51.1. DO NOT EDIT. package handlers diff --git a/testing/mocks/internal_/pkg/agent/application/actions/handlers/uploader_mock.go b/testing/mocks/internal_/pkg/agent/application/actions/handlers/uploader_mock.go index e56e0ad0a88..f89f8b85641 100644 --- a/testing/mocks/internal_/pkg/agent/application/actions/handlers/uploader_mock.go +++ b/testing/mocks/internal_/pkg/agent/application/actions/handlers/uploader_mock.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License 2.0; // you may not use this file except in compliance with the Elastic License 2.0. -// Code generated by mockery v2.42.2. DO NOT EDIT. +// Code generated by mockery v2.51.1. DO NOT EDIT. 
package handlers diff --git a/testing/mocks/internal_/pkg/agent/application/info/agent_mock.go b/testing/mocks/internal_/pkg/agent/application/info/agent_mock.go index ccec9b2929e..c61b0bab787 100644 --- a/testing/mocks/internal_/pkg/agent/application/info/agent_mock.go +++ b/testing/mocks/internal_/pkg/agent/application/info/agent_mock.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License 2.0; // you may not use this file except in compliance with the Elastic License 2.0. -// Code generated by mockery v2.42.2. DO NOT EDIT. +// Code generated by mockery v2.51.1. DO NOT EDIT. package info @@ -25,7 +25,7 @@ func (_m *Agent) EXPECT() *Agent_Expecter { return &Agent_Expecter{mock: &_m.Mock} } -// AgentID provides a mock function with given fields: +// AgentID provides a mock function with no fields func (_m *Agent) AgentID() string { ret := _m.Called() @@ -70,7 +70,7 @@ func (_c *Agent_AgentID_Call) RunAndReturn(run func() string) *Agent_AgentID_Cal return _c } -// Headers provides a mock function with given fields: +// Headers provides a mock function with no fields func (_m *Agent) Headers() map[string]string { ret := _m.Called() @@ -117,7 +117,7 @@ func (_c *Agent_Headers_Call) RunAndReturn(run func() map[string]string) *Agent_ return _c } -// IsStandalone provides a mock function with given fields: +// IsStandalone provides a mock function with no fields func (_m *Agent) IsStandalone() bool { ret := _m.Called() @@ -162,7 +162,7 @@ func (_c *Agent_IsStandalone_Call) RunAndReturn(run func() bool) *Agent_IsStanda return _c } -// LogLevel provides a mock function with given fields: +// LogLevel provides a mock function with no fields func (_m *Agent) LogLevel() string { ret := _m.Called() @@ -207,7 +207,7 @@ func (_c *Agent_LogLevel_Call) RunAndReturn(run func() string) *Agent_LogLevel_C return _c } -// RawLogLevel provides a mock function with given fields: +// RawLogLevel provides a mock function with no fields func (_m 
*Agent) RawLogLevel() string { ret := _m.Called() @@ -345,7 +345,7 @@ func (_c *Agent_SetLogLevel_Call) RunAndReturn(run func(context.Context, string) return _c } -// Snapshot provides a mock function with given fields: +// Snapshot provides a mock function with no fields func (_m *Agent) Snapshot() bool { ret := _m.Called() @@ -390,7 +390,7 @@ func (_c *Agent_Snapshot_Call) RunAndReturn(run func() bool) *Agent_Snapshot_Cal return _c } -// Unprivileged provides a mock function with given fields: +// Unprivileged provides a mock function with no fields func (_m *Agent) Unprivileged() bool { ret := _m.Called() @@ -435,7 +435,7 @@ func (_c *Agent_Unprivileged_Call) RunAndReturn(run func() bool) *Agent_Unprivil return _c } -// Version provides a mock function with given fields: +// Version provides a mock function with no fields func (_m *Agent) Version() string { ret := _m.Called() diff --git a/testing/mocks/internal_/pkg/agent/storage/storage_mock.go b/testing/mocks/internal_/pkg/agent/storage/storage_mock.go new file mode 100644 index 00000000000..44f62085db7 --- /dev/null +++ b/testing/mocks/internal_/pkg/agent/storage/storage_mock.go @@ -0,0 +1,198 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +// Code generated by mockery v2.51.1. DO NOT EDIT. 
+ +package storage + +import ( + io "io" + + mock "github.com/stretchr/testify/mock" +) + +// Storage is an autogenerated mock type for the Storage type +type Storage struct { + mock.Mock +} + +type Storage_Expecter struct { + mock *mock.Mock +} + +func (_m *Storage) EXPECT() *Storage_Expecter { + return &Storage_Expecter{mock: &_m.Mock} +} + +// Exists provides a mock function with no fields +func (_m *Storage) Exists() (bool, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Exists") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func() (bool, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Storage_Exists_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Exists' +type Storage_Exists_Call struct { + *mock.Call +} + +// Exists is a helper method to define mock.On call +func (_e *Storage_Expecter) Exists() *Storage_Exists_Call { + return &Storage_Exists_Call{Call: _e.mock.On("Exists")} +} + +func (_c *Storage_Exists_Call) Run(run func()) *Storage_Exists_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Storage_Exists_Call) Return(_a0 bool, _a1 error) *Storage_Exists_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Storage_Exists_Call) RunAndReturn(run func() (bool, error)) *Storage_Exists_Call { + _c.Call.Return(run) + return _c +} + +// Load provides a mock function with no fields +func (_m *Storage) Load() (io.ReadCloser, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Load") + } + + var r0 io.ReadCloser + var r1 error + if rf, ok := ret.Get(0).(func() (io.ReadCloser, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() io.ReadCloser); ok { + r0 = rf() + } 
else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.ReadCloser) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Storage_Load_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Load' +type Storage_Load_Call struct { + *mock.Call +} + +// Load is a helper method to define mock.On call +func (_e *Storage_Expecter) Load() *Storage_Load_Call { + return &Storage_Load_Call{Call: _e.mock.On("Load")} +} + +func (_c *Storage_Load_Call) Run(run func()) *Storage_Load_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Storage_Load_Call) Return(_a0 io.ReadCloser, _a1 error) *Storage_Load_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Storage_Load_Call) RunAndReturn(run func() (io.ReadCloser, error)) *Storage_Load_Call { + _c.Call.Return(run) + return _c +} + +// Save provides a mock function with given fields: _a0 +func (_m *Storage) Save(_a0 io.Reader) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Save") + } + + var r0 error + if rf, ok := ret.Get(0).(func(io.Reader) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Storage_Save_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Save' +type Storage_Save_Call struct { + *mock.Call +} + +// Save is a helper method to define mock.On call +// - _a0 io.Reader +func (_e *Storage_Expecter) Save(_a0 interface{}) *Storage_Save_Call { + return &Storage_Save_Call{Call: _e.mock.On("Save", _a0)} +} + +func (_c *Storage_Save_Call) Run(run func(_a0 io.Reader)) *Storage_Save_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(io.Reader)) + }) + return _c +} + +func (_c *Storage_Save_Call) Return(_a0 error) *Storage_Save_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Storage_Save_Call) RunAndReturn(run func(io.Reader) 
error) *Storage_Save_Call { + _c.Call.Return(run) + return _c +} + +// NewStorage creates a new instance of Storage. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewStorage(t interface { + mock.TestingT + Cleanup(func()) +}) *Storage { + mock := &Storage{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/testing/mocks/internal_/pkg/fleetapi/acker/acker_mock.go b/testing/mocks/internal_/pkg/fleetapi/acker/acker_mock.go index d397b4b473c..79446571246 100644 --- a/testing/mocks/internal_/pkg/fleetapi/acker/acker_mock.go +++ b/testing/mocks/internal_/pkg/fleetapi/acker/acker_mock.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License 2.0; // you may not use this file except in compliance with the Elastic License 2.0. -// Code generated by mockery v2.42.2. DO NOT EDIT. +// Code generated by mockery v2.51.1. DO NOT EDIT. package acker diff --git a/testing/mocks/internal_/pkg/fleetapi/client/sender_mock.go b/testing/mocks/internal_/pkg/fleetapi/client/sender_mock.go new file mode 100644 index 00000000000..ff40afa3ee1 --- /dev/null +++ b/testing/mocks/internal_/pkg/fleetapi/client/sender_mock.go @@ -0,0 +1,153 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +// Code generated by mockery v2.51.1. DO NOT EDIT. 
+ +package client + +import ( + context "context" + http "net/http" + + io "io" + + mock "github.com/stretchr/testify/mock" + + url "net/url" +) + +// Sender is an autogenerated mock type for the Sender type +type Sender struct { + mock.Mock +} + +type Sender_Expecter struct { + mock *mock.Mock +} + +func (_m *Sender) EXPECT() *Sender_Expecter { + return &Sender_Expecter{mock: &_m.Mock} +} + +// Send provides a mock function with given fields: ctx, method, path, params, headers, body +func (_m *Sender) Send(ctx context.Context, method string, path string, params url.Values, headers http.Header, body io.Reader) (*http.Response, error) { + ret := _m.Called(ctx, method, path, params, headers, body) + + if len(ret) == 0 { + panic("no return value specified for Send") + } + + var r0 *http.Response + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, url.Values, http.Header, io.Reader) (*http.Response, error)); ok { + return rf(ctx, method, path, params, headers, body) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, url.Values, http.Header, io.Reader) *http.Response); ok { + r0 = rf(ctx, method, path, params, headers, body) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*http.Response) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, url.Values, http.Header, io.Reader) error); ok { + r1 = rf(ctx, method, path, params, headers, body) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Sender_Send_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Send' +type Sender_Send_Call struct { + *mock.Call +} + +// Send is a helper method to define mock.On call +// - ctx context.Context +// - method string +// - path string +// - params url.Values +// - headers http.Header +// - body io.Reader +func (_e *Sender_Expecter) Send(ctx interface{}, method interface{}, path interface{}, params interface{}, headers interface{}, body interface{}) 
*Sender_Send_Call { + return &Sender_Send_Call{Call: _e.mock.On("Send", ctx, method, path, params, headers, body)} +} + +func (_c *Sender_Send_Call) Run(run func(ctx context.Context, method string, path string, params url.Values, headers http.Header, body io.Reader)) *Sender_Send_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(url.Values), args[4].(http.Header), args[5].(io.Reader)) + }) + return _c +} + +func (_c *Sender_Send_Call) Return(_a0 *http.Response, _a1 error) *Sender_Send_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Sender_Send_Call) RunAndReturn(run func(context.Context, string, string, url.Values, http.Header, io.Reader) (*http.Response, error)) *Sender_Send_Call { + _c.Call.Return(run) + return _c +} + +// URI provides a mock function with no fields +func (_m *Sender) URI() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for URI") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Sender_URI_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'URI' +type Sender_URI_Call struct { + *mock.Call +} + +// URI is a helper method to define mock.On call +func (_e *Sender_Expecter) URI() *Sender_URI_Call { + return &Sender_URI_Call{Call: _e.mock.On("URI")} +} + +func (_c *Sender_URI_Call) Run(run func()) *Sender_URI_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Sender_URI_Call) Return(_a0 string) *Sender_URI_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Sender_URI_Call) RunAndReturn(run func() string) *Sender_URI_Call { + _c.Call.Return(run) + return _c +} + +// NewSender creates a new instance of Sender. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+// The first argument is typically a *testing.T value. +func NewSender(t interface { + mock.TestingT + Cleanup(func()) +}) *Sender { + mock := &Sender{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/testing/mocks/pkg/control/v2/client/client_mock.go b/testing/mocks/pkg/control/v2/client/client_mock.go index 203a2411d6a..281086ad6f3 100644 --- a/testing/mocks/pkg/control/v2/client/client_mock.go +++ b/testing/mocks/pkg/control/v2/client/client_mock.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License 2.0; // you may not use this file except in compliance with the Elastic License 2.0. -// Code generated by mockery v2.42.2. DO NOT EDIT. +// Code generated by mockery v2.51.1. DO NOT EDIT. package client @@ -345,7 +345,7 @@ func (_c *Client_DiagnosticUnits_Call) RunAndReturn(run func(context.Context, .. return _c } -// Disconnect provides a mock function with given fields: +// Disconnect provides a mock function with no fields func (_m *Client) Disconnect() { _m.Called() } @@ -373,7 +373,7 @@ func (_c *Client_Disconnect_Call) Return() *Client_Disconnect_Call { } func (_c *Client_Disconnect_Call) RunAndReturn(run func()) *Client_Disconnect_Call { - _c.Call.Return(run) + _c.Run(run) return _c } From 9cc00609288c83d4bed05891443d3e794a5d8844 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 29 Jan 2025 13:29:17 +0100 Subject: [PATCH 03/17] build(deps): bump github.com/elastic/elastic-agent-libs from 0.18.1 to 0.18.2 (#6621) * build(deps): bump github.com/elastic/elastic-agent-libs Bumps [github.com/elastic/elastic-agent-libs](https://github.com/elastic/elastic-agent-libs) from 0.18.1 to 0.18.2. 
- [Release notes](https://github.com/elastic/elastic-agent-libs/releases) - [Commits](https://github.com/elastic/elastic-agent-libs/compare/v0.18.1...v0.18.2) --- updated-dependencies: - dependency-name: github.com/elastic/elastic-agent-libs dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update NOTICE.txt --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- NOTICE.txt | 4 ++-- go.mod | 2 +- go.sum | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index a76ba107358..89f1f10eb9d 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1319,11 +1319,11 @@ SOFTWARE -------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-libs -Version: v0.18.1 +Version: v0.18.2 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.18.1/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.18.2/LICENSE: Apache License Version 2.0, January 2004 diff --git a/go.mod b/go.mod index a3b798727cd..4b2108269ae 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/elastic/beats/v7 v7.0.0-alpha2.0.20241212201805-58af1537830b github.com/elastic/elastic-agent-autodiscover v0.9.0 github.com/elastic/elastic-agent-client/v7 v7.17.0 - github.com/elastic/elastic-agent-libs v0.18.1 + github.com/elastic/elastic-agent-libs v0.18.2 github.com/elastic/elastic-agent-system-metrics v0.11.7 github.com/elastic/elastic-transport-go/v8 v8.6.0 github.com/elastic/go-elasticsearch/v8 v8.17.0 diff --git a/go.sum b/go.sum index 91c4567982e..5048a566ab3 100644 --- a/go.sum +++ b/go.sum @@ -448,8 +448,8 @@ 
github.com/elastic/elastic-agent-autodiscover v0.9.0 h1:+iWIKh0u3e8I+CJa3FfWe9h0 github.com/elastic/elastic-agent-autodiscover v0.9.0/go.mod h1:5iUxLHhVdaGSWYTveSwfJEY4RqPXTG13LPiFoxcpFd4= github.com/elastic/elastic-agent-client/v7 v7.17.0 h1:TPLrEHF4kJ3RkmQzZPffrniY4WeW4vriHZbOAzM1hFo= github.com/elastic/elastic-agent-client/v7 v7.17.0/go.mod h1:6h+f9QdIr3GO2ODC0Y8+aEXRwzbA5W4eV4dd/67z7nI= -github.com/elastic/elastic-agent-libs v0.18.1 h1:dE6jf/D9bP8eRMQsV7KKpKV/G8zQzwMFBTj1w4e716c= -github.com/elastic/elastic-agent-libs v0.18.1/go.mod h1:rWdyrrAFzZwgNNi41Tsqhlt2c2GdXWhCEwcsnqISJ2U= +github.com/elastic/elastic-agent-libs v0.18.2 h1:jQrGytcG67YEhK9JzUhM1Yb6j9Ied68iYrWK8mlIV5M= +github.com/elastic/elastic-agent-libs v0.18.2/go.mod h1:rWdyrrAFzZwgNNi41Tsqhlt2c2GdXWhCEwcsnqISJ2U= github.com/elastic/elastic-agent-system-metrics v0.11.7 h1:1xm2okCM0eQZ4jivZgUFSlt6HAn/nPgKB/Fj8eLG6mY= github.com/elastic/elastic-agent-system-metrics v0.11.7/go.mod h1:nzkrGajQA29YNcfP62gfzhxX9an3/xdQ3RmfQNw9YTI= github.com/elastic/elastic-transport-go/v8 v8.6.0 h1:Y2S/FBjx1LlCv5m6pWAF2kDJAHoSjSRSJCApolgfthA= From 0d5a1ad397987bc4d53856a9a4685a50cea5d6d1 Mon Sep 17 00:00:00 2001 From: Andrzej Stencel Date: Wed, 29 Jan 2025 13:30:27 +0100 Subject: [PATCH 04/17] docs(otel): sort OTel components (#6616) * docs(otel): sort OTel components Modifies the `mage otel:readme` command to sort the OTel components by name in the OTel README. 
* refactor: sort complete lists instead of while inserting --- internal/pkg/otel/README.md | 34 +++++++++++++++++----------------- magefile.go | 5 +++++ 2 files changed, 22 insertions(+), 17 deletions(-) diff --git a/internal/pkg/otel/README.md b/internal/pkg/otel/README.md index cad216746d5..56447e1d71c 100644 --- a/internal/pkg/otel/README.md +++ b/internal/pkg/otel/README.md @@ -35,30 +35,30 @@ This section provides a summary of components included in the Elastic Distributi | Component | Version | |---|---| -| [jaegerreceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/receiver/jaegerreceiver/v0.117.0/receiver/jaegerreceiver/README.md) | v0.117.0 | -| [jmxreceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/receiver/jmxreceiver/v0.117.0/receiver/jmxreceiver/README.md) | v0.117.0 | -| [kafkareceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/receiver/kafkareceiver/v0.117.0/receiver/kafkareceiver/README.md) | v0.117.0 | -| [prometheusreceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/receiver/prometheusreceiver/v0.117.0/receiver/prometheusreceiver/README.md) | v0.117.0 | -| [receivercreator](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/receiver/receivercreator/v0.117.0/receiver/receivercreator/README.md) | v0.117.0 | -| [zipkinreceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/receiver/zipkinreceiver/v0.117.0/receiver/zipkinreceiver/README.md) | v0.117.0 | -| [nopreceiver](https://github.com/open-telemetry/opentelemetry-collector/blob/receiver/nopreceiver/v0.117.0/receiver/nopreceiver/README.md) | v0.117.0 | | [filelogreceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/receiver/filelogreceiver/v0.117.0/receiver/filelogreceiver/README.md) | v0.117.0 | | 
[hostmetricsreceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/receiver/hostmetricsreceiver/v0.117.0/receiver/hostmetricsreceiver/README.md) | v0.117.0 | | [httpcheckreceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/receiver/httpcheckreceiver/v0.117.0/receiver/httpcheckreceiver/README.md) | v0.117.0 | +| [jaegerreceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/receiver/jaegerreceiver/v0.117.0/receiver/jaegerreceiver/README.md) | v0.117.0 | +| [jmxreceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/receiver/jmxreceiver/v0.117.0/receiver/jmxreceiver/README.md) | v0.117.0 | | [k8sclusterreceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/receiver/k8sclusterreceiver/v0.117.0/receiver/k8sclusterreceiver/README.md) | v0.117.0 | | [k8sobjectsreceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/receiver/k8sobjectsreceiver/v0.117.0/receiver/k8sobjectsreceiver/README.md) | v0.117.0 | +| [kafkareceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/receiver/kafkareceiver/v0.117.0/receiver/kafkareceiver/README.md) | v0.117.0 | | [kubeletstatsreceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/receiver/kubeletstatsreceiver/v0.117.0/receiver/kubeletstatsreceiver/README.md) | v0.117.0 | +| [nopreceiver](https://github.com/open-telemetry/opentelemetry-collector/blob/receiver/nopreceiver/v0.117.0/receiver/nopreceiver/README.md) | v0.117.0 | | [otlpreceiver](https://github.com/open-telemetry/opentelemetry-collector/blob/receiver/otlpreceiver/v0.117.0/receiver/otlpreceiver/README.md) | v0.117.0 | +| [prometheusreceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/receiver/prometheusreceiver/v0.117.0/receiver/prometheusreceiver/README.md) | v0.117.0 | +| 
[receivercreator](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/receiver/receivercreator/v0.117.0/receiver/receivercreator/README.md) | v0.117.0 | +| [zipkinreceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/receiver/zipkinreceiver/v0.117.0/receiver/zipkinreceiver/README.md) | v0.117.0 | ### Exporters | Component | Version | |---|---| -| [kafkaexporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/exporter/kafkaexporter/v0.117.0/exporter/kafkaexporter/README.md) | v0.117.0 | -| [loadbalancingexporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/exporter/loadbalancingexporter/v0.117.0/exporter/loadbalancingexporter/README.md) | v0.117.0 | +| [debugexporter](https://github.com/open-telemetry/opentelemetry-collector/blob/exporter/debugexporter/v0.117.0/exporter/debugexporter/README.md) | v0.117.0 | | [elasticsearchexporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/exporter/elasticsearchexporter/v0.117.0/exporter/elasticsearchexporter/README.md) | v0.117.0 | | [fileexporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/exporter/fileexporter/v0.117.0/exporter/fileexporter/README.md) | v0.117.0 | -| [debugexporter](https://github.com/open-telemetry/opentelemetry-collector/blob/exporter/debugexporter/v0.117.0/exporter/debugexporter/README.md) | v0.117.0 | +| [kafkaexporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/exporter/kafkaexporter/v0.117.0/exporter/kafkaexporter/README.md) | v0.117.0 | +| [loadbalancingexporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/exporter/loadbalancingexporter/v0.117.0/exporter/loadbalancingexporter/README.md) | v0.117.0 | | [otlpexporter](https://github.com/open-telemetry/opentelemetry-collector/blob/exporter/otlpexporter/v0.117.0/exporter/otlpexporter/README.md) | v0.117.0 | | 
[otlphttpexporter](https://github.com/open-telemetry/opentelemetry-collector/blob/exporter/otlphttpexporter/v0.117.0/exporter/otlphttpexporter/README.md) | v0.117.0 | @@ -66,35 +66,35 @@ This section provides a summary of components included in the Elastic Distributi | Component | Version | |---|---| +| [attributesprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/processor/attributesprocessor/v0.117.0/processor/attributesprocessor/README.md) | v0.117.0 | +| [batchprocessor](https://github.com/open-telemetry/opentelemetry-collector/blob/processor/batchprocessor/v0.117.0/processor/batchprocessor/README.md) | v0.117.0 | | [elasticinframetricsprocessor](https://github.com/elastic/opentelemetry-collector-components/blob/processor/elasticinframetricsprocessor/v0.13.0/processor/elasticinframetricsprocessor/README.md) | v0.13.0 | | [elastictraceprocessor](https://github.com/elastic/opentelemetry-collector-components/blob/processor/elastictraceprocessor/v0.3.0/processor/elastictraceprocessor/README.md) | v0.3.0 | -| [lsmintervalprocessor](https://github.com/elastic/opentelemetry-collector-components/blob/processor/lsmintervalprocessor/v0.3.0/processor/lsmintervalprocessor/README.md) | v0.3.0 | -| [memorylimiterprocessor](https://github.com/open-telemetry/opentelemetry-collector/blob/processor/memorylimiterprocessor/v0.117.0/processor/memorylimiterprocessor/README.md) | v0.117.0 | -| [attributesprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/processor/attributesprocessor/v0.117.0/processor/attributesprocessor/README.md) | v0.117.0 | | [filterprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/processor/filterprocessor/v0.117.0/processor/filterprocessor/README.md) | v0.117.0 | | [geoipprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/processor/geoipprocessor/v0.117.0/processor/geoipprocessor/README.md) | v0.117.0 | | 
[k8sattributesprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/processor/k8sattributesprocessor/v0.117.0/processor/k8sattributesprocessor/README.md) | v0.117.0 | +| [lsmintervalprocessor](https://github.com/elastic/opentelemetry-collector-components/blob/processor/lsmintervalprocessor/v0.3.0/processor/lsmintervalprocessor/README.md) | v0.3.0 | +| [memorylimiterprocessor](https://github.com/open-telemetry/opentelemetry-collector/blob/processor/memorylimiterprocessor/v0.117.0/processor/memorylimiterprocessor/README.md) | v0.117.0 | | [resourcedetectionprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/processor/resourcedetectionprocessor/v0.117.0/processor/resourcedetectionprocessor/README.md) | v0.117.0 | | [resourceprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/processor/resourceprocessor/v0.117.0/processor/resourceprocessor/README.md) | v0.117.0 | | [transformprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/processor/transformprocessor/v0.117.0/processor/transformprocessor/README.md) | v0.117.0 | -| [batchprocessor](https://github.com/open-telemetry/opentelemetry-collector/blob/processor/batchprocessor/v0.117.0/processor/batchprocessor/README.md) | v0.117.0 | ### Extensions | Component | Version | |---|---| +| [filestorage](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/extension/storage/filestorage/v0.117.0/extension/storage/filestorage/README.md) | v0.117.0 | | [healthcheckextension](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/extension/healthcheckextension/v0.117.0/extension/healthcheckextension/README.md) | v0.117.0 | | [k8sobserver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/extension/observer/k8sobserver/v0.117.0/extension/observer/k8sobserver/README.md) | v0.117.0 | -| 
[pprofextension](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/extension/pprofextension/v0.117.0/extension/pprofextension/README.md) | v0.117.0 | -| [filestorage](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/extension/storage/filestorage/v0.117.0/extension/storage/filestorage/README.md) | v0.117.0 | | [memorylimiterextension](https://github.com/open-telemetry/opentelemetry-collector/blob/extension/memorylimiterextension/v0.117.0/extension/memorylimiterextension/README.md) | v0.117.0 | +| [pprofextension](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/extension/pprofextension/v0.117.0/extension/pprofextension/README.md) | v0.117.0 | ### Connectors | Component | Version | |---|---| -| [signaltometricsconnector](https://github.com/elastic/opentelemetry-collector-components/blob/connector/signaltometricsconnector/v0.3.0/connector/signaltometricsconnector/README.md) | v0.3.0 | | [routingconnector](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/connector/routingconnector/v0.117.0/connector/routingconnector/README.md) | v0.117.0 | +| [signaltometricsconnector](https://github.com/elastic/opentelemetry-collector-components/blob/connector/signaltometricsconnector/v0.3.0/connector/signaltometricsconnector/README.md) | v0.3.0 | | [spanmetricsconnector](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/connector/spanmetricsconnector/v0.117.0/connector/spanmetricsconnector/README.md) | v0.117.0 | ## Persistence in OpenTelemetry Collector diff --git a/magefile.go b/magefile.go index ed80a77b7c1..7957817daf6 100644 --- a/magefile.go +++ b/magefile.go @@ -26,6 +26,7 @@ import ( "regexp" "runtime" "slices" + "sort" "strconv" "strings" "sync" @@ -3301,6 +3302,10 @@ func getOtelDependencies() (*otelDependencies, error) { } } + for _, list := range [][]*otelDependency{connectors, exporters, extensions, processors, receivers} { + sort.Slice(list, func(i, j int) bool { 
return list[i].Name < list[j].Name }) + } + return &otelDependencies{ Connectors: connectors, Exporters: exporters, From 093fb580b278791fda9b4c093121268778e92021 Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Wed, 29 Jan 2025 16:51:52 +0100 Subject: [PATCH 05/17] Changes to binary distribution (#6542) --------- Co-authored-by: Andrzej Stencel --- _meta/.flavors | 13 + ...131552-Changes-to-binary-distribution.yaml | 43 ++ dev-tools/mage/settings.go | 52 +- .../agent/application/upgrade/step_unpack.go | 181 ++++++- .../application/upgrade/step_unpack_test.go | 210 +++++++- .../pkg/agent/application/upgrade/upgrade.go | 11 +- internal/pkg/agent/cmd/install.go | 9 +- internal/pkg/agent/install/flavors.go | 229 +++++++++ internal/pkg/agent/install/flavors_test.go | 460 ++++++++++++++++++ internal/pkg/agent/install/install.go | 40 +- internal/pkg/agent/install/install_test.go | 2 +- pkg/api/v1/manifest.go | 1 + pkg/component/load.go | 33 ++ pkg/component/load_test.go | 103 ++++ pkg/testing/fixture_install.go | 5 + specs/apm-server.spec.yml | 4 + specs/cloudbeat.spec.yml | 2 + specs/endpoint-security.spec.yml | 2 + testing/integration/fleetserver_test.go | 2 + testing/integration/install_test.go | 152 ++++++ .../upgrade_standalone_same_commit_test.go | 4 +- 21 files changed, 1536 insertions(+), 22 deletions(-) create mode 100644 _meta/.flavors create mode 100644 changelog/fragments/1737131552-Changes-to-binary-distribution.yaml create mode 100644 internal/pkg/agent/install/flavors.go create mode 100644 internal/pkg/agent/install/flavors_test.go diff --git a/_meta/.flavors b/_meta/.flavors new file mode 100644 index 00000000000..42358f5c5a4 --- /dev/null +++ b/_meta/.flavors @@ -0,0 +1,13 @@ +basic: + - agentbeat + - endpoint-security + - pf-host-agent +servers: + - agentbeat + - endpoint-security + - pf-host-agent + - cloudbeat + - apm-server + - fleet-server + - pf-elastic-symbolizer + - pf-elastic-collector diff --git 
a/changelog/fragments/1737131552-Changes-to-binary-distribution.yaml b/changelog/fragments/1737131552-Changes-to-binary-distribution.yaml new file mode 100644 index 00000000000..ead9992d0de --- /dev/null +++ b/changelog/fragments/1737131552-Changes-to-binary-distribution.yaml @@ -0,0 +1,43 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: breaking-change + +# Change summary; a 80ish characters long description of the change. +summary: Changes to binary distribution + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment. +description: | + Default install installs only: + - agentbeat + - endpoint-security + - pf-host-agent + additional flag is added that includes components above and: + - cloudbeat + - apm-server + - fleet-server + - pf-elastic-symbolizer + - pf-elastic-collector + + +# Affected component; usually one of "elastic-agent", "fleet-server", "filebeat", "metricbeat", "auditbeat", "all", etc. +component: elastic-agent + +# PR URL; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. 
+# Please provide it if you are adding a fragment for a different PR. +#pr: https://github.com/owner/repo/1234 + +# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. +#issue: https://github.com/owner/repo/1234 diff --git a/dev-tools/mage/settings.go b/dev-tools/mage/settings.go index 32bc62a3e97..3318e361968 100644 --- a/dev-tools/mage/settings.go +++ b/dev-tools/mage/settings.go @@ -346,10 +346,15 @@ func PackageManifest() (string, error) { return "", fmt.Errorf("retrieving agent commit hash: %w", err) } - return GeneratePackageManifest(BeatName, packageVersion, Snapshot, hash, commitHashShort) + registry, err := loadFlavorsRegistry() + if err != nil { + return "", fmt.Errorf("retrieving agent flavors: %w", err) + } + + return GeneratePackageManifest(BeatName, packageVersion, Snapshot, hash, commitHashShort, registry) } -func GeneratePackageManifest(beatName, packageVersion string, snapshot bool, fullHash, shortHash string) (string, error) { +func GeneratePackageManifest(beatName, packageVersion string, snapshot bool, fullHash, shortHash string, flavorsRegistry map[string][]string) (string, error) { m := v1.NewManifest() m.Package.Version = packageVersion m.Package.Snapshot = snapshot @@ -360,6 +365,7 @@ func GeneratePackageManifest(beatName, packageVersion string, snapshot bool, ful m.Package.PathMappings = []map[string]string{{}} m.Package.PathMappings[0][versionedHomePath] = fmt.Sprintf("data/%s-%s%s-%s", beatName, m.Package.Version, GenerateSnapshotSuffix(snapshot), shortHash) m.Package.PathMappings[0][v1.ManifestFileName] = fmt.Sprintf("data/%s-%s%s-%s/%s", beatName, m.Package.Version, GenerateSnapshotSuffix(snapshot), shortHash, v1.ManifestFileName) + m.Package.Flavors = flavorsRegistry yamlBytes, err := yaml.Marshal(m) if err != nil { return "", fmt.Errorf("marshaling manifest: %w", err) @@ -461,6 +467,10 @@ var ( 
beatVersionValue string beatVersionErr error beatVersionOnce sync.Once + + flavorsRegistry map[string][]string + flavorsRegistryErr error + flavorsOnce sync.Once ) // BeatQualifiedVersion returns the Beat's qualified version. The value can be overwritten by @@ -492,6 +502,14 @@ func beatVersion() (string, error) { return beatVersionValue, beatVersionErr } +func loadFlavorsRegistry() (map[string][]string, error) { + flavorsOnce.Do(func() { + flavorsRegistry, flavorsRegistryErr = getBuildVariableSources().GetFlavorsRegistry() + }) + + return flavorsRegistry, flavorsRegistryErr +} + var ( beatDocBranchRegex = regexp.MustCompile(`(?m)doc-branch:\s*([^\s]+)\r?$`) beatDocSiteBranchRegex = regexp.MustCompile(`(?m)doc-site-branch:\s*([^\s]+)\r?$`) @@ -521,9 +539,10 @@ var ( // DefaultBeatBuildVariableSources contains the default locations build // variables are read from by Elastic Beats. DefaultBeatBuildVariableSources = &BuildVariableSources{ - BeatVersion: "{{ elastic_beats_dir }}/version/version.go", - GoVersion: "{{ elastic_beats_dir }}/.go-version", - DocBranch: "{{ elastic_beats_dir }}/version/docs/version.asciidoc", + BeatVersion: "{{ elastic_beats_dir }}/version/version.go", + GoVersion: "{{ elastic_beats_dir }}/.go-version", + DocBranch: "{{ elastic_beats_dir }}/version/docs/version.asciidoc", + FlavorsRegistry: "{{ elastic_beats_dir }}/_meta/.flavors", } buildVariableSources *BuildVariableSources @@ -584,6 +603,9 @@ type BuildVariableSources struct { // Parses the documentation branch from the DocBranch file. DocBranchParser func(data []byte) (string, error) + + // File containing definition of flavors. + FlavorsRegistry string } func (s *BuildVariableSources) expandVar(in string) (string, error) { @@ -628,6 +650,26 @@ func (s *BuildVariableSources) GetGoVersion() (string, error) { return s.GoVersionParser(data) } +// GetFlavorsRegistry reads the flavors file and parses the list of components of it. 
+func (s *BuildVariableSources) GetFlavorsRegistry() (map[string][]string, error) { + file, err := s.expandVar(s.FlavorsRegistry) + if err != nil { + return nil, err + } + + data, err := os.ReadFile(file) + if err != nil { + return nil, fmt.Errorf("failed to read flavors from file=%v: %w", file, err) + } + + registry := make(map[string][]string) + if err := yaml.Unmarshal(data, registry); err != nil { + return nil, fmt.Errorf("failed to parse flavors: %w", err) + } + + return registry, nil +} + // GetDocBranch reads the DocBranch file and parses the branch from it. func (s *BuildVariableSources) GetDocBranch() (string, error) { file, err := s.expandVar(s.DocBranch) diff --git a/internal/pkg/agent/application/upgrade/step_unpack.go b/internal/pkg/agent/application/upgrade/step_unpack.go index 90cc99c6934..0685ae07855 100644 --- a/internal/pkg/agent/application/upgrade/step_unpack.go +++ b/internal/pkg/agent/application/upgrade/step_unpack.go @@ -20,7 +20,9 @@ import ( "strings" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" + "github.com/elastic/elastic-agent/internal/pkg/agent/install" v1 "github.com/elastic/elastic-agent/pkg/api/v1" + "github.com/elastic/elastic-agent/pkg/component" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -34,15 +36,15 @@ type UnpackResult struct { } // unpack unpacks archive correctly, skips root (symlink, config...) 
unpacks data/* -func (u *Upgrader) unpack(version, archivePath, dataDir string) (UnpackResult, error) { +func (u *Upgrader) unpack(version, archivePath, dataDir string, flavor string) (UnpackResult, error) { // unpack must occur in directory that holds the installation directory // or the extraction will be double nested var unpackRes UnpackResult var err error if runtime.GOOS == windows { - unpackRes, err = unzip(u.log, archivePath, dataDir) + unpackRes, err = unzip(u.log, archivePath, dataDir, flavor) } else { - unpackRes, err = untar(u.log, archivePath, dataDir) + unpackRes, err = untar(u.log, archivePath, dataDir, flavor) } if err != nil { @@ -76,7 +78,7 @@ func (u *Upgrader) getPackageMetadata(archivePath string) (packageMetadata, erro } } -func unzip(log *logger.Logger, archivePath, dataDir string) (UnpackResult, error) { +func unzip(log *logger.Logger, archivePath, dataDir string, flavor string) (UnpackResult, error) { var hash, rootDir string r, err := zip.OpenReader(archivePath) if err != nil { @@ -95,15 +97,21 @@ func unzip(log *logger.Logger, archivePath, dataDir string) (UnpackResult, error } hash = metadata.hash[:hashLen] - + var registry map[string][]string if metadata.manifest != nil { pm.mappings = metadata.manifest.Package.PathMappings versionedHome = filepath.FromSlash(pm.Map(metadata.manifest.Package.VersionedHome)) + registry = metadata.manifest.Package.Flavors } else { // if at this point we didn't load the manifest, set the versioned to the backup value versionedHome = createVersionedHomeFromHash(hash) } + skipFn, err := skipFnFromZip(log, r, flavor, fileNamePrefix, createVersionedHomeFromHash(hash), registry) + if err != nil { + return UnpackResult{}, err + } + unpackFile := func(f *zip.File) (err error) { rc, err := f.Open() if err != nil { @@ -131,6 +139,10 @@ func unzip(log *logger.Logger, archivePath, dataDir string) (UnpackResult, error dstPath := strings.TrimPrefix(mappedPackagePath, "data/") dstPath = filepath.Join(dataDir, dstPath) + 
if skipFn(dstPath) { + return nil + } + if f.FileInfo().IsDir() { log.Debugw("Unpacking directory", "archive", "zip", "file.path", dstPath) // check if the directory already exists @@ -194,6 +206,8 @@ func unzip(log *logger.Logger, archivePath, dataDir string) (UnpackResult, error }, nil } +// getPackageMetadataFromZip reads an archive on a path archivePath and parses metadata from manifest file +// located inside an archive func getPackageMetadataFromZip(archivePath string) (packageMetadata, error) { r, err := zip.OpenReader(archivePath) if err != nil { @@ -204,6 +218,58 @@ func getPackageMetadataFromZip(archivePath string) (packageMetadata, error) { return getPackageMetadataFromZipReader(r, fileNamePrefix) } +func skipFnFromZip(log *logger.Logger, r *zip.ReadCloser, detectedFlavor string, fileNamePrefix string, versionedHome string, registry map[string][]string) (install.SkipFn, error) { + if detectedFlavor == "" { + // no flavor don't skip anything + return func(relPath string) bool { return false }, nil + } + + flavor, err := install.Flavor(detectedFlavor, "", registry) + if err != nil { + if errors.Is(err, install.ErrUnknownFlavor) { + // unknown flavor fallback to copy all + return func(relPath string) bool { return false }, nil + } + return nil, err + } + specsInFlavor := install.SpecsForFlavor(flavor) // ignoring error flavor exists, it was loaded before + + // fix versionedHome + versionedHome = strings.ReplaceAll(versionedHome, "\\", "/") + + readFile := func(specFilePath string) ([]byte, error) { + f, err := r.Open(specFilePath) + if err != nil { + return nil, err + } + defer f.Close() + + return io.ReadAll(f) + } + + var allowedPaths []string + for _, spec := range specsInFlavor { + specFilePath := path.Join(fileNamePrefix, versionedHome, "components", spec) + + contentBytes, err := readFile(specFilePath) + if err != nil { + if os.IsNotExist(err) { + continue + } + return nil, err + } + + paths, err := component.ParseComponentFiles(contentBytes, 
specFilePath, true) + if err != nil { + return nil, errors.New("failed to read paths from %q: %v", specFilePath, err) + } + allowedPaths = append(allowedPaths, paths...) + + } + + return install.SkipComponentsPathWithSubpathsFn(allowedPaths) +} + func getPackageMetadataFromZipReader(r *zip.ReadCloser, fileNamePrefix string) (packageMetadata, error) { ret := packageMetadata{} @@ -241,8 +307,7 @@ func getPackageMetadataFromZipReader(r *zip.ReadCloser, fileNamePrefix string) ( return ret, nil } -func untar(log *logger.Logger, archivePath, dataDir string) (UnpackResult, error) { - +func untar(log *logger.Logger, archivePath, dataDir string, flavor string) (UnpackResult, error) { var versionedHome string var rootDir string var hash string @@ -256,16 +321,23 @@ func untar(log *logger.Logger, archivePath, dataDir string) (UnpackResult, error } hash = metadata.hash[:hashLen] + var registry map[string][]string if metadata.manifest != nil { // set the path mappings pm.mappings = metadata.manifest.Package.PathMappings versionedHome = filepath.FromSlash(pm.Map(metadata.manifest.Package.VersionedHome)) + registry = metadata.manifest.Package.Flavors } else { // set default value of versioned home if it wasn't set by reading the manifest versionedHome = createVersionedHomeFromHash(metadata.hash) } + skipFn, err := skipFnFromTar(log, archivePath, flavor, registry) + if err != nil { + return UnpackResult{}, err + } + r, err := os.Open(archivePath) if err != nil { return UnpackResult{}, errors.New(fmt.Sprintf("artifact for 'elastic-agent' could not be found at '%s'", archivePath), errors.TypeFilesystem, errors.M(errors.MetaKeyPath, archivePath)) @@ -316,6 +388,10 @@ func untar(log *logger.Logger, archivePath, dataDir string) (UnpackResult, error continue } + if skipFn(fileName) { + continue + } + rel := filepath.FromSlash(strings.TrimPrefix(fileName, "data/")) abs := filepath.Join(dataDir, rel) @@ -378,6 +454,97 @@ func untar(log *logger.Logger, archivePath, dataDir string) 
(UnpackResult, error }, nil } +func skipFnFromTar(log *logger.Logger, archivePath string, flavor string, registry map[string][]string) (install.SkipFn, error) { + if flavor == "" { + // no flavor don't skip anything + return func(relPath string) bool { return false }, nil + } + + fileNamePrefix := getFileNamePrefix(archivePath) + loadFlavor := func(flavor string) install.FlavorDefinition { + components, found := registry[flavor] + if !found { + return install.FlavorDefinition{} + } + + return install.FlavorDefinition{Name: flavor, Components: components} + } + + // scan tar archive for spec file and extract allowed paths + r, err := os.Open(archivePath) + if err != nil { + return nil, errors.New(fmt.Sprintf("artifact for 'elastic-agent' could not be found at '%s'", archivePath), errors.TypeFilesystem, errors.M(errors.MetaKeyPath, archivePath)) + } + defer r.Close() + + zr, err := gzip.NewReader(r) + if err != nil { + return nil, errors.New("requires gzip-compressed body", err, errors.TypeFilesystem) + } + + tr := tar.NewReader(zr) + + var allowedPaths []string + flavorDefinition := loadFlavor(flavor) + specs, err := specRegistry(flavorDefinition) + if err != nil { + return nil, err + } + // go through all the content of a tar archive + // if elastic-agent.active.commit file is found, get commit of the version unpacked + // otherwise copy everything inside data directory (everything related to new version), + // pieces outside of data we already have and should not be overwritten as they are usually configs + for { + f, err := tr.Next() + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return nil, err + } + + fileName := strings.TrimPrefix(f.Name, fileNamePrefix) + + // skip everything outside components/ and everything that's not spec file. 
+ // checking for spec files outside of components just to be sure + if !strings.Contains(fileName, "/components/") || !strings.HasSuffix(fileName, "spec.yml") { + continue + } + + if _, specInRegistry := specs[filepath.Base(fileName)]; !specInRegistry { + // component not present in a package, skip processing + continue + } + + fi := f.FileInfo() + mode := fi.Mode() + switch { + case mode.IsRegular(): + contentBytes, err := io.ReadAll(tr) + if err != nil { + return nil, errors.New("failed to read %q: %v", fileName, err) + } + paths, err := component.ParseComponentFiles(contentBytes, fileName, true) + if err != nil { + return nil, errors.New("failed to read paths from %q: %v", fileName, err) + } + + allowedPaths = append(allowedPaths, paths...) + } + } + + return install.SkipComponentsPathWithSubpathsFn(allowedPaths) +} + +func specRegistry(flavor install.FlavorDefinition) (map[string]struct{}, error) { + specs := install.SpecsForFlavor(flavor) + registry := make(map[string]struct{}) + for _, s := range specs { + registry[s] = struct{}{} + } + return registry, nil +} + func getPackageMetadataFromTar(archivePath string) (packageMetadata, error) { // quickly open the archive and look up manifest.yaml file fileContents, err := getFilesContentFromTar(archivePath, v1.ManifestFileName, agentCommitFile) diff --git a/internal/pkg/agent/application/upgrade/step_unpack_test.go b/internal/pkg/agent/application/upgrade/step_unpack_test.go index 295a5d3dbec..79af3b9843d 100644 --- a/internal/pkg/agent/application/upgrade/step_unpack_test.go +++ b/internal/pkg/agent/application/upgrade/step_unpack_test.go @@ -35,6 +35,14 @@ package: version: 1.2.3 snapshot: true versioned-home: data/elastic-agent-abcdef + flavors: + basic: + - comp1 + - comp2 + servers: + - comp1 + - comp2 + - comp3 path-mappings: - data/elastic-agent-abcdef: data/elastic-agent-1.2.3-SNAPSHOT-abcdef manifest.yaml: data/elastic-agent-1.2.3-SNAPSHOT-abcdef/manifest.yaml @@ -60,6 +68,72 @@ inputs: - baz ` +const 
foo_component_spec_with_dirs = ` +component_files: + - component_dir/* +version: 2 +inputs: + - name: foobar + description: "Foo input" + platforms: + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + outputs: + - elasticsearch + - kafka + - logstash + command: + args: + - foo + - bar + - baz +` +const foo_component_spec_with_archive = ` +component_files: + - component.zip +version: 2 +inputs: + - name: foobar + description: "Foo input" + platforms: + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + outputs: + - elasticsearch + - kafka + - logstash + command: + args: + - foo + - bar + - baz +` + +var archiveFilesWithMoreComponents = []files{ + {fType: DIRECTORY, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64", mode: fs.ModeDir | (fs.ModePerm & 0o750)}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/" + v1.ManifestFileName, content: ea_123_manifest, mode: fs.ModePerm & 0o640}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/" + agentCommitFile, content: "abcdefghijklmnopqrstuvwxyz", mode: fs.ModePerm & 0o640}, + {fType: DIRECTORY, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/data", mode: fs.ModeDir | (fs.ModePerm & 0o750)}, + {fType: DIRECTORY, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/data/elastic-agent-abcdef", mode: fs.ModeDir | (fs.ModePerm & 0o750)}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/data/elastic-agent-abcdef/" + agentName, content: agentBinaryPlaceholderContent, mode: fs.ModePerm & 0o750}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/data/elastic-agent-abcdef/package.version", content: "1.2.3", mode: fs.ModePerm & 0o640}, + {fType: DIRECTORY, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/data/elastic-agent-abcdef/components", mode: fs.ModeDir | (fs.ModePerm & 0o750)}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/data/elastic-agent-abcdef/components/comp1", binary: true, content: 
"Placeholder for component", mode: fs.ModePerm & 0o750}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/data/elastic-agent-abcdef/components/comp1", content: "Placeholder for component", mode: fs.ModePerm & 0o750}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/data/elastic-agent-abcdef/components/comp1.spec.yml", content: foo_component_spec, mode: fs.ModePerm & 0o640}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/data/elastic-agent-abcdef/components/comp2", binary: true, content: "Placeholder for component", mode: fs.ModePerm & 0o750}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/data/elastic-agent-abcdef/components/comp2.spec.yml", content: foo_component_spec_with_dirs, mode: fs.ModePerm & 0o640}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/data/elastic-agent-abcdef/components/comp3", binary: true, content: "Placeholder for component", mode: fs.ModePerm & 0o750}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/data/elastic-agent-abcdef/components/comp3.spec.yml", content: foo_component_spec_with_archive, mode: fs.ModePerm & 0o640}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/data/elastic-agent-abcdef/components/component.zip", content: "inner file content", mode: fs.ModePerm & 0o640}, + {fType: DIRECTORY, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/data/elastic-agent-abcdef/components/component_dir", mode: fs.ModeDir | (fs.ModePerm & 0o750)}, + {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/data/elastic-agent-abcdef/components/component_dir/inner_file", content: "inner file content", mode: fs.ModePerm & 0o640}, +} + var archiveFilesWithManifestNoSymlink = []files{ {fType: DIRECTORY, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64", mode: fs.ModeDir | (fs.ModePerm & 0o750)}, {fType: REGULAR, path: "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64/" + v1.ManifestFileName, content: 
ea_123_manifest, mode: fs.ModePerm & 0o640}, @@ -100,6 +174,7 @@ type files struct { path string content string mode fs.FileMode + binary bool } func (f files) Name() string { @@ -136,12 +211,18 @@ func TestUpgrader_unpackTarGz(t *testing.T) { archiveFiles []files } + binarySuffix := "" + if runtime.GOOS == "windows" { + binarySuffix = ".exe" + } + tests := []struct { name string args args want UnpackResult wantErr assert.ErrorAssertionFunc checkFiles checkExtractedPath + flavor string }{ { name: "file before containing folder", @@ -178,6 +259,57 @@ func TestUpgrader_unpackTarGz(t *testing.T) { wantErr: assert.NoError, checkFiles: checkExtractedFilesWithManifest, }, + { + name: "package with basic flavor", + args: args{ + version: "1.2.3", + archiveFiles: append(archiveFilesWithMoreComponents, agentArchiveSymLink), + archiveGenerator: func(t *testing.T, i []files) (string, error) { + return createTarArchive(t, "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64.tar.gz", i) + }, + }, + want: UnpackResult{ + Hash: "abcdef", + VersionedHome: filepath.Join("data", "elastic-agent-1.2.3-SNAPSHOT-abcdef"), + }, + wantErr: assert.NoError, + flavor: "basic", + checkFiles: func(t *testing.T, testDataDir string) { + checkFilesPresence(t, testDataDir, + []string{ + filepath.Join("components", "comp1"+binarySuffix), filepath.Join("components", "comp1.spec.yml"), + filepath.Join("components", "comp2"+binarySuffix), filepath.Join("components", "comp2.spec.yml"), + filepath.Join("components", "component_dir", "inner_file"), + }, + []string{filepath.Join("components", "comp3"), filepath.Join("components", "comp3.spec.yml"), filepath.Join("components", "component.zip")}) + }, + }, + { + name: "package with servers flavor", + args: args{ + version: "1.2.3", + archiveFiles: append(archiveFilesWithMoreComponents, agentArchiveSymLink), + archiveGenerator: func(t *testing.T, i []files) (string, error) { + return createTarArchive(t, "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64.tar.gz", i) + }, + 
}, + want: UnpackResult{ + Hash: "abcdef", + VersionedHome: filepath.Join("data", "elastic-agent-1.2.3-SNAPSHOT-abcdef"), + }, + wantErr: assert.NoError, + flavor: "servers", + checkFiles: func(t *testing.T, testDataDir string) { + checkFilesPresence(t, testDataDir, + []string{ + filepath.Join("components", "comp1"+binarySuffix), filepath.Join("components", "comp1.spec.yml"), + filepath.Join("components", "comp2"+binarySuffix), filepath.Join("components", "comp2.spec.yml"), + filepath.Join("components", "component_dir", "inner_file"), + filepath.Join("components", "comp3"+binarySuffix), filepath.Join("components", "comp3.spec.yml"), filepath.Join("components", "component.zip"), + }, + []string{}) + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -190,7 +322,7 @@ func TestUpgrader_unpackTarGz(t *testing.T) { archiveFile, err := tt.args.archiveGenerator(t, tt.args.archiveFiles) require.NoError(t, err, "creation of test archive file failed") - got, err := untar(log, archiveFile, testDataDir) + got, err := untar(log, archiveFile, testDataDir, tt.flavor) if !tt.wantErr(t, err, fmt.Sprintf("untar(%v, %v, %v)", tt.args.version, archiveFile, testDataDir)) { return } @@ -208,12 +340,18 @@ func TestUpgrader_unpackZip(t *testing.T) { archiveFiles []files } + binarySuffix := "" + if runtime.GOOS == "windows" { + binarySuffix = ".exe" + } + tests := []struct { name string args args want UnpackResult wantErr assert.ErrorAssertionFunc checkFiles checkExtractedPath + flavor string }{ { name: "file before containing folder", @@ -248,6 +386,56 @@ func TestUpgrader_unpackZip(t *testing.T) { wantErr: assert.NoError, checkFiles: checkExtractedFilesWithManifest, }, + + { + name: "package with basic flavor", + args: args{ + archiveFiles: archiveFilesWithMoreComponents, + archiveGenerator: func(t *testing.T, i []files) (string, error) { + return createZipArchive(t, "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64.zip", i) + }, + }, + want: UnpackResult{ + Hash: 
"abcdef", + VersionedHome: filepath.Join("data", "elastic-agent-1.2.3-SNAPSHOT-abcdef"), + }, + wantErr: assert.NoError, + flavor: "basic", + checkFiles: func(t *testing.T, testDataDir string) { + checkFilesPresence(t, testDataDir, + []string{ + filepath.Join("components", "comp1"+binarySuffix), filepath.Join("components", "comp1.spec.yml"), + filepath.Join("components", "comp2"+binarySuffix), filepath.Join("components", "comp2.spec.yml"), + filepath.Join("components", "component_dir", "inner_file"), + }, + []string{filepath.Join("components", "comp3"+binarySuffix), filepath.Join("components", "comp3.spec.yml"), filepath.Join("components", "component.zip")}) + }, + }, + { + name: "package with servers flavor", + args: args{ + archiveFiles: archiveFilesWithMoreComponents, + archiveGenerator: func(t *testing.T, i []files) (string, error) { + return createZipArchive(t, "elastic-agent-1.2.3-SNAPSHOT-someos-x86_64.zip", i) + }, + }, + want: UnpackResult{ + Hash: "abcdef", + VersionedHome: filepath.Join("data", "elastic-agent-1.2.3-SNAPSHOT-abcdef"), + }, + wantErr: assert.NoError, + flavor: "servers", + checkFiles: func(t *testing.T, testDataDir string) { + checkFilesPresence(t, testDataDir, + []string{ + filepath.Join("components", "comp1"+binarySuffix), filepath.Join("components", "comp1.spec.yml"), + filepath.Join("components", "comp2"+binarySuffix), filepath.Join("components", "comp2.spec.yml"), + filepath.Join("components", "component_dir", "inner_file"), + filepath.Join("components", "comp3"+binarySuffix), filepath.Join("components", "comp3.spec.yml"), filepath.Join("components", "component.zip"), + }, + []string{}) + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -261,7 +449,7 @@ func TestUpgrader_unpackZip(t *testing.T) { archiveFile, err := tt.args.archiveGenerator(t, tt.args.archiveFiles) require.NoError(t, err, "creation of test archive file failed") - got, err := unzip(log, archiveFile, testDataDir) + got, err := unzip(log, 
archiveFile, testDataDir, tt.flavor) if !tt.wantErr(t, err, fmt.Sprintf("unzip(%v, %v)", archiveFile, testDataDir)) { return } @@ -312,6 +500,16 @@ func checkExtractedFilesWithManifest(t *testing.T, testDataDir string) { } } +func checkFilesPresence(t *testing.T, testDataDir string, requiredFiles, unwantedFiles []string) { + versionedHome := filepath.Join(testDataDir, "elastic-agent-1.2.3-SNAPSHOT-abcdef") + for _, f := range requiredFiles { + assert.FileExists(t, filepath.Join(versionedHome, f)) + } + for _, f := range unwantedFiles { + assert.NoFileExists(t, filepath.Join(versionedHome, f)) + } +} + func createTarArchive(t *testing.T, archiveName string, archiveFiles []files) (string, error) { outDir := t.TempDir() @@ -340,6 +538,10 @@ func createTarArchive(t *testing.T, archiveName string, archiveFiles []files) (s } func addEntryToTarArchive(af files, writer *tar.Writer) error { + if af.binary && runtime.GOOS == "windows" { + af.path += ".exe" + } + header, err := tar.FileInfoHeader(&af, af.content) if err != nil { return fmt.Errorf("creating header for %q: %w", af.path, err) @@ -391,6 +593,10 @@ func createZipArchive(t *testing.T, archiveName string, archiveFiles []files) (s } func addEntryToZipArchive(af files, writer *zip.Writer) error { + if af.binary && runtime.GOOS == "windows" { + af.path += ".exe" + } + header, err := zip.FileInfoHeader(&af) if err != nil { return fmt.Errorf("creating header for %q: %w", af.path, err) diff --git a/internal/pkg/agent/application/upgrade/upgrade.go b/internal/pkg/agent/application/upgrade/upgrade.go index 122aac54825..4d4cfb2882d 100644 --- a/internal/pkg/agent/application/upgrade/upgrade.go +++ b/internal/pkg/agent/application/upgrade/upgrade.go @@ -231,7 +231,16 @@ func (u *Upgrader) Upgrade(ctx context.Context, version string, sourceURI string u.log.Infow("Unpacking agent package", "version", newVersion) // Nice to have: add check that no archive files end up in the current versioned home - unpackRes, err := 
u.unpack(version, archivePath, paths.Data()) + // default to no flavor to avoid breaking behavior + + // no default flavor, keep everything in case flavor is not specified + // in case of error fallback to keep-all + detectedFlavor, err := install.UsedFlavor(paths.Top(), "") + if err != nil { + u.log.Warnf("error encountered when detecting used flavor with top path %q: %w", paths.Top(), err) + } + u.log.Debugf("detected used flavor: %q", detectedFlavor) + unpackRes, err := u.unpack(version, archivePath, paths.Data(), detectedFlavor) if err != nil { return nil, err } diff --git a/internal/pkg/agent/cmd/install.go b/internal/pkg/agent/cmd/install.go index 90090e3ecfd..34ecbb5c1f4 100644 --- a/internal/pkg/agent/cmd/install.go +++ b/internal/pkg/agent/cmd/install.go @@ -29,6 +29,7 @@ const ( flagInstallDevelopment = "develop" flagInstallNamespace = "namespace" flagInstallRunUninstallFromBinary = "run-uninstall-from-binary" + flagInstallServers = "install-servers" flagInstallCustomUser = "user" flagInstallCustomGroup = "group" @@ -57,6 +58,7 @@ would like the Agent to operate. cmd.Flags().BoolP("non-interactive", "n", false, "Install Elastic Agent in non-interactive mode which will not prompt on missing parameters but fails instead.") cmd.Flags().String(flagInstallBasePath, paths.DefaultBasePath, "The path where the Elastic Agent will be installed. It must be an absolute path.") cmd.Flags().Bool(flagInstallUnprivileged, false, "Install in unprivileged mode, limiting the access of the Elastic Agent. (beta)") + cmd.Flags().Bool(flagInstallServers, false, "Install larger version of agent that includes server components") cmd.Flags().Bool(flagInstallRunUninstallFromBinary, false, "Run the uninstall command from this binary instead of using the binary found in the system's path.") _ = cmd.Flags().MarkHidden(flagInstallRunUninstallFromBinary) // Advanced option to force a new agent to override an existing installation, it may orphan installed components. 
@@ -269,7 +271,12 @@ func installCmd(streams *cli.IOStreams, cmd *cobra.Command) error { customPass, _ = cmd.Flags().GetString(flagInstallCustomPass) } - ownership, err = install.Install(cfgFile, topPath, unprivileged, log, progBar, streams, customUser, customGroup, customPass) + flavor := install.DefaultFlavor + if installServers, _ := cmd.Flags().GetBool(flagInstallServers); installServers { + flavor = install.FlavorServers + } + + ownership, err = install.Install(cfgFile, topPath, unprivileged, log, progBar, streams, customUser, customGroup, customPass, flavor) if err != nil { return fmt.Errorf("error installing package: %w", err) } diff --git a/internal/pkg/agent/install/flavors.go b/internal/pkg/agent/install/flavors.go new file mode 100644 index 00000000000..61f84124ce9 --- /dev/null +++ b/internal/pkg/agent/install/flavors.go @@ -0,0 +1,229 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
+ +package install + +import ( + "errors" + "fmt" + "io/fs" + "os" + "path/filepath" + "runtime" + "strings" + + v1 "github.com/elastic/elastic-agent/pkg/api/v1" + "github.com/elastic/elastic-agent/pkg/component" +) + +const ( + FlavorBasic = "basic" + FlavorServers = "servers" + + DefaultFlavor = FlavorBasic + flavorFileName = ".flavor" +) + +type SkipFn func(relPath string) bool + +var ErrUnknownFlavor = fmt.Errorf("unknown flavor") + +type FlavorDefinition struct { + Name string + Components []string +} + +func UsedFlavor(topPath, defaultFlavor string) (string, error) { + filename := filepath.Join(topPath, flavorFileName) + content, err := os.ReadFile(filename) + if err != nil { + // file does not exist, flavor was not marked probably due to earlier version + // fallback to default if defined + if defaultFlavor != "" && os.IsNotExist(err) { + return defaultFlavor, nil + } + + // failed reading flavor, do not break behavior and apply none as widest + return "", err + } + + return string(content), nil +} + +func Flavor(detectedFlavor string, registryPath string, flavorsRegistry map[string][]string) (FlavorDefinition, error) { + if flavorsRegistry == nil { + f, err := os.Open(registryPath) + if err != nil { + return FlavorDefinition{}, err + } + defer f.Close() + manifest, err := v1.ParseManifest(f) + if err != nil { + return FlavorDefinition{}, err + } + flavorsRegistry = manifest.Package.Flavors + } + + components, found := flavorsRegistry[detectedFlavor] + if !found { + return FlavorDefinition{}, ErrUnknownFlavor + } + + return FlavorDefinition{detectedFlavor, components}, nil +} + +// SpecsForFlavor returns spec files associated with specific flavor +func SpecsForFlavor(flavor FlavorDefinition) []string { + specs := []string{} + for _, component := range flavor.Components { + specs = append(specs, fmt.Sprintf("%s.spec.yml", component)) + } + + return specs +} + +// ApplyFlavor scans agent components directory and removes anything +// that is not mapped and 
needed for currently used flavor +func ApplyFlavor(versionedHome string, flavor FlavorDefinition) error { + skipFn, err := SkipComponentsPathFn(versionedHome, flavor) + if err != nil { + return err + } + + componentsDir := filepath.Join(versionedHome, "components") + filesToRemove := []string{} + + err = filepath.Walk(componentsDir, func(path string, info fs.FileInfo, err error) error { + if errors.Is(err, fs.ErrNotExist) { + return nil + } + if err != nil { + return fmt.Errorf("walk on %q failed: %w", componentsDir, err) + } + + if skipFn != nil && skipFn(path) { + // remove as file is not needed + filesToRemove = append(filesToRemove, path) + } + + return nil + }) + if err != nil { + return err + } + + for _, ftr := range filesToRemove { + if removeErr := os.RemoveAll(ftr); removeErr != nil && !os.IsNotExist(removeErr) { + err = removeErr + } + } + + return err +} + +// SkipComponentsPathFn returns a skip function that returns true if +// path is not part of any component associated with flavor. +// Paths are detected from spec files located in versionedHome/components +func SkipComponentsPathFn(versionedHome string, flavor FlavorDefinition) (SkipFn, error) { + if flavor.Name == "" { + return func(relPath string) bool { return false }, nil + } + allowedSubpaths, err := allowedSubpathsForFlavor(versionedHome, flavor) + if err != nil { + return nil, err + } + + return SkipComponentsPathWithSubpathsFn(allowedSubpaths) +} + +// SkipComponentsPathWithSubpathsFn with already known set of allowed subpaths. 
+// allow list is not detected from spec files in this case +func SkipComponentsPathWithSubpathsFn(allowedSubpaths []string) (SkipFn, error) { + return func(relPath string) bool { + return skipComponentsPath(relPath, allowedSubpaths) + }, nil +} + +func skipComponentsPath(relPath string, allowedSubpaths []string) bool { + if allowedSubpaths == nil { + return false + } + if runtime.GOOS == "windows" { + relPath = strings.ReplaceAll(relPath, "\\", "/") + } + componentsDir := "/components/" + componentsIdx := strings.Index(relPath, componentsDir) + if componentsIdx == -1 { + // not a components subpath, not blocking + return false + } + + subPath := relPath[componentsIdx+len(componentsDir):] + + subDirsSuffix := `/*` + for _, allowedSubpath := range allowedSubpaths { + if allowedSubpath == subPath { + // exact match is allowed + return false + } + if strings.HasSuffix(allowedSubpath, subDirsSuffix) { + trimmed := strings.TrimSuffix(allowedSubpath, "*") + dirName := strings.TrimSuffix(allowedSubpath, subDirsSuffix) + // it is either same dir (create dir) or has dir prefix (copy content) + // do not evaluate true for subPath=abcd/ef and trimmed=ab + if subPath == dirName || strings.HasPrefix(subPath, trimmed) { + return false + } + } + } + + return true +} + +// markFlavor persists flavor used with agent. +// This mark is used during upgrades in order to upgrade to proper set. 
+func markFlavor(topPath string, flavor string) error { + filename := filepath.Join(topPath, flavorFileName) + if err := os.WriteFile(filename, []byte(flavor), 0o600); err != nil { + return fmt.Errorf("failed marking flavor: %w", err) + } + + return nil +} + +// allowedSubpathsForFlavor returns allowed /components/* subpath for specific flavors +// includes components, spec files, config files and other files specified in spec +func allowedSubpathsForFlavor(versionedHome string, flavor FlavorDefinition) ([]string, error) { + var sourceComponentsDir string + if versionedHome != "" { + sourceComponentsDir = filepath.Join(versionedHome, "components") + } + + allowedPaths := make([]string, 0) + for _, component := range flavor.Components { + subpaths, err := subpathsForComponent(component, sourceComponentsDir) + if err != nil { + return nil, err + } + allowedPaths = append(allowedPaths, subpaths...) + } + + return allowedPaths, nil +} + +func subpathsForComponent(componentName, sourceComponentsDir string) ([]string, error) { + if componentName == "" { + return nil, fmt.Errorf("empty component name") + } + specFilename := fmt.Sprintf("%s.spec.yml", componentName) + content, err := os.ReadFile(filepath.Join(sourceComponentsDir, specFilename)) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + return nil, err + } + + return component.ParseComponentFiles(content, specFilename, true) +} diff --git a/internal/pkg/agent/install/flavors_test.go b/internal/pkg/agent/install/flavors_test.go new file mode 100644 index 00000000000..82542ec32b9 --- /dev/null +++ b/internal/pkg/agent/install/flavors_test.go @@ -0,0 +1,460 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
+ +package install + +import ( + "os" + "path/filepath" + "runtime" + "sort" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var flavorsRegistry = map[string][]string{ + "basic": {"agentbeat", "endpoint-security", "pf-host-agent"}, + "servers": {"agentbeat", "endpoint-security", "pf-host-agent", "cloudbeat", "apm-server", "fleet-server", "pf-elastic-symbolizer", "pf-elastic-collector"}, +} + +func TestSubpathsForComponent(t *testing.T) { + binarySuffix := "" + if runtime.GOOS == "windows" { + binarySuffix = ".exe" + } + tests := []struct { + name string + component string + wantError bool + errorContains string + wantSubpaths []string + specFileContent string + }{ + { + name: "empty component returns error", + component: "", + wantError: true, + errorContains: "empty component name", + }, + { + name: "basic component returns paths", + component: "agentbeat", + wantSubpaths: []string{ + "agentbeat" + binarySuffix, + "agentbeat.yml", + "agentbeat.spec.yml", + }, + specFileContent: "version: 2", + }, + { + name: "server component without spec file returns nothing", + component: "apm-server", + wantSubpaths: nil, + }, + { + name: "server component with spec paths returns paths", + component: "apm-server", + wantSubpaths: []string{ + "apm-server" + binarySuffix, + "apm-server.yml", + "apm-server.spec.yml", + "modules/*", + "apm.bundle.zip", + }, + specFileContent: `component_files: +- modules/* +- apm.bundle.zip`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + //write spec file content to temp file + tmpDir := t.TempDir() + if tt.specFileContent != "" { + specFilePath := filepath.Join(tmpDir, tt.component+".spec.yml") + err := os.WriteFile(specFilePath, []byte(tt.specFileContent), 0644) + require.NoError(t, err) + defer os.Remove(specFilePath) + } + + subpaths, err := subpathsForComponent(tt.component, tmpDir) + + if tt.wantError { + require.Error(t, err) + assert.Contains(t, 
err.Error(), tt.errorContains) + return + } + + require.NoError(t, err) + sort.Strings(tt.wantSubpaths) + sort.Strings(subpaths) + assert.EqualValues(t, tt.wantSubpaths, subpaths) + }) + } +} + +func TestAllowedSubpathsForFlavor(t *testing.T) { + binarySuffix := "" + if runtime.GOOS == "windows" { + binarySuffix = ".exe" + } + versionedHome := t.TempDir() + tests := []struct { + name string + flavor string + specFiles map[string]string + wantError bool + errorContains string + wantSubpaths []string + }{ + { + name: "basic flavor with specs", + flavor: FlavorBasic, + specFiles: map[string]string{ + "agentbeat": "component_files:\n- modules/*\n- data/*\n", + }, + wantSubpaths: []string{ + "agentbeat" + binarySuffix, + "agentbeat.yml", + "agentbeat.spec.yml", + "modules/*", + "data/*", + }, + }, + { + name: "unknown flavor returns error", + flavor: "unknown", + wantError: true, + errorContains: ErrUnknownFlavor.Error(), + }, + { + name: "empty version home returns default paths", + flavor: FlavorBasic, + wantSubpaths: []string{}, + }, + { + name: "servers flavor with specs", + flavor: FlavorServers, + specFiles: map[string]string{ + "agentbeat": "component_files:\n- modules/*\n", + "apm-server": "component_files:\n- apm.bundle.zip\n", + "cloudbeat": "component_files:\n- rules/*\n", + }, + wantSubpaths: []string{ + "agentbeat" + binarySuffix, + "agentbeat.yml", + "agentbeat.spec.yml", + "modules/*", + "apm-server" + binarySuffix, + "apm-server.yml", + "apm-server.spec.yml", + "apm.bundle.zip", + "cloudbeat" + binarySuffix, + "cloudbeat.yml", + "cloudbeat.spec.yml", + "rules/*", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create temp dir with spec files + componentsDir := filepath.Join(versionedHome, "components") + require.NoError(t, os.MkdirAll(componentsDir, 0755)) + + // Write spec files + for component, content := range tt.specFiles { + specPath := filepath.Join(componentsDir, component+".spec.yml") + require.NoError(t, 
os.WriteFile(specPath, []byte(content), 0644)) + defer os.Remove(specPath) + } + + // Test function + flavor, err := Flavor(tt.flavor, "", flavorsRegistry) + if tt.wantError { + require.Error(t, err) + assert.Contains(t, err.Error(), tt.errorContains) + return + } + + subpaths, err := allowedSubpathsForFlavor(versionedHome, flavor) + assert.NoError(t, err) + + require.NoError(t, err) + sort.Strings(tt.wantSubpaths) + sort.Strings(subpaths) + assert.Equal(t, tt.wantSubpaths, subpaths) + }) + } +} + +func TestSkipComponentsPathWithSubpathsFn(t *testing.T) { + tests := []struct { + name string + allowedPaths []string + testPaths map[string]bool // path -> should skip + }{ + // Case 1: Empty allowed paths + { + name: "empty allowed paths skips nothing", + allowedPaths: nil, + testPaths: map[string]bool{ + filepath.Join("data", "components", "test.txt"): false, + filepath.Join("data", "components", "dir", "file"): false, + }, + }, + + // Case 2: Exact file matches + { + name: "exact matches", + allowedPaths: []string{ + "agentbeat.exe", + "agentbeat.yml", + }, + testPaths: map[string]bool{ + filepath.Join("data", "components", "agentbeat.exe"): false, // allowed + filepath.Join("data", "components", "other.exe"): true, // skipped + }, + }, + + // Case 3: Directory wildcards + { + name: "directory wildcards", + allowedPaths: []string{ + "modules/*", + }, + testPaths: map[string]bool{ + filepath.Join("data", "components", "modules", "mod1"): false, // allowed + filepath.Join("data", "components", "other", "logs"): true, // skipped + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + skipFn, err := SkipComponentsPathWithSubpathsFn(tt.allowedPaths) + require.NoError(t, err) + + for path, wantSkip := range tt.testPaths { + got := skipFn(path) + assert.Equal(t, wantSkip, got, + "Path %s: wanted skip=%v, got skip=%v", path, wantSkip, got) + } + }) + } +} + +func TestSkipComponentsPathFn(t *testing.T) { + tests := []struct { + name string + 
flavor string + specFiles map[string]string // component -> spec content + testPaths map[string]bool // path -> should skip + wantError bool + errorContains string + }{ + { + name: "basic flavor components", + flavor: FlavorBasic, + specFiles: map[string]string{ + "agentbeat": "component_files:\n- data/*\n- logs/*\n", + }, + testPaths: map[string]bool{ + filepath.Join("data", "components", "data", "file.txt"): false, + filepath.Join("data", "components", "logs", "error.log"): false, + filepath.Join("data", "components", "rules", "rule1.yml"): true, + }, + }, + { + name: "servers flavor components", + flavor: FlavorServers, + specFiles: map[string]string{ + "cloudbeat": "component_files:\n- rules/*\n", + "apm-server": "component_files:\n- apm.bundle.zip\n", + }, + testPaths: map[string]bool{ + filepath.Join("data", "components", "rules", "rule1.yml"): false, + filepath.Join("data", "components", "apm.bundle.zip"): false, + filepath.Join("data", "components", "file.txt"): true, + }, + }, + { + name: "invalid flavor", + flavor: "invalid", + wantError: true, + errorContains: ErrUnknownFlavor.Error(), + }, + { + name: "no spec file", + flavor: FlavorBasic, + testPaths: map[string]bool{ + filepath.Join("data", "components", "agentbeat.exe"): true, + }, + }, + { + name: "no flavor falls back to keep all", + flavor: "", + specFiles: map[string]string{ + "agentbeat": "component_files:\n- data/*\n", + }, + testPaths: map[string]bool{ + filepath.Join("data", "components", "data", "file.txt"): false, + filepath.Join("data", "components", "logs", "error.log"): false, + filepath.Join("data", "components", "rules", "rule1.yml"): false, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Setup temp dir + tmpDir := t.TempDir() + if len(tt.specFiles) > 0 { + componentsDir := filepath.Join(tmpDir, "components") + require.NoError(t, os.MkdirAll(componentsDir, 0755)) + + // Create spec files + for component, content := range tt.specFiles { + specPath 
:= filepath.Join(componentsDir, component+".spec.yml") + require.NoError(t, os.WriteFile(specPath, []byte(content), 0644)) + } + } + + // Test function + flavor, err := Flavor(tt.flavor, "", flavorsRegistry) + if tt.wantError { + require.Error(t, err) + assert.Contains(t, err.Error(), tt.errorContains) + return + } + + skipFn, err := SkipComponentsPathFn(tmpDir, flavor) + assert.NoError(t, err) + + require.NoError(t, err) + require.NotNil(t, skipFn) + + // Test paths + for path, wantSkip := range tt.testPaths { + got := skipFn(path) + assert.Equal(t, wantSkip, got, + "Path %s: wanted skip=%v, got skip=%v", path, wantSkip, got) + } + }) + } +} + +func TestFlavor(t *testing.T) { + tests := []struct { + name string + setupFn func(dir string) error + defaultFlavor string + wantFlavor string + wantError bool + errorIs error + }{ + { + name: "no flavor file uses default", + defaultFlavor: FlavorBasic, + wantFlavor: FlavorBasic, + }, + { + name: "valid flavor file", + setupFn: func(dir string) error { + return os.WriteFile(filepath.Join(dir, flavorFileName), + []byte(FlavorServers), 0644) + }, + wantFlavor: FlavorServers, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Setup test directory + tmpDir := t.TempDir() + if tt.setupFn != nil { + require.NoError(t, tt.setupFn(tmpDir)) + } + + // Test function + got, err := UsedFlavor(tmpDir, tt.defaultFlavor) + + if tt.wantError { + require.Error(t, err) + if tt.errorIs != nil { + assert.ErrorIs(t, err, tt.errorIs) + } + return + } + + require.NoError(t, err) + assert.Equal(t, tt.wantFlavor, got) + }) + } +} + +func TestSpecsForFlavor(t *testing.T) { + tests := []struct { + name string + flavor string + wantSpecs []string + wantError bool + errorContains string + }{ + { + name: "basic flavor", + flavor: FlavorBasic, + wantSpecs: []string{ + "agentbeat.spec.yml", + "endpoint-security.spec.yml", + "pf-host-agent.spec.yml", + }, + }, + { + name: "servers flavor", + flavor: FlavorServers, + 
wantSpecs: []string{ + "agentbeat.spec.yml", + "endpoint-security.spec.yml", + "pf-host-agent.spec.yml", + "cloudbeat.spec.yml", + "apm-server.spec.yml", + "fleet-server.spec.yml", + "pf-elastic-symbolizer.spec.yml", + "pf-elastic-collector.spec.yml", + }, + }, + { + name: "empty flavor", + flavor: "", + wantError: true, + errorContains: ErrUnknownFlavor.Error(), + }, + { + name: "unknown flavor", + flavor: "unknown", + wantError: true, + errorContains: ErrUnknownFlavor.Error(), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + flavor, err := Flavor(tt.flavor, "", flavorsRegistry) + if tt.wantError { + require.Error(t, err) + assert.Contains(t, err.Error(), tt.errorContains) + return + } + assert.NoError(t, err) + + specs := SpecsForFlavor(flavor) + assert.ElementsMatch(t, tt.wantSpecs, specs) + }) + } +} diff --git a/internal/pkg/agent/install/install.go b/internal/pkg/agent/install/install.go index 966df65703a..d0c397c944f 100644 --- a/internal/pkg/agent/install/install.go +++ b/internal/pkg/agent/install/install.go @@ -40,7 +40,7 @@ const ( ) // Install installs Elastic Agent persistently on the system including creating and starting its service. 
-func Install(cfgFile, topPath string, unprivileged bool, log *logp.Logger, pt *progressbar.ProgressBar, streams *cli.IOStreams, customUser, customGroup, userPassword string) (utils.FileOwner, error) { +func Install(cfgFile, topPath string, unprivileged bool, log *logp.Logger, pt *progressbar.ProgressBar, streams *cli.IOStreams, customUser, customGroup, userPassword string, flavor string) (utils.FileOwner, error) { dir, err := findDirectory() if err != nil { return utils.FileOwner{}, errors.New(err, "failed to discover the source directory for installation", errors.TypeFilesystem) @@ -75,12 +75,29 @@ func Install(cfgFile, topPath string, unprivileged bool, log *logp.Logger, pt *p pt.Describe("Copying install files") copyConcurrency := calculateCopyConcurrency(streams) - err = copyFiles(copyConcurrency, pathMappings, dir, topPath) + + skipFn := func(relPath string) bool { return false } + if flavor != "" { + flavorDefinition, err := Flavor(flavor, "", manifest.Package.Flavors) + if err != nil { + return utils.FileOwner{}, err + } + skipFn, err = SkipComponentsPathFn(paths.VersionedHome(dir), flavorDefinition) + if err != nil { + return utils.FileOwner{}, err + } + } + + err = copyFiles(copyConcurrency, pathMappings, dir, topPath, skipFn) if err != nil { pt.Describe("Error copying files") return utils.FileOwner{}, err } + if err := markFlavor(topPath, flavor); err != nil { + return utils.FileOwner{}, fmt.Errorf("failed marking flavor %q at %q: %w", flavor, topPath, err) + } + pt.Describe("Successfully copied files") // place shell wrapper, if present on platform @@ -211,7 +228,7 @@ func calculateCopyConcurrency(streams *cli.IOStreams) int { return copyConcurrency } -func copyFiles(copyConcurrency int, pathMappings []map[string]string, srcDir string, topPath string) error { +func copyFiles(copyConcurrency int, pathMappings []map[string]string, srcDir string, topPath string, skipFn func(string) bool) error { // copy source into install path // these are needed to keep 
track of what we already copied @@ -232,6 +249,18 @@ func copyFiles(copyConcurrency int, pathMappings []map[string]string, srcDir str OnSymlink: func(_ string) copy.SymlinkAction { return copy.Shallow }, + Skip: func(srcinfo os.FileInfo, src, dest string) (bool, error) { + relPath, err := filepath.Rel(srcDir, src) + if err != nil { + return false, fmt.Errorf("calculating relative path for %s: %w", src, err) + } + + if skipFn != nil && skipFn(relPath) { + return true, nil + } + + return false, nil + }, Sync: true, NumOfWorkers: int64(copyConcurrency), }) @@ -281,6 +310,11 @@ func copyFiles(copyConcurrency int, pathMappings []map[string]string, srcDir str if err != nil { return false, fmt.Errorf("calculating relative path for %s: %w", src, err) } + + if skipFn != nil && skipFn(relPath) { + return true, nil + } + // check if we already handled this path as part of the mappings: if we did, skip it relPath = filepath.ToSlash(relPath) _, ok := copiedFiles[relPath] diff --git a/internal/pkg/agent/install/install_test.go b/internal/pkg/agent/install/install_test.go index 2f407e6baab..f2716e493f6 100644 --- a/internal/pkg/agent/install/install_test.go +++ b/internal/pkg/agent/install/install_test.go @@ -190,7 +190,7 @@ func TestCopyFiles(t *testing.T) { // not interested in speed benchmarks, use an arbitrary copyConcurrency value copyConcurrency := 4 - err := copyFiles(copyConcurrency, tc.mappings, tmpSrc, tmpDst) + err := copyFiles(copyConcurrency, tc.mappings, tmpSrc, tmpDst, nil) assert.NoError(t, err) for _, ef := range tc.expectedFiles { diff --git a/pkg/api/v1/manifest.go b/pkg/api/v1/manifest.go index ad7c7a4ef97..6650ee6e61b 100644 --- a/pkg/api/v1/manifest.go +++ b/pkg/api/v1/manifest.go @@ -20,6 +20,7 @@ type PackageDesc struct { Hash string `yaml:"hash,omitempty" json:"hash,omitempty"` VersionedHome string `yaml:"versioned-home,omitempty" json:"versionedHome,omitempty"` PathMappings []map[string]string `yaml:"path-mappings,omitempty" 
json:"pathMappings,omitempty"` + Flavors map[string][]string `yaml:"flavors,omitempty" json:"flavors,omitempty"` } type PackageManifest struct { diff --git a/pkg/component/load.go b/pkg/component/load.go index f077eec20e8..057a1818989 100644 --- a/pkg/component/load.go +++ b/pkg/component/load.go @@ -9,8 +9,12 @@ import ( "fmt" "os" "path/filepath" + "runtime" + "strings" "github.com/elastic/go-ucfg/yaml" + + yamlv3 "gopkg.in/yaml.v3" ) const ( @@ -64,6 +68,35 @@ func SkipBinaryCheck() LoadRuntimeOption { } } +// ParseComponentFiles parses spec files and returns list of associated paths with component. +// Default set consisting of binary, spec file and default config file is always present +func ParseComponentFiles(content []byte, filename string, includeDefaults bool) ([]string, error) { + def := struct { + Files []string `yaml:"component_files"` + }{} + + if err := yamlv3.Unmarshal(content, &def); err != nil { + return nil, err + } + + var files []string + files = append(files, def.Files...) + + if includeDefaults { + component := strings.TrimSuffix(filepath.Base(filename), ".spec.yml") + binaryName := component + if runtime.GOOS == "windows" { + binaryName += ".exe" + } + files = append(files, + binaryName, + fmt.Sprintf("%s.spec.yml", component), + fmt.Sprintf("%s.yml", component)) + } + + return files, nil +} + // LoadRuntimeSpecs loads all the component input specifications from the provided directory. // // Returns a mapping of the input to binary name with specification for that input. 
The filenames in the directory diff --git a/pkg/component/load_test.go b/pkg/component/load_test.go index cd50ef8e9dc..4857cef3d2a 100644 --- a/pkg/component/load_test.go +++ b/pkg/component/load_test.go @@ -7,12 +7,115 @@ package component import ( "os" "path/filepath" + "runtime" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +func TestParseComponentFiles(t *testing.T) { + binarySuffix := "" + if runtime.GOOS == "windows" { + binarySuffix = ".exe" + } + tests := []struct { + name string + content []byte + filename string + includeDefaults bool + want []string + wantErr bool + }{ + { + name: "empty content with no defaults", + content: []byte(`{}`), + filename: "test.spec.yml", + includeDefaults: false, + want: []string{}, + wantErr: false, + }, + { + name: "empty content with defaults", + content: []byte(`{}`), + filename: "test.spec.yml", + includeDefaults: true, + want: []string{ + "test" + binarySuffix, // binary name + "test.spec.yml", // spec file + "test.yml", // default config + }, + wantErr: false, + }, + { + name: "empty content with defaults, long name", + content: []byte(`{}`), + filename: filepath.Join("this", "is", "path", "test.spec.yml"), + includeDefaults: true, + want: []string{ + "test" + binarySuffix, // binary name + "test.spec.yml", // spec file + "test.yml", // default config + }, + wantErr: false, + }, + { + name: "with explicit files", + content: []byte(` +component_files: + - "module/config/*" + - "module/schemas/*" +`), + filename: "test.spec.yml", + includeDefaults: false, + want: []string{ + "module/config/*", + "module/schemas/*", + }, + wantErr: false, + }, + { + name: "with explicit files and defaults", + content: []byte(` +component_files: + - "module/config/*" + - "module/schemas/*" +`), + filename: "test.spec.yml", + includeDefaults: true, + want: []string{ + "module/config/*", + "module/schemas/*", + "test" + binarySuffix, // binary name + "test.spec.yml", // spec file + "test.yml", // 
default config + }, + wantErr: false, + }, + { + name: "invalid yaml content", + content: []byte(`{invalid`), + filename: "test.spec.yml", + includeDefaults: true, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := ParseComponentFiles(tt.content, tt.filename, tt.includeDefaults) + + if tt.wantErr { + require.Error(t, err) + return + } + + require.NoError(t, err) + assert.ElementsMatch(t, tt.want, got) + }) + } +} + func TestLoadRuntimeSpecs(t *testing.T) { for _, platform := range GlobalPlatforms { t.Run(platform.String(), func(t *testing.T) { diff --git a/pkg/testing/fixture_install.go b/pkg/testing/fixture_install.go index d983e6fb62f..dd15b4bd204 100644 --- a/pkg/testing/fixture_install.go +++ b/pkg/testing/fixture_install.go @@ -110,6 +110,7 @@ type InstallOpts struct { DelayEnroll bool // --delay-enroll Develop bool // --develop, not supported for DEB and RPM. Calling Install() sets Namespace to the development namespace so that checking only for a Namespace is sufficient. Namespace string // --namespace, not supported for DEB and RPM. 
+ InstallServers bool // --install-servers Privileged bool // inverse of --unprivileged (as false is the default) Username string @@ -153,6 +154,10 @@ func (i *InstallOpts) ToCmdArgs() []string { } } + if i.InstallServers { + args = append(args, "--install-servers") + } + if i.Username != "" { args = append(args, "--user", i.Username) } diff --git a/specs/apm-server.spec.yml b/specs/apm-server.spec.yml index 4f8017544d1..08457d63982 100644 --- a/specs/apm-server.spec.yml +++ b/specs/apm-server.spec.yml @@ -1,4 +1,8 @@ version: 2 +component_files: + - "java-attacher.jar" + - "install-service.ps1" + - "uninstall-service.ps1" inputs: - name: apm description: "APM Server" diff --git a/specs/cloudbeat.spec.yml b/specs/cloudbeat.spec.yml index 58b9d16e872..b93ff94a261 100644 --- a/specs/cloudbeat.spec.yml +++ b/specs/cloudbeat.spec.yml @@ -1,4 +1,6 @@ version: 2 +component_files: + - bundle.tar.gz inputs: - name: cloudbeat description: "Cloudbeat" diff --git a/specs/endpoint-security.spec.yml b/specs/endpoint-security.spec.yml index c8dd247d23f..50ff5f2efc7 100644 --- a/specs/endpoint-security.spec.yml +++ b/specs/endpoint-security.spec.yml @@ -1,4 +1,6 @@ version: 2 +component_files: + - endpoint-security-resources.zip inputs: - name: endpoint description: "Endpoint Security" diff --git a/testing/integration/fleetserver_test.go b/testing/integration/fleetserver_test.go index 2d75434fb71..a7f4c49b406 100644 --- a/testing/integration/fleetserver_test.go +++ b/testing/integration/fleetserver_test.go @@ -115,6 +115,7 @@ func TestInstallFleetServerBootstrap(t *testing.T) { Policy: policy.ID, Port: 8220, }, + InstallServers: true, } out, err := fixture.Install(ctx, opts) if err != nil { @@ -160,6 +161,7 @@ func TestInstallFleetServerBootstrap(t *testing.T) { Policy: policy.ID, Port: 8220, }, + InstallServers: true, } out, err := fixture.Install(ctx, opts) if err != nil { diff --git a/testing/integration/install_test.go b/testing/integration/install_test.go index 
2c3a0d9b827..7d7da4ad42e 100644 --- a/testing/integration/install_test.go +++ b/testing/integration/install_test.go @@ -16,6 +16,7 @@ import ( "os" "path/filepath" "runtime" + "slices" "strings" "testing" "time" @@ -25,6 +26,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/install" + aTesting "github.com/elastic/elastic-agent/pkg/testing" atesting "github.com/elastic/elastic-agent/pkg/testing" "github.com/elastic/elastic-agent/pkg/testing/define" "github.com/elastic/elastic-agent/pkg/testing/tools/check" @@ -33,6 +35,11 @@ import ( "github.com/elastic/elastic-agent/testing/installtest" ) +type componentPresenceDefinition struct { + name string + platforms []string +} + func TestInstallWithoutBasePath(t *testing.T) { define.Require(t, define.Requirements{ Group: Default, @@ -157,6 +164,114 @@ func TestInstallWithBasePath(t *testing.T) { t.Run("check agent package version", testAgentPackageVersion(ctx, fixture, true)) t.Run("check second agent installs with --namespace", testSecondAgentCanInstall(ctx, fixture, basePath, false, opts)) + t.Run("check components set", + testComponentsPresence(ctx, fixture, + []componentPresenceDefinition{ + {"agentbeat", []string{"windows", "linux", "darwin"}}, + {"endpoint-security", []string{"windows", "linux", "darwin"}}, + {"pf-host-agent", []string{"linux"}}, + }, + []componentPresenceDefinition{ + {"cloudbeat", []string{"linux"}}, + {"apm-server", []string{"windows", "linux", "darwin"}}, + {"fleet-server", []string{"windows", "linux", "darwin"}}, + {"pf-elastic-symbolizer", []string{"linux"}}, + {"pf-elastic-collector", []string{"linux"}}, + })) + + // Make sure uninstall from within the topPath fails on Windows + if runtime.GOOS == "windows" { + cwd, err := os.Getwd() + require.NoErrorf(t, err, "GetWd failed: %s", err) + err = os.Chdir(topPath) + require.NoErrorf(t, err, "Chdir to topPath failed: %s", err) + t.Cleanup(func() { + _ = 
os.Chdir(cwd) + }) + out, err = fixture.Uninstall(ctx, &atesting.UninstallOpts{Force: true}) + require.Error(t, err, "uninstall should have failed") + require.Containsf(t, string(out), "uninstall must be run from outside the installed path", "expected error string not found in: %s err: %s", out, err) + } +} + +func TestInstallServersWithBasePath(t *testing.T) { + define.Require(t, define.Requirements{ + Group: Default, + // We require sudo for this test to run + // `elastic-agent install` (even though it will + // be installed as non-root). + Sudo: true, + + // It's not safe to run this test locally as it + // installs Elastic Agent. + Local: false, + }) + + // Get path to Elastic Agent executable + fixture, err := define.NewFixtureFromLocalBuild(t, define.Version()) + require.NoError(t, err) + + ctx, cancel := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) + defer cancel() + + // Prepare the Elastic Agent so the binary is extracted and ready to use. + err = fixture.Prepare(ctx) + require.NoError(t, err) + + // When installing with unprivileged using a base path the + // base needs to be accessible by the `elastic-agent-user` user that will be + // executing the process, but is not created yet. Using a base that exists + // and is known to be accessible by standard users, ensures this tests + // works correctly and will not hit a permission issue when spawning the + // elastic-agent service. + var basePath string + switch runtime.GOOS { + case define.Linux: + basePath = `/usr` + case define.Windows: + basePath = `C:\` + default: + // Set up random temporary directory to serve as base path for Elastic Agent + // installation. + tmpDir := t.TempDir() + basePath = filepath.Join(tmpDir, strings.ToLower(randStr(8))) + } + + // Run `elastic-agent install`. We use `--force` to prevent interactive + // execution. 
+ opts := atesting.InstallOpts{ + BasePath: basePath, + Force: true, + Privileged: false, + InstallServers: true, + } + out, err := fixture.Install(ctx, &opts) + if err != nil { + t.Logf("install output: %s", out) + require.NoError(t, err) + } + + // Check that Agent was installed in the custom base path + topPath := filepath.Join(basePath, "Elastic", "Agent") + require.NoError(t, installtest.CheckSuccess(ctx, fixture, topPath, &installtest.CheckOpts{Privileged: opts.Privileged})) + + t.Run("check agent package version", testAgentPackageVersion(ctx, fixture, true)) + t.Run("check second agent installs with --namespace", testSecondAgentCanInstall(ctx, fixture, basePath, false, opts)) + + t.Run("check components set", + testComponentsPresence(ctx, fixture, + []componentPresenceDefinition{ + {"agentbeat", []string{"windows", "linux", "darwin"}}, + {"endpoint-security", []string{"windows", "linux", "darwin"}}, + {"pf-host-agent", []string{"linux"}}, + {"cloudbeat", []string{"linux"}}, + {"apm-server", []string{"windows", "linux", "darwin"}}, + {"fleet-server", []string{"windows", "linux", "darwin"}}, + {"pf-elastic-symbolizer", []string{"linux"}}, + {"pf-elastic-collector", []string{"linux"}}, + }, + []componentPresenceDefinition{})) + // Make sure uninstall from within the topPath fails on Windows if runtime.GOOS == "windows" { cwd, err := os.Getwd() @@ -303,6 +418,43 @@ func testInstallWithoutBasePathWithCustomUser(ctx context.Context, t *testing.T, } } +func testComponentsPresence(ctx context.Context, fixture *atesting.Fixture, requiredComponents []componentPresenceDefinition, unwantedComponents []componentPresenceDefinition) func(*testing.T) { + return func(t *testing.T) { + agentWorkDir := fixture.WorkDir() + componentsDir, err := aTesting.FindComponentsDir(agentWorkDir) + require.NoError(t, err) + + componentsPaths := func(component string) []string { + binarySuffix := "" + if runtime.GOOS == "windows" { + binarySuffix += ".exe" + } + return []string{ + 
filepath.Join(componentsDir, component+binarySuffix), + filepath.Join(componentsDir, component+".spec.yml"), + } + } + + for _, requiredComponent := range requiredComponents { + for _, reqPath := range componentsPaths(requiredComponent.name) { + _, err := os.Stat(reqPath) + if slices.Contains(requiredComponent.platforms, runtime.GOOS) { + require.NoErrorf(t, err, "expecting component %q to be present: %v", requiredComponent, err) + } else { + require.ErrorIs(t, err, os.ErrNotExist, "expecting component %q to be missing but was found", requiredComponent) + } + } + } + + for _, unwantedComponent := range unwantedComponents { + for _, reqPath := range componentsPaths(unwantedComponent.name) { + _, err := os.Stat(reqPath) + require.ErrorIs(t, err, os.ErrNotExist, "expecting component %q to be missing but was found", unwantedComponent) + } + } + } +} + // Tests that a second agent can be installed in an isolated namespace, using either --develop or --namespace. func testSecondAgentCanInstall(ctx context.Context, fixture *atesting.Fixture, basePath string, develop bool, installOpts atesting.InstallOpts) func(*testing.T) { return func(t *testing.T) { diff --git a/testing/integration/upgrade_standalone_same_commit_test.go b/testing/integration/upgrade_standalone_same_commit_test.go index e493d184ac2..3d508f44a81 100644 --- a/testing/integration/upgrade_standalone_same_commit_test.go +++ b/testing/integration/upgrade_standalone_same_commit_test.go @@ -368,8 +368,8 @@ func generateNewManifestContent(t *testing.T, manifestReader io.Reader, newVersi t.Logf("read old manifest: %+v", oldManifest) // replace manifest content - newManifest, err := mage.GeneratePackageManifest("elastic-agent", newVersion.String(), oldManifest.Package.Snapshot, oldManifest.Package.Hash, oldManifest.Package.Hash[:6]) - require.NoErrorf(t, err, "GeneratePackageManifest(%v, %v, %v, %v) failed", newVersion.String(), oldManifest.Package.Snapshot, oldManifest.Package.Hash, oldManifest.Package.Hash[:6]) + 
newManifest, err := mage.GeneratePackageManifest("elastic-agent", newVersion.String(), oldManifest.Package.Snapshot, oldManifest.Package.Hash, oldManifest.Package.Hash[:6], nil) + require.NoErrorf(t, err, "GeneratePackageManifest(%v, %v, %v, %v, %v) failed", newVersion.String(), oldManifest.Package.Snapshot, oldManifest.Package.Hash, oldManifest.Package.Hash[:6], nil) t.Logf("generated new manifest:\n%s", newManifest) return newManifest From 300d88e3e0c454a8f2340459c0b83aa1c7a4ac0a Mon Sep 17 00:00:00 2001 From: Christos Markou Date: Thu, 30 Jan 2025 13:00:57 +0200 Subject: [PATCH 06/17] Add redis and nginx otel receivers (#6627) * Add redis and nginx otel receivers Signed-off-by: ChrsMark * Apply suggestions from code review Co-authored-by: Andrzej Stencel --------- Signed-off-by: ChrsMark Co-authored-by: Andrzej Stencel --- NOTICE.txt | 758 ++++++++++++++++++ ...1738160406-add_redis_nginx_otel_comps.yaml | 32 + go.mod | 5 + go.sum | 14 + internal/pkg/otel/README.md | 2 + internal/pkg/otel/components.go | 4 + 6 files changed, 815 insertions(+) create mode 100644 changelog/fragments/1738160406-add_redis_nginx_otel_comps.yaml diff --git a/NOTICE.txt b/NOTICE.txt index 89f1f10eb9d..99b8eac5e82 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -10529,6 +10529,217 @@ Contents of probable licence file $GOMODCACHE/github.com/open-telemetry/opentele limitations under the License. 
+-------------------------------------------------------------------------------- +Dependency : github.com/open-telemetry/opentelemetry-collector-contrib/receiver/nginxreceiver +Version: v0.117.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/nginxreceiver@v0.117.0/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + -------------------------------------------------------------------------------- Dependency : github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver Version: v0.117.0 @@ -10951,6 +11162,217 @@ Contents of probable licence file $GOMODCACHE/github.com/open-telemetry/opentele limitations under the License. +-------------------------------------------------------------------------------- +Dependency : github.com/open-telemetry/opentelemetry-collector-contrib/receiver/redisreceiver +Version: v0.117.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/redisreceiver@v0.117.0/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + -------------------------------------------------------------------------------- Dependency : github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver Version: v0.117.0 @@ -36403,6 +36825,66 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +-------------------------------------------------------------------------------- +Dependency : github.com/bsm/ginkgo/v2 +Version: v2.12.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/bsm/ginkgo/v2@v2.12.0/LICENSE: + +Copyright (c) 2013-2014 Onsi Fakhouri + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/bsm/gomega +Version: v1.27.10 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/bsm/gomega@v1.27.10/LICENSE: + +Copyright (c) 2013-2014 Onsi Fakhouri + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + -------------------------------------------------------------------------------- Dependency : github.com/bugsnag/bugsnag-go Version: v0.0.0-20141110184014-b1d153021fcd @@ -40755,6 +41237,37 @@ THE SOFTWARE. +-------------------------------------------------------------------------------- +Dependency : github.com/dgryski/go-rendezvous +Version: v0.0.0-20200823014737-9f7001d12a5f +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/dgryski/go-rendezvous@v0.0.0-20200823014737-9f7001d12a5f/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2017-2020 Damian Gryski + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ + -------------------------------------------------------------------------------- Dependency : github.com/digitalocean/go-libvirt Version: v0.0.0-20240709142323-d8406205c752 @@ -69294,6 +69807,216 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-------------------------------------------------------------------------------- +Dependency : github.com/nginxinc/nginx-prometheus-exporter +Version: v0.11.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/nginxinc/nginx-prometheus-exporter@v0.11.0/LICENSE: + +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, +and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by +the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all +other entities that control, are controlled by, or are under common +control with that entity. For the purposes of this definition, +"control" means (i) the power, direct or indirect, to cause the +direction or management of such entity, whether by contract or +otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity +exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, +including but not limited to software source code, documentation +source, and configuration files. 
+ +"Object" form shall mean any form resulting from mechanical +transformation or translation of a Source form, including but +not limited to compiled object code, generated documentation, +and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or +Object form, made available under the License, as indicated by a +copyright notice that is included in or attached to the work +(an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object +form, that is based on (or derived from) the Work and for which the +editorial revisions, annotations, elaborations, or other modifications +represent, as a whole, an original work of authorship. For the purposes +of this License, Derivative Works shall not include works that remain +separable from, or merely link (or bind by name) to the interfaces of, +the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including +the original version of the Work and any modifications or additions +to that Work or Derivative Works thereof, that is intentionally +submitted to Licensor for inclusion in the Work by the copyright owner +or by an individual or Legal Entity authorized to submit on behalf of +the copyright owner. For the purposes of this definition, "submitted" +means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, +and issue tracking systems that are managed by, or on behalf of, the +Licensor for the purpose of discussing and improving the Work, but +excluding communication that is conspicuously marked or otherwise +designated in writing by the copyright owner as "Not a Contribution." 
+ +"Contributor" shall mean Licensor and any individual or Legal Entity +on behalf of whom a Contribution has been received by Licensor and +subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the +Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +(except as stated in this section) patent license to make, have made, +use, offer to sell, sell, import, and otherwise transfer the Work, +where such license applies only to those patent claims licensable +by such Contributor that are necessarily infringed by their +Contribution(s) alone or by combination of their Contribution(s) +with the Work to which such Contribution(s) was submitted. If You +institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work +or a Contribution incorporated within the Work constitutes direct +or contributory patent infringement, then any patent licenses +granted to You under this License for that Work shall terminate +as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the +Work or Derivative Works thereof in any medium, with or without +modifications, and in Source or Object form, provided that You +meet the following conditions: + +(a) You must give any other recipients of the Work or +Derivative Works a copy of this License; and + +(b) You must cause any modified files to carry prominent notices +stating that You changed the files; and + +(c) You must retain, in the Source form of any Derivative Works +that You distribute, all copyright, patent, trademark, and +attribution notices from the Source form of the Work, +excluding those notices that do not pertain to any part of +the Derivative Works; and + +(d) If the Work includes a "NOTICE" text file as part of its +distribution, then any Derivative Works that You distribute must +include a readable copy of the attribution notices contained +within such NOTICE file, excluding those notices that do not +pertain to any part of the Derivative Works, in at least one +of the following places: within a NOTICE text file distributed +as part of the Derivative Works; within the Source form or +documentation, if provided along with the Derivative Works; or, +within a display generated by the Derivative Works, if and +wherever such third-party notices normally appear. The contents +of the NOTICE file are for informational purposes only and +do not modify the License. You may add Your own attribution +notices within Derivative Works that You distribute, alongside +or as an addendum to the NOTICE text from the Work, provided +that such additional attribution notices cannot be construed +as modifying the License. 
+ +You may add Your own copyright statement to Your modifications and +may provide additional or different license terms and conditions +for use, reproduction, or distribution of Your modifications, or +for any such Derivative Works as a whole, provided Your use, +reproduction, and distribution of the Work otherwise complies with +the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, +any Contribution intentionally submitted for inclusion in the Work +by You to the Licensor shall be under the terms and conditions of +this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify +the terms of any separate license agreement you may have executed +with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade +names, trademarks, service marks, or product names of the Licensor, +except as required for reasonable and customary use in describing the +origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or +agreed to in writing, Licensor provides the Work (and each +Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied, including, without limitation, any warranties or conditions +of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +PARTICULAR PURPOSE. You are solely responsible for determining the +appropriateness of using or redistributing the Work and assume any +risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, +whether in tort (including negligence), contract, or otherwise, +unless required by applicable law (such as deliberate and grossly +negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, +incidental, or consequential damages of any character arising as a +result of this License or out of the use or inability to use the +Work (including but not limited to damages for loss of goodwill, +work stoppage, computer failure or malfunction, or any and all +other commercial damages or losses), even if such Contributor +has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing +the Work or Derivative Works thereof, You may choose to offer, +and charge a fee for, acceptance of support, warranty, indemnity, +or other liability obligations and/or rights consistent with this +License. However, in accepting such obligations, You may act only +on Your own behalf and on Your sole responsibility, not on behalf +of any other Contributor, and only if You agree to indemnify, +defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason +of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following +boilerplate notice, with the fields enclosed by brackets "{}" +replaced with your own identifying information. (Don't include +the brackets!) The text should be enclosed in the appropriate +comment syntax for the file format. We also recommend that a +file or class name and description of purpose be included on the +same "printed page" as the copyright notice for easier +identification within third-party archives. + +Copyright 2018 Nginx, Inc. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + -------------------------------------------------------------------------------- Dependency : github.com/nxadm/tail Version: v1.4.11 @@ -79721,6 +80444,41 @@ are those of the authors and should not be interpreted as representing official policies, either expressed or implied, of Richard Crowley. +-------------------------------------------------------------------------------- +Dependency : github.com/redis/go-redis/v9 +Version: v9.7.0 +Licence type (autodetected): BSD-2-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/redis/go-redis/v9@v9.7.0/LICENSE: + +Copyright (c) 2013 The github.com/redis/go-redis Authors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + -------------------------------------------------------------------------------- Dependency : github.com/relvacode/iso8601 Version: v1.6.0 diff --git a/changelog/fragments/1738160406-add_redis_nginx_otel_comps.yaml b/changelog/fragments/1738160406-add_redis_nginx_otel_comps.yaml new file mode 100644 index 00000000000..576b9fb0d17 --- /dev/null +++ b/changelog/fragments/1738160406-add_redis_nginx_otel_comps.yaml @@ -0,0 +1,32 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: feature + +# Change summary; a 80ish characters long description of the change. 
+summary: Add Nginx receiver and Redis receiver OTel components + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment. +#description: + +# Affected component; usually one of "elastic-agent", "fleet-server", "filebeat", "metricbeat", "auditbeat", "all", etc. +component: elastic-agent + +# PR URL; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +#pr: https://github.com/owner/repo/1234 + +# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. 
+#issue: https://github.com/owner/repo/1234 diff --git a/go.mod b/go.mod index 4b2108269ae..bcab266a17b 100644 --- a/go.mod +++ b/go.mod @@ -53,8 +53,10 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.117.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jmxreceiver v0.117.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.117.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/nginxreceiver v0.117.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.117.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.117.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/redisreceiver v0.117.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.117.0 github.com/otiai10/copy v1.14.0 github.com/rednafi/link-patrol v0.0.0-20240826150821-057643e74d4d @@ -258,6 +260,7 @@ require ( github.com/devigned/tab v0.1.2-0.20190607222403-0c15cf42f9a2 // indirect github.com/dgraph-io/badger/v4 v4.5.0 // indirect github.com/dgraph-io/ristretto/v2 v2.0.0 // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/digitalocean/go-libvirt v0.0.0-20240709142323-d8406205c752 // indirect github.com/digitalocean/godo v1.122.0 // indirect github.com/dlclark/regexp2 v1.4.0 // indirect @@ -436,6 +439,7 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/nginxinc/nginx-prometheus-exporter v0.11.0 // indirect github.com/nxadm/tail v1.4.11 // indirect github.com/onsi/ginkgo/v2 v2.20.0 // indirect github.com/onsi/gomega v1.34.1 // indirect @@ -485,6 +489,7 @@ require ( github.com/prometheus/procfs v0.15.1 // indirect 
github.com/prometheus/prometheus v0.54.1 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect + github.com/redis/go-redis/v9 v9.7.0 // indirect github.com/relvacode/iso8601 v1.6.0 // indirect github.com/rivo/uniseg v0.4.4 // indirect github.com/rogpeppe/go-internal v1.13.1 // indirect diff --git a/go.sum b/go.sum index 5048a566ab3..b0b067407d7 100644 --- a/go.sum +++ b/go.sum @@ -303,6 +303,10 @@ github.com/bmatcuk/doublestar/v4 v4.7.1 h1:fdDeAqgT47acgwd9bd9HxJRDmc9UAmPpc+2m0 github.com/bmatcuk/doublestar/v4 v4.7.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= +github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= +github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= +github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng= github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ= @@ -389,6 +393,8 @@ github.com/dgraph-io/ristretto/v2 v2.0.0 h1:l0yiSOtlJvc0otkqyMaDNysg8E9/F/TYZwMb github.com/dgraph-io/ristretto/v2 v2.0.0/go.mod h1:FVFokF2dRqXyPyeMnK1YDy8Fc6aTe0IKgbcd03CYeEk= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f 
h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/digitalocean/go-libvirt v0.0.0-20240709142323-d8406205c752 h1:NI7XEcHzWVvBfVjSVK6Qk4wmrUfoyQxCNpBjrHelZFk= github.com/digitalocean/go-libvirt v0.0.0-20240709142323-d8406205c752/go.mod h1:/Ok8PA2qi/ve0Py38+oL+VxoYmlowigYRyLEODRYdgc= github.com/digitalocean/godo v1.122.0 h1:ziytLQi8QKtDp2K1A+YrYl2dWLHLh2uaMzWvcz9HkKg= @@ -1101,6 +1107,8 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/nginxinc/nginx-prometheus-exporter v0.11.0 h1:21xjnqNgxtni2jDgAQ90bl15uDnrTreO9sIlu1YsX/U= +github.com/nginxinc/nginx-prometheus-exporter v0.11.0/go.mod h1:GdyHnWAb8q8OW1Pssrrqbcqra0SH0Vn6UXICMmyWkw8= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= @@ -1240,10 +1248,14 @@ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.117.0/go.mod h1:2bLb8fdBoH7Tpq76JVkZhLDOVv+D1q8Qvmp3OII8nXA= github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver v0.117.0 h1:FZGlGZvgOZCWjGtrUpqpqqvknJDtsuwdX9Q4SBUIRtE= github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver v0.117.0/go.mod h1:1ELm4C0E0On6u7hwVqVJymwmfx8NhfgAwZNb/g/IN00= 
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/nginxreceiver v0.117.0 h1:0cwqouVCkxlSqOYPw+9J6IIoCDcPqSAs8B1aO+BcjWU= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/nginxreceiver v0.117.0/go.mod h1:OHVKFpZWh0txJt3l3Xo/evrHUU3y20WAts1ngUvZdwk= github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.117.0 h1:ZukmPvZSJnDPvUH0ECDliYxyRgSMs6mWXnX70uGnhvk= github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.117.0/go.mod h1:aZzeEf3lN/i6gH5vVGzLdzH6JO2r6xwep0S0fvW8iMI= github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.117.0 h1:fHQzeVV8uMHApo9tBtxSXocv2IcCeswFPZwjvOAkDrc= github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.117.0/go.mod h1:0HQuGsZePaLKa1dOhvSqP1aV36j/cnT7i536TEJV1R8= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/redisreceiver v0.117.0 h1:SnxBuGO5SSABd0eo4gvJRyXUFLM0r9Fkmvoeo2JEahg= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/redisreceiver v0.117.0/go.mod h1:yG54t4EUxaWongYFn57NdMO9AQGSaCVMgdYJX1uKRLc= github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.117.0 h1:ZzESLZAIBZ3Z7e33WCUCdX2tDjxD1/A748aTmqReEvo= github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.117.0/go.mod h1:zClB9Yr77xTGG2b2g4+EGpzdW84lPCTPemFWjsyJFMw= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -1342,6 +1354,8 @@ github.com/prometheus/prometheus v0.54.1 h1:vKuwQNjnYN2/mDoWfHXDhAsz/68q/dQDb+Yb github.com/prometheus/prometheus v0.54.1/go.mod h1:xlLByHhk2g3ycakQGrMaU8K7OySZx98BzeCR99991NY= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/redis/go-redis/v9 v9.7.0 
h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E= +github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw= github.com/rednafi/link-patrol v0.0.0-20240826150821-057643e74d4d h1:pflUVm462wgbblymtVaGR8vAoV5o3wHDwg60fzoflBo= github.com/rednafi/link-patrol v0.0.0-20240826150821-057643e74d4d/go.mod h1:wAGfe4fPMwk3UX7Tx2RfHAo6FvS6ZUp2U8ZcCoP8hqs= github.com/relvacode/iso8601 v1.6.0 h1:eFXUhMJN3Gz8Rcq82f9DTMW0svjtAVuIEULglM7QHTU= diff --git a/internal/pkg/otel/README.md b/internal/pkg/otel/README.md index 56447e1d71c..ee726a8731c 100644 --- a/internal/pkg/otel/README.md +++ b/internal/pkg/otel/README.md @@ -44,10 +44,12 @@ This section provides a summary of components included in the Elastic Distributi | [k8sobjectsreceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/receiver/k8sobjectsreceiver/v0.117.0/receiver/k8sobjectsreceiver/README.md) | v0.117.0 | | [kafkareceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/receiver/kafkareceiver/v0.117.0/receiver/kafkareceiver/README.md) | v0.117.0 | | [kubeletstatsreceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/receiver/kubeletstatsreceiver/v0.117.0/receiver/kubeletstatsreceiver/README.md) | v0.117.0 | +| [nginxreceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/receiver/nginxreceiver/v0.117.0/receiver/nginxreceiver/README.md) | v0.117.0 | | [nopreceiver](https://github.com/open-telemetry/opentelemetry-collector/blob/receiver/nopreceiver/v0.117.0/receiver/nopreceiver/README.md) | v0.117.0 | | [otlpreceiver](https://github.com/open-telemetry/opentelemetry-collector/blob/receiver/otlpreceiver/v0.117.0/receiver/otlpreceiver/README.md) | v0.117.0 | | [prometheusreceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/receiver/prometheusreceiver/v0.117.0/receiver/prometheusreceiver/README.md) | v0.117.0 | | 
[receivercreator](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/receiver/receivercreator/v0.117.0/receiver/receivercreator/README.md) | v0.117.0 | +| [redisreceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/receiver/redisreceiver/v0.117.0/receiver/redisreceiver/README.md) | v0.117.0 | | [zipkinreceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/receiver/zipkinreceiver/v0.117.0/receiver/zipkinreceiver/README.md) | v0.117.0 | ### Exporters diff --git a/internal/pkg/otel/components.go b/internal/pkg/otel/components.go index dc20b4eea0a..2f38d3a352c 100644 --- a/internal/pkg/otel/components.go +++ b/internal/pkg/otel/components.go @@ -22,8 +22,10 @@ import ( k8sobjectsreceiver "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sobjectsreceiver" kafkareceiver "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver" kubeletstatsreceiver "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver" + nginxreceiver "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/nginxreceiver" prometheusreceiver "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver" receivercreator "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator" + redisreceiver "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/redisreceiver" zipkinreceiver "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver" nopreceiver "go.opentelemetry.io/collector/receiver/nopreceiver" otlpreceiver "go.opentelemetry.io/collector/receiver/otlpreceiver" @@ -86,6 +88,8 @@ func components(extensionFactories ...extension.Factory) func() (otelcol.Factori k8sobjectsreceiver.NewFactory(), prometheusreceiver.NewFactory(), receivercreator.NewFactory(), + redisreceiver.NewFactory(), + nginxreceiver.NewFactory(), jaegerreceiver.NewFactory(), 
zipkinreceiver.NewFactory(), fbreceiver.NewFactory(), From 0b8586f9d4f16bcfb899e60495991f9b27a3f271 Mon Sep 17 00:00:00 2001 From: "elastic-vault-github-plugin-prod[bot]" <150874479+elastic-vault-github-plugin-prod[bot]@users.noreply.github.com> Date: Thu, 30 Jan 2025 14:03:33 +0100 Subject: [PATCH 07/17] [Release] add-backport-next (#6645) Co-authored-by: elasticmachine --- .mergify.yml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/.mergify.yml b/.mergify.yml index dd7d31b96ba..7816e13d86b 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -371,3 +371,16 @@ pull_request_rules: labels: - "backport" title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" + - name: backport patches to 8.18 branch + conditions: + - merged + - label=backport-8.18 + actions: + backport: + assignees: + - "{{ author }}" + branches: + - "8.18" + labels: + - "backport" + title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" From 662b336be571b859571edf46156d3a076ff55310 Mon Sep 17 00:00:00 2001 From: Pavel Zorin Date: Thu, 30 Jan 2025 17:06:36 +0100 Subject: [PATCH 08/17] [CI] Disable scheduled BK integration tests (#6348) * [CI] Remove temporary scheduled test pipeline * cleanup * cleanup * Removed schedule only --- catalog-info.yaml | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/catalog-info.yaml b/catalog-info.yaml index 1ee15621b22..1d211e27e2f 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -136,11 +136,6 @@ spec: trigger_mode: none # don't trigger jobs from github activity cancel_intermediate_builds: false skip_intermediate_builds: false - schedules: - daily: - branch: main - cronline: "0 */6 * * *" # every 6 hours - message: Test execution of the new pipeline teams: ingest-fp: access_level: MANAGE_BUILD_AND_READ @@ -172,9 +167,9 @@ spec: repository: elastic/elastic-agent pipeline_file: ".buildkite/pipeline.elastic-agent-package.yml" env: - ELASTIC_SLACK_NOTIFICATIONS_ENABLED: 'true' + 
ELASTIC_SLACK_NOTIFICATIONS_ENABLED: "true" SLACK_NOTIFICATIONS_CHANNEL: "#ingest-notifications" - SLACK_NOTIFICATIONS_ON_SUCCESS: 'false' + SLACK_NOTIFICATIONS_ON_SUCCESS: "false" SLACK_NOTIFICATIONS_ALL_BRANCHES: "true" provider_settings: build_pull_request_forks: false From cab384c5735ddfa9e4d66d6c551b1486c34a4cb4 Mon Sep 17 00:00:00 2001 From: Pavel Zorin Date: Thu, 30 Jan 2025 17:08:11 +0100 Subject: [PATCH 09/17] Integration tests: Parse agent version from version.go (#6611) * Integration tests: use the mage target to get agent version * Added snapshot to version * Test run * Test run Windows * Removed -v flag * Parse agent version * Cleanup --- .buildkite/scripts/buildkite-integration-tests.sh | 15 +++++++++------ .buildkite/scripts/integration-tests.ps1 | 9 ++++++--- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/.buildkite/scripts/buildkite-integration-tests.sh b/.buildkite/scripts/buildkite-integration-tests.sh index 1659a4059d8..bc509eef2b0 100755 --- a/.buildkite/scripts/buildkite-integration-tests.sh +++ b/.buildkite/scripts/buildkite-integration-tests.sh @@ -26,15 +26,15 @@ fi asdf install echo "~~~ Running integration tests as $USER" -echo "~~~ Integration tests: ${GROUP_NAME}" go install gotest.tools/gotestsum gotestsum --version -PACKAGE_VERSION="$(cat .package-version)" -if [[ -n "$PACKAGE_VERSION" ]]; then - PACKAGE_VERSION=${PACKAGE_VERSION}"-SNAPSHOT" -fi +# Parsing version.go. 
Will be simplified here: https://github.com/elastic/ingest-dev/issues/4925 +AGENT_VERSION=$(grep "const defaultBeatVersion =" version/version.go | cut -d\" -f2) +AGENT_VERSION="${AGENT_VERSION}-SNAPSHOT" +export AGENT_VERSION +echo "~~~ Agent version: ${AGENT_VERSION}" os_data=$(uname -spr | tr ' ' '_') root_suffix="" @@ -44,8 +44,11 @@ fi fully_qualified_group_name="${GROUP_NAME}${root_suffix}_${os_data}" outputXML="build/${fully_qualified_group_name}.integration.xml" outputJSON="build/${fully_qualified_group_name}.integration.out.json" + +echo "~~~ Integration tests: ${GROUP_NAME}" + set +e -TEST_BINARY_NAME="elastic-agent" AGENT_VERSION="${PACKAGE_VERSION}" SNAPSHOT=true gotestsum --no-color -f standard-quiet --junitfile "${outputXML}" --jsonfile "${outputJSON}" -- -tags integration -test.shuffle on -test.timeout 2h0m0s github.com/elastic/elastic-agent/testing/integration -v -args -integration.groups="${GROUP_NAME}" -integration.sudo="${TEST_SUDO}" +TEST_BINARY_NAME="elastic-agent" AGENT_VERSION="${AGENT_VERSION}" SNAPSHOT=true gotestsum --no-color -f standard-quiet --junitfile "${outputXML}" --jsonfile "${outputJSON}" -- -tags integration -test.shuffle on -test.timeout 2h0m0s github.com/elastic/elastic-agent/testing/integration -v -args -integration.groups="${GROUP_NAME}" -integration.sudo="${TEST_SUDO}" TESTS_EXIT_STATUS=$? set -e diff --git a/.buildkite/scripts/integration-tests.ps1 b/.buildkite/scripts/integration-tests.ps1 index 34cceb843ba..36602d2730b 100755 --- a/.buildkite/scripts/integration-tests.ps1 +++ b/.buildkite/scripts/integration-tests.ps1 @@ -18,7 +18,10 @@ if ($PACKAGE_VERSION) { $PACKAGE_VERSION = "${PACKAGE_VERSION}-SNAPSHOT" } $env:TEST_BINARY_NAME = "elastic-agent" -$env:AGENT_VERSION = $PACKAGE_VERSION +# Parsing version.go. 
Will be simplified here: https://github.com/elastic/ingest-dev/issues/4925 +$AGENT_VERSION = (Get-Content version/version.go | Select-String -Pattern 'const defaultBeatVersion =' | ForEach-Object { $_ -replace '.*?"(.*?)".*', '$1' }) +$env:AGENT_VERSION = $AGENT_VERSION + "-SNAPSHOT" +echo "~~~ Agent version: $env:AGENT_VERSION" $env:SNAPSHOT = $true echo "~~~ Building test binaries" @@ -32,9 +35,9 @@ $fully_qualified_group_name="${GROUP_NAME}${root_suffix}_${osInfo}" $outputXML = "build/${fully_qualified_group_name}.integration.xml" $outputJSON = "build/${fully_qualified_group_name}.integration.out.json" try { - Get-Ess-Stack -StackVersion $PACKAGE_VERSION + Get-Ess-Stack -StackVersion $PACKAGE_VERSION Write-Output "~~~ Running integration test group: $GROUP_NAME as user: $env:USERNAME" - gotestsum --no-color -f standard-quiet --junitfile "${outputXML}" --jsonfile "${outputJSON}" -- -tags=integration -shuffle=on -timeout=2h0m0s "github.com/elastic/elastic-agent/testing/integration" -v -args "-integration.groups=$GROUP_NAME" "-integration.sudo=$TEST_SUDO" + gotestsum --no-color -f standard-quiet --junitfile "${outputXML}" --jsonfile "${outputJSON}" -- -tags=integration -shuffle=on -timeout=2h0m0s "github.com/elastic/elastic-agent/testing/integration" -v -args "-integration.groups=$GROUP_NAME" "-integration.sudo=$TEST_SUDO" } finally { ess_down From 714e0065f12e468c51b10162fccda6c9c0dea3f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emilio=20Alvarez=20Pi=C3=B1eiro?= <95703246+emilioalvap@users.noreply.github.com> Date: Thu, 30 Jan 2025 18:45:24 +0100 Subject: [PATCH 10/17] [Heartbeat] Upgrade Node to LTS v18.20.6 (#6641) * Upgrade NodeJS to latest v18 LTS --- ...4199-heartbeat-upgrade-nodejs-18-20-6.yaml | 32 +++++++++++++++++++ .../docker/Dockerfile.elastic-agent.tmpl | 4 +-- 2 files changed, 34 insertions(+), 2 deletions(-) create mode 100644 changelog/fragments/1738234199-heartbeat-upgrade-nodejs-18-20-6.yaml diff --git 
a/changelog/fragments/1738234199-heartbeat-upgrade-nodejs-18-20-6.yaml b/changelog/fragments/1738234199-heartbeat-upgrade-nodejs-18-20-6.yaml new file mode 100644 index 00000000000..281b634e42a --- /dev/null +++ b/changelog/fragments/1738234199-heartbeat-upgrade-nodejs-18-20-6.yaml @@ -0,0 +1,32 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: other + +# Change summary; a 80ish characters long description of the change. +summary: Upgrade NodeJS to LTS 18.20.6 + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment. +#description: + +# Affected component; usually one of "elastic-agent", "fleet-server", "filebeat", "metricbeat", "auditbeat", "all", etc. +component: "heartbeat" + +# PR URL; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: https://github.com/elastic/elastic-agent/pull/6641 + +# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). 
+# If not present is automatically filled by the tooling with the issue linked to the PR number. +#issue: https://github.com/owner/repo/1234 diff --git a/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl b/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl index bb5dcd57da2..cf529e7bb2c 100644 --- a/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl +++ b/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl @@ -196,7 +196,7 @@ RUN echo \ ENV ELASTIC_SYNTHETICS_CAPABLE=true ENV ELASTIC_AGENT_COMPLETE=true ENV TZ=UTC -ENV NODE_VERSION=18.20.4 +ENV NODE_VERSION=18.20.6 ENV PATH="$NODE_PATH/node/bin:$PATH" # Install the latest version of @elastic/synthetics forcefully ignoring the previously # cached node_modules, heartbeat then calls the global executable to run test suites @@ -259,7 +259,7 @@ USER root # Install required dependencies from wolfi repository RUN for iter in {1..10}; do \ apk update && \ - apk add --no-interactive --no-progress --no-cache nodejs-18=18.20.4-r0 npm=10.8.3-r0 glib dbus-glib libatk-1.0 \ + apk add --no-interactive --no-progress --no-cache nodejs-18=18.20.6-r0 npm=11.1.0-r0 glib dbus-glib libatk-1.0 \ libatk-bridge-2.0 cups-libs libxcomposite libxdamage libxrandr libxkbcommon pango alsa-lib \ font-opensans fontconfig gtk icu-data-full libnss mesa font-noto-cjk font-noto-emoji && \ exit_code=0 && break || exit_code=$? 
&& echo "apk error: retry $iter in 10s" && sleep 10; \ From cc84f9ec9f03c5331cd036c5e38b0c8354aef612 Mon Sep 17 00:00:00 2001 From: Mauri de Souza Meneguzzo Date: Thu, 30 Jan 2025 15:21:37 -0300 Subject: [PATCH 11/17] chore: add linter that denies importing math/rand (#6650) --- .golangci.yml | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 6047e1829ba..bcfef9a3d8f 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -54,7 +54,8 @@ linters: - noctx # noctx finds sending http request without context.Context - unconvert # Remove unnecessary type conversions - wastedassign # wastedassign finds wasted assignment statements. - - gomodguard # check for blocked dependencies + - depguard # check for blocked imports from Go files + - gomodguard # check for blocked imports from go.mod - gomoddirectives # all available settings of specific linters @@ -88,9 +89,9 @@ linters-settings: goimports: local-prefixes: github.com/elastic + # Check for blocked dependencies in go.mod. gomodguard: blocked: - # List of blocked modules. modules: # Blocked module. - github.com/pkg/errors: @@ -105,6 +106,15 @@ linters-settings: - github.com/gofrs/uuid/v5 reason: "Use one uuid library consistently across the codebase" + # Check for blocked imports in Go files. 
+ depguard: + rules: + main: + list-mode: lax + deny: + - pkg: "math/rand$" + desc: "superseded by math/rand/v2" + gomoddirectives: # Forbid local `replace` directives replace-local: false From 56373fc6b30108548faeb0d0c9a6f74aab5ccca1 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Fri, 31 Jan 2025 06:49:43 +0100 Subject: [PATCH 12/17] mergify: remove backport-8.x enforcement (#6654) --- .mergify.yml | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/.mergify.yml b/.mergify.yml index 7816e13d86b..3b523032f6f 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -123,20 +123,6 @@ pull_request_rules: To fixup this pull request, you need to add the backport labels for the needed branches, such as: * `backport-./d./d` is the label to automatically backport to the `8./d` branch. `/d` is the digit - - name: add backport-8.x label for main only if no skipped or assigned already - conditions: - - -label~=^(backport-skip|backport-8.x)$ - - base=main - - -merged - - -closed - actions: - comment: - message: | - `backport-v8.x` has been added to help with the transition to the new branch `8.x`. - If you don't need it please use `backport-skip` label and remove the `backport-8.x` label. 
- label: - add: - - backport-8.x - name: backport patches to 7.17 branch conditions: - merged From 4d98382e27245727cf5929c2cd401656fcb81bfa Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Fri, 31 Jan 2025 08:49:29 +0100 Subject: [PATCH 13/17] use centralized version qualifier (#6651) (#6658) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * use centralized version qualifier for elastic-agent DRA flow (cherry picked from commit 297af6daa4aba127d25d613ee5c238509271b706) Co-authored-by: Paolo Chilà --- .../pipeline.elastic-agent-binary-dra.yml | 6 +++-- .buildkite/scripts/version_qualifier.sh | 22 +++++++++++++++++++ 2 files changed, 26 insertions(+), 2 deletions(-) create mode 100755 .buildkite/scripts/version_qualifier.sh diff --git a/.buildkite/pipeline.elastic-agent-binary-dra.yml b/.buildkite/pipeline.elastic-agent-binary-dra.yml index eab9734bb14..d6b640b2982 100644 --- a/.buildkite/pipeline.elastic-agent-binary-dra.yml +++ b/.buildkite/pipeline.elastic-agent-binary-dra.yml @@ -47,8 +47,9 @@ steps: if: build.branch =~ /^[0-9]+\.[0-9x]+\$/ || build.env("RUN_STAGING") == "true" || build.env('VERSION_QUALIFIER') != null steps: - label: ":package: Build Elastic-Agent Core staging" - commands: - - .buildkite/scripts/steps/build-agent-core.sh + commands: | + source .buildkite/scripts/version_qualifier.sh + .buildkite/scripts/steps/build-agent-core.sh key: "build-dra-staging" artifact_paths: - "build/distributions/**/*" @@ -62,6 +63,7 @@ steps: - label: ":hammer: DRA Publish Elastic-Agent Core staging" command: | + source .buildkite/scripts/version_qualifier.sh echo "+++ Restoring Artifacts" buildkite-agent artifact download "build/**/*" . 
echo "+++ Changing permissions for the release manager" diff --git a/.buildkite/scripts/version_qualifier.sh b/.buildkite/scripts/version_qualifier.sh new file mode 100755 index 00000000000..88d172dad58 --- /dev/null +++ b/.buildkite/scripts/version_qualifier.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +# An opinionated approach to managing the Elastic Qualifier for the DRA in a Google Bucket +# instead of using a Buildkite env variable. + +if [[ -n "$VERSION_QUALIFIER" ]]; then + echo "~~~ VERSION_QUALIFIER externally set to [$VERSION_QUALIFIER]" + return 0 +fi + +# DRA_BRANCH can be used for manually testing packaging with PRs +# e.g. define `DRA_BRANCH="main"` under Options/Environment Variables in the Buildkite UI after clicking new Build +BRANCH="${DRA_BRANCH:="${BUILDKITE_BRANCH:=""}"}" + +qualifier="" +URL="https://storage.googleapis.com/dra-qualifier/${BRANCH}" +if curl -sf -o /dev/null "$URL" ; then + qualifier=$(curl -s "$URL") +fi + +export VERSION_QUALIFIER="$qualifier" +echo "~~~ VERSION_QUALIFIER set to [$VERSION_QUALIFIER]" From fc0939e051cb8b7429532783103cc4c7dff18446 Mon Sep 17 00:00:00 2001 From: "elastic-vault-github-plugin-prod[bot]" <150874479+elastic-vault-github-plugin-prod[bot]@users.noreply.github.com> Date: Fri, 31 Jan 2025 11:16:41 +0100 Subject: [PATCH 14/17] Bump the version on main to 9.1.0 (#6649) * [Release] update version * update k8s files * fix: CI integration tests by forcing agent version to 9.0.0 * skip TestPreviousMinor --------- Co-authored-by: elasticmachine Co-authored-by: Panos Koutsovasilis Co-authored-by: Julien Lind Co-authored-by: Paolo Chila --- .../edot-collector/kube-stack/values.yaml | 2 +- deploy/helm/elastic-agent/Chart.yaml | 4 +- .../examples/eck/rendered/manifest.yaml | 48 +++++++++---------- .../rendered/manifest.yaml | 2 +- .../fleet-managed/rendered/manifest.yaml | 22 ++++----- .../kubernetes-default/rendered/manifest.yaml | 44 ++++++++--------- .../rendered/manifest.yaml | 44 ++++++++--------- 
.../rendered/manifest.yaml | 46 +++++++++--------- .../rendered/manifest.yaml | 22 ++++----- .../rendered/manifest.yaml | 44 ++++++++--------- .../netflow-service/rendered/manifest.yaml | 14 +++--- .../rendered/manifest.yaml | 10 ++-- .../rendered/manifest.yaml | 22 ++++----- .../user-cluster-role/rendered/manifest.yaml | 18 +++---- .../rendered/manifest.yaml | 36 +++++++------- deploy/helm/elastic-agent/values.yaml | 4 +- .../base/elastic-agent-managed-daemonset.yaml | 2 +- .../elastic-agent-standalone-daemonset.yaml | 6 +-- .../base/elastic-agent-managed-daemonset.yaml | 2 +- .../elastic-agent-managed-statefulset.yaml | 2 +- .../elastic-agent-standalone-daemonset.yaml | 6 +-- .../elastic-agent-standalone-statefulset.yaml | 6 +-- .../kustomization.yaml | 4 +- .../elastic-agent-managed-kubernetes.yaml | 2 +- .../elastic-agent-standalone-kubernetes.yaml | 6 +-- testing/upgradetest/versions_test.go | 1 + version/version.go | 2 +- 27 files changed, 211 insertions(+), 210 deletions(-) diff --git a/deploy/helm/edot-collector/kube-stack/values.yaml b/deploy/helm/edot-collector/kube-stack/values.yaml index 35782ad9ff0..8e8b493ea18 100644 --- a/deploy/helm/edot-collector/kube-stack/values.yaml +++ b/deploy/helm/edot-collector/kube-stack/values.yaml @@ -17,7 +17,7 @@ crds: defaultCRConfig: image: repository: "docker.elastic.co/beats/elastic-agent" - tag: "9.0.0" + tag: "9.1.0" targetAllocator: enabled: false # Enable/disable the Operator's Target allocator. 
# Refer to: https://github.com/open-telemetry/opentelemetry-operator/tree/main/cmd/otel-allocator diff --git a/deploy/helm/elastic-agent/Chart.yaml b/deploy/helm/elastic-agent/Chart.yaml index 8e4d5a21dc5..cea7b71f1d8 100644 --- a/deploy/helm/elastic-agent/Chart.yaml +++ b/deploy/helm/elastic-agent/Chart.yaml @@ -3,8 +3,8 @@ name: elastic-agent description: Elastic-Agent Helm Chart kubeVersion: ">= 1.27.0-0" type: application -appVersion: 9.0.0 -version: 9.0.0-beta +appVersion: 9.1.0 +version: 9.1.0-beta dependencies: - name: kube-state-metrics version: "5.28.0" diff --git a/deploy/helm/elastic-agent/examples/eck/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/eck/rendered/manifest.yaml index 0a94afa4690..9dc6275050e 100644 --- a/deploy/helm/elastic-agent/examples/eck/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/eck/rendered/manifest.yaml @@ -22,10 +22,10 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 annotations: eck.k8s.elastic.co/license: basic --- @@ -36,10 +36,10 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 annotations: eck.k8s.elastic.co/license: basic --- @@ -50,10 +50,10 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 annotations: eck.k8s.elastic.co/license: basic stringData: @@ 
-288,10 +288,10 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 annotations: eck.k8s.elastic.co/license: basic stringData: @@ -620,10 +620,10 @@ kind: ClusterRole metadata: name: agent-clusterWide-example-default labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 annotations: eck.k8s.elastic.co/license: basic rules: @@ -702,10 +702,10 @@ kind: ClusterRole metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 annotations: eck.k8s.elastic.co/license: basic rules: @@ -806,10 +806,10 @@ kind: ClusterRoleBinding metadata: name: agent-clusterWide-example-default labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 annotations: eck.k8s.elastic.co/license: basic subjects: @@ -827,10 +827,10 @@ kind: ClusterRoleBinding metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 annotations: eck.k8s.elastic.co/license: basic subjects: @@ -962,14 +962,14 @@ metadata: name: agent-pernode-example namespace: "default" labels: - 
helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 annotations: eck.k8s.elastic.co/license: basic spec: - version: 9.0.0 + version: 9.1.0 configRef: secretName: agent-pernode-example elasticsearchRefs: @@ -993,7 +993,7 @@ spec: value: /usr/share/elastic-agent/state - name: ELASTIC_NETINFO value: "false" - image: docker.elastic.co/beats/elastic-agent:9.0.0-SNAPSHOT + image: docker.elastic.co/beats/elastic-agent:9.1.0-SNAPSHOT imagePullPolicy: IfNotPresent name: agent resources: @@ -1041,14 +1041,14 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 annotations: eck.k8s.elastic.co/license: basic spec: - version: 9.0.0 + version: 9.1.0 configRef: secretName: agent-clusterwide-example elasticsearchRefs: @@ -1072,7 +1072,7 @@ spec: value: /usr/share/elastic-agent/state - name: ELASTIC_NETINFO value: "false" - image: docker.elastic.co/beats/elastic-agent:9.0.0-SNAPSHOT + image: docker.elastic.co/beats/elastic-agent:9.1.0-SNAPSHOT imagePullPolicy: IfNotPresent name: agent resources: diff --git a/deploy/helm/elastic-agent/examples/fleet-managed-ksm-sharding/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/fleet-managed-ksm-sharding/rendered/manifest.yaml index 2228a6798a0..657403af7c4 100644 --- a/deploy/helm/elastic-agent/examples/fleet-managed-ksm-sharding/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/fleet-managed-ksm-sharding/rendered/manifest.yaml @@ -415,7 +415,7 @@ spec: value: "false" - name: FLEET_ENROLL value: "1" - image: docker.elastic.co/beats/elastic-agent:9.0.0-SNAPSHOT + image: 
docker.elastic.co/beats/elastic-agent:9.1.0-SNAPSHOT imagePullPolicy: IfNotPresent name: agent resources: diff --git a/deploy/helm/elastic-agent/examples/fleet-managed/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/fleet-managed/rendered/manifest.yaml index 4dc567a16b7..c28502e9c80 100644 --- a/deploy/helm/elastic-agent/examples/fleet-managed/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/fleet-managed/rendered/manifest.yaml @@ -22,10 +22,10 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 --- # Source: elastic-agent/templates/agent/k8s/secret.yaml apiVersion: v1 @@ -34,10 +34,10 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 stringData: agent.yml: |- @@ -212,10 +212,10 @@ kind: ClusterRole metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 rules: - apiGroups: [ "" ] # "" indicates the core API group resources: @@ -314,10 +314,10 @@ kind: ClusterRoleBinding metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 subjects: - kind: ServiceAccount name: agent-pernode-example @@ -362,10 +362,10 @@ 
metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 spec: selector: matchLabels: @@ -404,7 +404,7 @@ spec: value: "false" - name: FLEET_ENROLL value: "1" - image: docker.elastic.co/beats/elastic-agent:9.0.0-SNAPSHOT + image: docker.elastic.co/beats/elastic-agent:9.1.0-SNAPSHOT imagePullPolicy: IfNotPresent name: agent resources: diff --git a/deploy/helm/elastic-agent/examples/kubernetes-default/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/kubernetes-default/rendered/manifest.yaml index d5c609a4c2b..def214f2e32 100644 --- a/deploy/helm/elastic-agent/examples/kubernetes-default/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/kubernetes-default/rendered/manifest.yaml @@ -22,10 +22,10 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 --- # Source: elastic-agent/templates/agent/service-account.yaml apiVersion: v1 @@ -34,10 +34,10 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 --- # Source: elastic-agent/templates/agent/k8s/secret.yaml apiVersion: v1 @@ -46,10 +46,10 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - 
app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 stringData: agent.yml: |- @@ -289,10 +289,10 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 stringData: agent.yml: |- @@ -626,10 +626,10 @@ kind: ClusterRole metadata: name: agent-clusterWide-example-default labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 rules: - apiGroups: [ "" ] # "" indicates the core API group resources: @@ -706,10 +706,10 @@ kind: ClusterRole metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 rules: - apiGroups: [ "" ] # "" indicates the core API group resources: @@ -808,10 +808,10 @@ kind: ClusterRoleBinding metadata: name: agent-clusterWide-example-default labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 subjects: - kind: ServiceAccount name: agent-clusterwide-example @@ -827,10 +827,10 @@ kind: ClusterRoleBinding metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 subjects: - kind: ServiceAccount name: 
agent-pernode-example @@ -875,10 +875,10 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 spec: selector: matchLabels: @@ -909,7 +909,7 @@ spec: value: /usr/share/elastic-agent/state - name: ELASTIC_NETINFO value: "false" - image: docker.elastic.co/beats/elastic-agent:9.0.0-SNAPSHOT + image: docker.elastic.co/beats/elastic-agent:9.1.0-SNAPSHOT imagePullPolicy: IfNotPresent name: agent resources: @@ -1056,10 +1056,10 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 spec: selector: matchLabels: @@ -1090,7 +1090,7 @@ spec: value: /usr/share/elastic-agent/state - name: ELASTIC_NETINFO value: "false" - image: docker.elastic.co/beats/elastic-agent:9.0.0-SNAPSHOT + image: docker.elastic.co/beats/elastic-agent:9.1.0-SNAPSHOT imagePullPolicy: IfNotPresent name: agent resources: diff --git a/deploy/helm/elastic-agent/examples/kubernetes-hints-autodiscover/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/kubernetes-hints-autodiscover/rendered/manifest.yaml index 87beb58f128..b2b5ef81851 100644 --- a/deploy/helm/elastic-agent/examples/kubernetes-hints-autodiscover/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/kubernetes-hints-autodiscover/rendered/manifest.yaml @@ -22,10 +22,10 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + 
app.kubernetes.io/version: 9.1.0 --- # Source: elastic-agent/templates/agent/service-account.yaml apiVersion: v1 @@ -34,10 +34,10 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 --- # Source: elastic-agent/templates/agent/k8s/secret.yaml apiVersion: v1 @@ -46,10 +46,10 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 stringData: agent.yml: |- @@ -289,10 +289,10 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 stringData: agent.yml: |- @@ -629,10 +629,10 @@ kind: ClusterRole metadata: name: agent-clusterWide-example-default labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 rules: - apiGroups: [ "" ] # "" indicates the core API group resources: @@ -709,10 +709,10 @@ kind: ClusterRole metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 rules: - apiGroups: [ "" ] # "" indicates the core API group resources: @@ -811,10 
+811,10 @@ kind: ClusterRoleBinding metadata: name: agent-clusterWide-example-default labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 subjects: - kind: ServiceAccount name: agent-clusterwide-example @@ -830,10 +830,10 @@ kind: ClusterRoleBinding metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 subjects: - kind: ServiceAccount name: agent-pernode-example @@ -878,10 +878,10 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 spec: selector: matchLabels: @@ -912,7 +912,7 @@ spec: value: /usr/share/elastic-agent/state - name: ELASTIC_NETINFO value: "false" - image: docker.elastic.co/beats/elastic-agent:9.0.0-SNAPSHOT + image: docker.elastic.co/beats/elastic-agent:9.1.0-SNAPSHOT imagePullPolicy: IfNotPresent name: agent resources: @@ -1059,10 +1059,10 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 spec: selector: matchLabels: @@ -1093,7 +1093,7 @@ spec: value: /usr/share/elastic-agent/state - name: ELASTIC_NETINFO value: "false" - image: docker.elastic.co/beats/elastic-agent:9.0.0-SNAPSHOT + image: docker.elastic.co/beats/elastic-agent:9.1.0-SNAPSHOT 
imagePullPolicy: IfNotPresent name: agent resources: diff --git a/deploy/helm/elastic-agent/examples/kubernetes-ksm-sharding/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/kubernetes-ksm-sharding/rendered/manifest.yaml index 1993d07a8ef..09d8388e356 100644 --- a/deploy/helm/elastic-agent/examples/kubernetes-ksm-sharding/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/kubernetes-ksm-sharding/rendered/manifest.yaml @@ -22,10 +22,10 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 --- # Source: elastic-agent/templates/agent/service-account.yaml apiVersion: v1 @@ -34,10 +34,10 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 --- # Source: elastic-agent/templates/agent/k8s/secret.yaml apiVersion: v1 @@ -46,10 +46,10 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 stringData: agent.yml: |- @@ -102,10 +102,10 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 stringData: agent.yml: |- @@ -631,10 +631,10 @@ kind: ClusterRole metadata: name: 
agent-clusterWide-example-default labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 rules: - apiGroups: [ "" ] # "" indicates the core API group resources: @@ -711,10 +711,10 @@ kind: ClusterRole metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 rules: - apiGroups: [ "" ] # "" indicates the core API group resources: @@ -813,10 +813,10 @@ kind: ClusterRoleBinding metadata: name: agent-clusterWide-example-default labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 subjects: - kind: ServiceAccount name: agent-clusterwide-example @@ -832,10 +832,10 @@ kind: ClusterRoleBinding metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 subjects: - kind: ServiceAccount name: agent-pernode-example @@ -936,10 +936,10 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 spec: selector: matchLabels: @@ -980,7 +980,7 @@ spec: secretKeyRef: key: api_key name: es-api-secret - image: 
docker.elastic.co/beats/elastic-agent:9.0.0-SNAPSHOT + image: docker.elastic.co/beats/elastic-agent:9.1.0-SNAPSHOT imagePullPolicy: IfNotPresent name: agent resources: @@ -1042,10 +1042,10 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 spec: selector: matchLabels: @@ -1086,7 +1086,7 @@ spec: secretKeyRef: key: api_key name: es-api-secret - image: docker.elastic.co/beats/elastic-agent:9.0.0-SNAPSHOT + image: docker.elastic.co/beats/elastic-agent:9.1.0-SNAPSHOT imagePullPolicy: IfNotPresent name: agent resources: @@ -1250,7 +1250,7 @@ spec: secretKeyRef: key: api_key name: es-api-secret - image: docker.elastic.co/beats/elastic-agent:9.0.0-SNAPSHOT + image: docker.elastic.co/beats/elastic-agent:9.1.0-SNAPSHOT imagePullPolicy: IfNotPresent name: agent resources: diff --git a/deploy/helm/elastic-agent/examples/kubernetes-only-logs/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/kubernetes-only-logs/rendered/manifest.yaml index 599b020ad40..f7f3eebb623 100644 --- a/deploy/helm/elastic-agent/examples/kubernetes-only-logs/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/kubernetes-only-logs/rendered/manifest.yaml @@ -6,10 +6,10 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 --- # Source: elastic-agent/templates/agent/k8s/secret.yaml apiVersion: v1 @@ -18,10 +18,10 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: 
elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 stringData: agent.yml: |- @@ -110,10 +110,10 @@ kind: ClusterRole metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 rules: - apiGroups: [ "" ] # "" indicates the core API group resources: @@ -190,10 +190,10 @@ kind: ClusterRoleBinding metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 subjects: - kind: ServiceAccount name: agent-pernode-example @@ -210,10 +210,10 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 spec: selector: matchLabels: @@ -244,7 +244,7 @@ spec: value: /usr/share/elastic-agent/state - name: ELASTIC_NETINFO value: "false" - image: docker.elastic.co/beats/elastic-agent:9.0.0-SNAPSHOT + image: docker.elastic.co/beats/elastic-agent:9.1.0-SNAPSHOT imagePullPolicy: IfNotPresent name: agent resources: diff --git a/deploy/helm/elastic-agent/examples/multiple-integrations/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/multiple-integrations/rendered/manifest.yaml index 1acea05f377..705f09321bb 100644 --- a/deploy/helm/elastic-agent/examples/multiple-integrations/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/multiple-integrations/rendered/manifest.yaml @@ -22,10 +22,10 @@ metadata: name: 
agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 --- # Source: elastic-agent/templates/agent/service-account.yaml apiVersion: v1 @@ -34,10 +34,10 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 --- # Source: elastic-agent/templates/agent/k8s/secret.yaml apiVersion: v1 @@ -46,10 +46,10 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 stringData: agent.yml: |- @@ -315,10 +315,10 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 stringData: agent.yml: |- @@ -655,10 +655,10 @@ kind: ClusterRole metadata: name: agent-clusterWide-example-default labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 rules: - apiGroups: [ "" ] # "" indicates the core API group resources: @@ -735,10 +735,10 @@ kind: ClusterRole metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: 
elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 rules: - apiGroups: [ "" ] # "" indicates the core API group resources: @@ -837,10 +837,10 @@ kind: ClusterRoleBinding metadata: name: agent-clusterWide-example-default labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 subjects: - kind: ServiceAccount name: agent-clusterwide-example @@ -856,10 +856,10 @@ kind: ClusterRoleBinding metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 subjects: - kind: ServiceAccount name: agent-pernode-example @@ -904,10 +904,10 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 spec: selector: matchLabels: @@ -938,7 +938,7 @@ spec: value: /usr/share/elastic-agent/state - name: ELASTIC_NETINFO value: "false" - image: docker.elastic.co/beats/elastic-agent:9.0.0-SNAPSHOT + image: docker.elastic.co/beats/elastic-agent:9.1.0-SNAPSHOT imagePullPolicy: IfNotPresent name: agent resources: @@ -1075,10 +1075,10 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 spec: selector: 
matchLabels: @@ -1109,7 +1109,7 @@ spec: value: /usr/share/elastic-agent/state - name: ELASTIC_NETINFO value: "false" - image: docker.elastic.co/beats/elastic-agent:9.0.0-SNAPSHOT + image: docker.elastic.co/beats/elastic-agent:9.1.0-SNAPSHOT imagePullPolicy: IfNotPresent name: agent resources: diff --git a/deploy/helm/elastic-agent/examples/netflow-service/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/netflow-service/rendered/manifest.yaml index b768f25fd14..bc4e22a5d31 100644 --- a/deploy/helm/elastic-agent/examples/netflow-service/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/netflow-service/rendered/manifest.yaml @@ -6,10 +6,10 @@ metadata: name: agent-netflow-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 stringData: agent.yml: |- @@ -69,10 +69,10 @@ metadata: name: agent-netflow-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 spec: type: ClusterIP selector: @@ -89,10 +89,10 @@ metadata: name: agent-netflow-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 spec: selector: matchLabels: @@ -131,7 +131,7 @@ spec: secretKeyRef: key: api_key name: es-api-secret - image: docker.elastic.co/beats/elastic-agent:9.0.0-SNAPSHOT + image: docker.elastic.co/beats/elastic-agent:9.1.0-SNAPSHOT imagePullPolicy: IfNotPresent name: agent ports: diff --git 
a/deploy/helm/elastic-agent/examples/nginx-custom-integration/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/nginx-custom-integration/rendered/manifest.yaml index 1654f966cc8..b2ce0117c0a 100644 --- a/deploy/helm/elastic-agent/examples/nginx-custom-integration/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/nginx-custom-integration/rendered/manifest.yaml @@ -6,10 +6,10 @@ metadata: name: agent-nginx-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 stringData: agent.yml: |- @@ -61,10 +61,10 @@ metadata: name: agent-nginx-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 spec: selector: matchLabels: @@ -93,7 +93,7 @@ spec: fieldPath: metadata.name - name: STATE_PATH value: /usr/share/elastic-agent/state - image: docker.elastic.co/beats/elastic-agent:9.0.0-SNAPSHOT + image: docker.elastic.co/beats/elastic-agent:9.1.0-SNAPSHOT imagePullPolicy: IfNotPresent name: agent securityContext: diff --git a/deploy/helm/elastic-agent/examples/system-custom-auth-paths/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/system-custom-auth-paths/rendered/manifest.yaml index 8b9b28ebc31..411c333b3b9 100644 --- a/deploy/helm/elastic-agent/examples/system-custom-auth-paths/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/system-custom-auth-paths/rendered/manifest.yaml @@ -6,10 +6,10 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - 
app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 --- # Source: elastic-agent/templates/agent/k8s/secret.yaml apiVersion: v1 @@ -18,10 +18,10 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 stringData: agent.yml: |- @@ -182,10 +182,10 @@ kind: ClusterRole metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 rules: - apiGroups: [ "" ] # "" indicates the core API group resources: @@ -262,10 +262,10 @@ kind: ClusterRoleBinding metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 subjects: - kind: ServiceAccount name: agent-pernode-example @@ -282,10 +282,10 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 spec: selector: matchLabels: @@ -316,7 +316,7 @@ spec: value: /usr/share/elastic-agent/state - name: ELASTIC_NETINFO value: "false" - image: docker.elastic.co/beats/elastic-agent:9.0.0-SNAPSHOT + image: docker.elastic.co/beats/elastic-agent:9.1.0-SNAPSHOT imagePullPolicy: IfNotPresent name: agent resources: diff --git a/deploy/helm/elastic-agent/examples/user-cluster-role/rendered/manifest.yaml 
b/deploy/helm/elastic-agent/examples/user-cluster-role/rendered/manifest.yaml index c878dcd71aa..08b0dde90f3 100644 --- a/deploy/helm/elastic-agent/examples/user-cluster-role/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/user-cluster-role/rendered/manifest.yaml @@ -6,10 +6,10 @@ metadata: name: agent-nginx-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 annotations: elastic-agent.k8s.elastic.co/preset: nginx elastic-agent.k8s.elastic.co/sa: nginx @@ -21,10 +21,10 @@ metadata: name: agent-nginx-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 annotations: elastic-agent.k8s.elastic.co/preset: nginx stringData: @@ -77,10 +77,10 @@ kind: ClusterRoleBinding metadata: name: agent-nginx-example-default labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 annotations: elastic-agent.k8s.elastic.co/preset: nginx subjects: @@ -99,10 +99,10 @@ metadata: name: agent-nginx-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 spec: selector: matchLabels: @@ -132,7 +132,7 @@ spec: fieldPath: metadata.name - name: STATE_PATH value: /usr/share/elastic-agent/state - image: docker.elastic.co/beats/elastic-agent:9.0.0-SNAPSHOT + image: 
docker.elastic.co/beats/elastic-agent:9.1.0-SNAPSHOT imagePullPolicy: IfNotPresent name: agent securityContext: diff --git a/deploy/helm/elastic-agent/examples/user-service-account/rendered/manifest.yaml b/deploy/helm/elastic-agent/examples/user-service-account/rendered/manifest.yaml index 46f2be3a360..c22e6350f2a 100644 --- a/deploy/helm/elastic-agent/examples/user-service-account/rendered/manifest.yaml +++ b/deploy/helm/elastic-agent/examples/user-service-account/rendered/manifest.yaml @@ -22,10 +22,10 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 stringData: agent.yml: |- @@ -265,10 +265,10 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 stringData: agent.yml: |- @@ -602,10 +602,10 @@ kind: ClusterRole metadata: name: agent-clusterWide-example-default labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 annotations: elastic-agent.k8s.elastic.co/cr: nginx rules: @@ -684,10 +684,10 @@ kind: ClusterRole metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 annotations: elastic-agent.k8s.elastic.co/cr: nginx rules: @@ -788,10 +788,10 @@ kind: ClusterRoleBinding 
metadata: name: agent-clusterWide-example-default labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 subjects: - kind: ServiceAccount name: user-sa-clusterWide @@ -807,10 +807,10 @@ kind: ClusterRoleBinding metadata: name: agent-perNode-example-default labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 subjects: - kind: ServiceAccount name: user-sa-perNode @@ -855,10 +855,10 @@ metadata: name: agent-pernode-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 spec: selector: matchLabels: @@ -889,7 +889,7 @@ spec: value: /usr/share/elastic-agent/state - name: ELASTIC_NETINFO value: "false" - image: docker.elastic.co/beats/elastic-agent:9.0.0-SNAPSHOT + image: docker.elastic.co/beats/elastic-agent:9.1.0-SNAPSHOT imagePullPolicy: IfNotPresent name: agent resources: @@ -1036,10 +1036,10 @@ metadata: name: agent-clusterwide-example namespace: "default" labels: - helm.sh/chart: elastic-agent-9.0.0-beta + helm.sh/chart: elastic-agent-9.1.0-beta app.kubernetes.io/name: elastic-agent app.kubernetes.io/instance: example - app.kubernetes.io/version: 9.0.0 + app.kubernetes.io/version: 9.1.0 spec: selector: matchLabels: @@ -1070,7 +1070,7 @@ spec: value: /usr/share/elastic-agent/state - name: ELASTIC_NETINFO value: "false" - image: docker.elastic.co/beats/elastic-agent:9.0.0-SNAPSHOT + image: docker.elastic.co/beats/elastic-agent:9.1.0-SNAPSHOT imagePullPolicy: IfNotPresent name: agent resources: diff --git 
a/deploy/helm/elastic-agent/values.yaml b/deploy/helm/elastic-agent/values.yaml index 9f523542a25..27f9e5a20b2 100644 --- a/deploy/helm/elastic-agent/values.yaml +++ b/deploy/helm/elastic-agent/values.yaml @@ -332,13 +332,13 @@ extraIntegrations: {} agent: # -- elastic-agent version # @section -- 6 - Elastic-Agent Configuration - version: 9.0.0 + version: 9.1.0 # -- image configuration # @section -- 6 - Elastic-Agent Configuration image: repository: docker.elastic.co/beats/elastic-agent pullPolicy: IfNotPresent - tag: "9.0.0-SNAPSHOT" + tag: "9.1.0-SNAPSHOT" # -- image pull secrets # @section -- 6 - Elastic-Agent Configuration imagePullSecrets: [] diff --git a/deploy/kubernetes/elastic-agent-kustomize/default/elastic-agent-managed/base/elastic-agent-managed-daemonset.yaml b/deploy/kubernetes/elastic-agent-kustomize/default/elastic-agent-managed/base/elastic-agent-managed-daemonset.yaml index 79b77605c69..9c80f806724 100644 --- a/deploy/kubernetes/elastic-agent-kustomize/default/elastic-agent-managed/base/elastic-agent-managed-daemonset.yaml +++ b/deploy/kubernetes/elastic-agent-kustomize/default/elastic-agent-managed/base/elastic-agent-managed-daemonset.yaml @@ -27,7 +27,7 @@ spec: dnsPolicy: ClusterFirstWithHostNet containers: - name: elastic-agent - image: docker.elastic.co/beats/elastic-agent:9.0.0 + image: docker.elastic.co/beats/elastic-agent:9.1.0 env: # Set to 1 for enrollment into Fleet server. 
If not set, Elastic Agent is run in standalone mode - name: FLEET_ENROLL diff --git a/deploy/kubernetes/elastic-agent-kustomize/default/elastic-agent-standalone/base/elastic-agent-standalone-daemonset.yaml b/deploy/kubernetes/elastic-agent-kustomize/default/elastic-agent-standalone/base/elastic-agent-standalone-daemonset.yaml index 684fffa2ef0..5bb88496fb9 100644 --- a/deploy/kubernetes/elastic-agent-kustomize/default/elastic-agent-standalone/base/elastic-agent-standalone-daemonset.yaml +++ b/deploy/kubernetes/elastic-agent-kustomize/default/elastic-agent-standalone/base/elastic-agent-standalone-daemonset.yaml @@ -28,19 +28,19 @@ spec: # Uncomment if using hints feature #initContainers: # - name: k8s-templates-downloader - # image: docker.elastic.co/beats/elastic-agent:9.0.0 + # image: docker.elastic.co/beats/elastic-agent:9.1.0 # command: ['bash'] # args: # - -c # - >- # mkdir -p /etc/elastic-agent/inputs.d && - # curl -sL https://github.com/elastic/elastic-agent/archive/9.0.tar.gz | tar xz -C /etc/elastic-agent/inputs.d --strip=5 "elastic-agent-9.0/deploy/kubernetes/elastic-agent-standalone/templates.d" + # curl -sL https://github.com/elastic/elastic-agent/archive/9.1.tar.gz | tar xz -C /etc/elastic-agent/inputs.d --strip=5 "elastic-agent-9.1/deploy/kubernetes/elastic-agent-standalone/templates.d" # volumeMounts: # - name: external-inputs # mountPath: /etc/elastic-agent/inputs.d containers: - name: elastic-agent-standalone - image: docker.elastic.co/beats/elastic-agent:9.0.0 + image: docker.elastic.co/beats/elastic-agent:9.1.0 args: ["-c", "/etc/elastic-agent/agent.yml", "-e"] env: # The API Key with access privilleges to connect to Elasticsearch. 
https://www.elastic.co/guide/en/fleet/current/grant-access-to-elasticsearch.html#create-api-key-standalone-agent diff --git a/deploy/kubernetes/elastic-agent-kustomize/ksm-autosharding/elastic-agent-managed/base/elastic-agent-managed-daemonset.yaml b/deploy/kubernetes/elastic-agent-kustomize/ksm-autosharding/elastic-agent-managed/base/elastic-agent-managed-daemonset.yaml index 9d80b74b7f8..53390768a43 100644 --- a/deploy/kubernetes/elastic-agent-kustomize/ksm-autosharding/elastic-agent-managed/base/elastic-agent-managed-daemonset.yaml +++ b/deploy/kubernetes/elastic-agent-kustomize/ksm-autosharding/elastic-agent-managed/base/elastic-agent-managed-daemonset.yaml @@ -27,7 +27,7 @@ spec: dnsPolicy: ClusterFirstWithHostNet containers: - name: elastic-agent - image: docker.elastic.co/beats/elastic-agent:9.0.0 + image: docker.elastic.co/beats/elastic-agent:9.1.0 env: # Set to 1 for enrollment into Fleet server. If not set, Elastic Agent is run in standalone mode - name: FLEET_ENROLL diff --git a/deploy/kubernetes/elastic-agent-kustomize/ksm-autosharding/elastic-agent-managed/extra/elastic-agent-managed-statefulset.yaml b/deploy/kubernetes/elastic-agent-kustomize/ksm-autosharding/elastic-agent-managed/extra/elastic-agent-managed-statefulset.yaml index 25f7c3822d3..69cdfb9ef96 100644 --- a/deploy/kubernetes/elastic-agent-kustomize/ksm-autosharding/elastic-agent-managed/extra/elastic-agent-managed-statefulset.yaml +++ b/deploy/kubernetes/elastic-agent-kustomize/ksm-autosharding/elastic-agent-managed/extra/elastic-agent-managed-statefulset.yaml @@ -27,7 +27,7 @@ spec: dnsPolicy: ClusterFirstWithHostNet containers: - name: elastic-agent - image: docker.elastic.co/beats/elastic-agent:9.0.0 + image: docker.elastic.co/beats/elastic-agent:9.1.0 env: # Set to 1 for enrollment into Fleet server. 
If not set, Elastic Agent is run in standalone mode - name: FLEET_ENROLL diff --git a/deploy/kubernetes/elastic-agent-kustomize/ksm-autosharding/elastic-agent-standalone/base/elastic-agent-standalone-daemonset.yaml b/deploy/kubernetes/elastic-agent-kustomize/ksm-autosharding/elastic-agent-standalone/base/elastic-agent-standalone-daemonset.yaml index 0573903543c..5c00fe13b04 100644 --- a/deploy/kubernetes/elastic-agent-kustomize/ksm-autosharding/elastic-agent-standalone/base/elastic-agent-standalone-daemonset.yaml +++ b/deploy/kubernetes/elastic-agent-kustomize/ksm-autosharding/elastic-agent-standalone/base/elastic-agent-standalone-daemonset.yaml @@ -28,19 +28,19 @@ spec: # Uncomment if using hints feature #initContainers: # - name: k8s-templates-downloader - # image: docker.elastic.co/beats/elastic-agent:9.0.0 + # image: docker.elastic.co/beats/elastic-agent:9.1.0 # command: ['bash'] # args: # - -c # - >- # mkdir -p /etc/elastic-agent/inputs.d && - # curl -sL https://github.com/elastic/elastic-agent/archive/9.0.tar.gz | tar xz -C /etc/elastic-agent/inputs.d --strip=5 "elastic-agent-9.0/deploy/kubernetes/elastic-agent-standalone/templates.d" + # curl -sL https://github.com/elastic/elastic-agent/archive/9.1.tar.gz | tar xz -C /etc/elastic-agent/inputs.d --strip=5 "elastic-agent-9.1/deploy/kubernetes/elastic-agent-standalone/templates.d" # volumeMounts: # - name: external-inputs # mountPath: /etc/elastic-agent/inputs.d containers: - name: elastic-agent-standalone - image: docker.elastic.co/beats/elastic-agent:9.0.0 + image: docker.elastic.co/beats/elastic-agent:9.1.0 args: ["-c", "/etc/elastic-agent/agent.yml", "-e"] env: # The API Key with access privilleges to connect to Elasticsearch. 
https://www.elastic.co/guide/en/fleet/current/grant-access-to-elasticsearch.html#create-api-key-standalone-agent diff --git a/deploy/kubernetes/elastic-agent-kustomize/ksm-autosharding/elastic-agent-standalone/extra/elastic-agent-standalone-statefulset.yaml b/deploy/kubernetes/elastic-agent-kustomize/ksm-autosharding/elastic-agent-standalone/extra/elastic-agent-standalone-statefulset.yaml index 324ed435986..f433f4adb56 100644 --- a/deploy/kubernetes/elastic-agent-kustomize/ksm-autosharding/elastic-agent-standalone/extra/elastic-agent-standalone-statefulset.yaml +++ b/deploy/kubernetes/elastic-agent-kustomize/ksm-autosharding/elastic-agent-standalone/extra/elastic-agent-standalone-statefulset.yaml @@ -28,19 +28,19 @@ spec: # Uncomment if using hints feature #initContainers: # - name: k8s-templates-downloader - # image: docker.elastic.co/beats/elastic-agent:9.0.0 + # image: docker.elastic.co/beats/elastic-agent:9.1.0 # command: ['bash'] # args: # - -c # - >- # mkdir -p /etc/elastic-agent/inputs.d && - # curl -sL https://github.com/elastic/elastic-agent/archive/9.0.tar.gz | tar xz -C /etc/elastic-agent/inputs.d --strip=5 "elastic-agent-9.0/deploy/kubernetes/elastic-agent-standalone/templates.d" + # curl -sL https://github.com/elastic/elastic-agent/archive/9.1.tar.gz | tar xz -C /etc/elastic-agent/inputs.d --strip=5 "elastic-agent-9.1/deploy/kubernetes/elastic-agent-standalone/templates.d" # volumeMounts: # - name: external-inputs # mountPath: /etc/elastic-agent/inputs.d containers: - name: elastic-agent-standalone - image: docker.elastic.co/beats/elastic-agent:9.0.0 + image: docker.elastic.co/beats/elastic-agent:9.1.0 args: ["-c", "/etc/elastic-agent/agent.yml", "-e"] env: # The API Key with access privilleges to connect to Elasticsearch. 
https://www.elastic.co/guide/en/fleet/current/grant-access-to-elasticsearch.html#create-api-key-standalone-agent diff --git a/deploy/kubernetes/elastic-agent-kustomize/ksm-hints/elastic-agent-standalone/kustomization.yaml b/deploy/kubernetes/elastic-agent-kustomize/ksm-hints/elastic-agent-standalone/kustomization.yaml index f479c3ff2bd..d8aeedb8dc2 100644 --- a/deploy/kubernetes/elastic-agent-kustomize/ksm-hints/elastic-agent-standalone/kustomization.yaml +++ b/deploy/kubernetes/elastic-agent-kustomize/ksm-hints/elastic-agent-standalone/kustomization.yaml @@ -19,13 +19,13 @@ patches: spec: initContainers: - name: k8s-templates-downloader - image: docker.elastic.co/beats/elastic-agent:9.0.0 + image: docker.elastic.co/beats/elastic-agent:9.1.0 command: ['bash'] args: - -c - >- mkdir -p /etc/elastic-agent/inputs.d && - curl -sL https://github.com/elastic/elastic-agent/archive/9.0.tar.gz | tar xz -C /etc/elastic-agent/inputs.d --strip=5 "elastic-agent-9.0/deploy/kubernetes/elastic-agent-standalone/templates.d" + curl -sL https://github.com/elastic/elastic-agent/archive/9.1.tar.gz | tar xz -C /etc/elastic-agent/inputs.d --strip=5 "elastic-agent-9.1/deploy/kubernetes/elastic-agent-standalone/templates.d" volumeMounts: - mountPath: /etc/elastic-agent/inputs.d name: external-inputs diff --git a/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml b/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml index ec38e2b3d02..377ffb84c58 100644 --- a/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml +++ b/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml @@ -27,7 +27,7 @@ spec: dnsPolicy: ClusterFirstWithHostNet containers: - name: elastic-agent - image: docker.elastic.co/beats/elastic-agent:9.0.0 + image: docker.elastic.co/beats/elastic-agent:9.1.0 env: # Set to 1 for enrollment into Fleet server. 
If not set, Elastic Agent is run in standalone mode - name: FLEET_ENROLL diff --git a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml index 7c0a2fa2d21..0797b92080a 100644 --- a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml +++ b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml @@ -697,19 +697,19 @@ spec: # Uncomment if using hints feature #initContainers: # - name: k8s-templates-downloader - # image: docker.elastic.co/beats/elastic-agent:9.0.0 + # image: docker.elastic.co/beats/elastic-agent:9.1.0 # command: ['bash'] # args: # - -c # - >- # mkdir -p /etc/elastic-agent/inputs.d && - # curl -sL https://github.com/elastic/elastic-agent/archive/9.0.tar.gz | tar xz -C /etc/elastic-agent/inputs.d --strip=5 "elastic-agent-9.0/deploy/kubernetes/elastic-agent-standalone/templates.d" + # curl -sL https://github.com/elastic/elastic-agent/archive/9.1.tar.gz | tar xz -C /etc/elastic-agent/inputs.d --strip=5 "elastic-agent-9.1/deploy/kubernetes/elastic-agent-standalone/templates.d" # volumeMounts: # - name: external-inputs # mountPath: /etc/elastic-agent/inputs.d containers: - name: elastic-agent-standalone - image: docker.elastic.co/beats/elastic-agent:9.0.0 + image: docker.elastic.co/beats/elastic-agent:9.1.0 args: ["-c", "/etc/elastic-agent/agent.yml", "-e"] env: # The API Key with access privilleges to connect to Elasticsearch. 
https://www.elastic.co/guide/en/fleet/current/grant-access-to-elasticsearch.html#create-api-key-standalone-agent diff --git a/testing/upgradetest/versions_test.go b/testing/upgradetest/versions_test.go index 34aa276e54d..65471eab545 100644 --- a/testing/upgradetest/versions_test.go +++ b/testing/upgradetest/versions_test.go @@ -88,6 +88,7 @@ func TestGetUpgradableVersions(t *testing.T) { } func TestPreviousMinor(t *testing.T) { + t.Skip("Skipped until there are at least 2 minors available or https://github.com/elastic/elastic-agent/issues/6667 is solved") currentParsed, err := version.ParseVersion(bversion.Agent) require.NoError(t, err) diff --git a/version/version.go b/version/version.go index 1f74814ee9a..0174d9b7c1c 100644 --- a/version/version.go +++ b/version/version.go @@ -4,5 +4,5 @@ package version -const defaultBeatVersion = "9.0.0" +const defaultBeatVersion = "9.1.0" const Agent = defaultBeatVersion From 3145177fdda34961d166ecb809674d6d092d7fa5 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 31 Jan 2025 11:32:56 +0100 Subject: [PATCH 15/17] [Automation] Bump Golang version to 1.22.11 (#6663) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: Update version.asciidoc Made with ❤️️ by updatecli * chore: Update .golangci.yml Made with ❤️️ by updatecli * chore: Update from dockerfiles Made with ❤️️ by updatecli * chore: Update go.mod version Made with ❤️️ by updatecli --------- Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- .golangci.yml | 8 ++++---- Dockerfile | 2 +- Dockerfile.skaffold | 2 +- go.mod | 2 +- version/docs/version.asciidoc | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index bcfef9a3d8f..ff783454433 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -130,7 +130,7 @@ linters-settings: gosimple: # Select the Go version to target. 
The default is '1.13'. - go: "1.22.10" + go: "1.22.11" nolintlint: # Enable to ensure that nolint directives are all used. Default is true. @@ -146,17 +146,17 @@ linters-settings: staticcheck: # Select the Go version to target. The default is '1.13'. - go: "1.22.10" + go: "1.22.11" checks: ["all"] stylecheck: # Select the Go version to target. The default is '1.13'. - go: "1.22.10" + go: "1.22.11" checks: ["all"] unused: # Select the Go version to target. The default is '1.13'. - go: "1.22.10" + go: "1.22.11" gosec: excludes: diff --git a/Dockerfile b/Dockerfile index 5aa38597904..81c7b489b58 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.22.10 +ARG GO_VERSION=1.22.11 FROM circleci/golang:${GO_VERSION} diff --git a/Dockerfile.skaffold b/Dockerfile.skaffold index 54c55718b6c..9e86e69530c 100644 --- a/Dockerfile.skaffold +++ b/Dockerfile.skaffold @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.22.10 +ARG GO_VERSION=1.22.11 ARG crossbuild_image="docker.elastic.co/beats-dev/golang-crossbuild" ARG AGENT_VERSION=8.9.0-SNAPSHOT ARG AGENT_IMAGE="docker.elastic.co/beats/elastic-agent" diff --git a/go.mod b/go.mod index bcab266a17b..9df23e2ffab 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/elastic/elastic-agent -go 1.22.10 +go 1.22.11 require ( github.com/Jeffail/gabs/v2 v2.6.0 diff --git a/version/docs/version.asciidoc b/version/docs/version.asciidoc index 5f46c27bd9a..64268931fc8 100644 --- a/version/docs/version.asciidoc +++ b/version/docs/version.asciidoc @@ -3,7 +3,7 @@ // FIXME: once elastic.co docs have been switched over to use `main`, remove // the `doc-site-branch` line below as well as any references to it in the code. 
:doc-site-branch: master -:go-version: 1.22.10 +:go-version: 1.22.11 :release-state: unreleased :python: 3.7 :docker: 1.12 From 8492a35fa23952f44ed24fc650c47f8d86676549 Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Fri, 31 Jan 2025 12:04:27 +0100 Subject: [PATCH 16/17] Fix dry run in dra publish script (#6670) * mark and skip flaky TestOTelManager_Run * Fix dry run in dra publish script --- .buildkite/scripts/steps/dra-publish.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.buildkite/scripts/steps/dra-publish.sh b/.buildkite/scripts/steps/dra-publish.sh index 1edacb0f76d..bc584733a62 100755 --- a/.buildkite/scripts/steps/dra-publish.sh +++ b/.buildkite/scripts/steps/dra-publish.sh @@ -73,7 +73,7 @@ function run_release_manager_collect() { --workflow "${_workflow}" \ --version "${_version}" \ --artifact-set "${_artifact_set}" \ - --qualifier "${VERSION_QUALIFIER}" + --qualifier "${VERSION_QUALIFIER}" \ ${_dry_run} } From cdb574ae036c5ae07015b066ef265e6a635fd0ef Mon Sep 17 00:00:00 2001 From: Dimitrios Liappis Date: Fri, 31 Jan 2025 14:21:02 +0200 Subject: [PATCH 17/17] Update branch filters to include/exclude 9.* (#6665) * Update branch filters to include/exclude 9.* This commit updates branch_configuration and branch filters in CI pipeline definitions to support the new 9.* branches. 
* Fix typo --------- Co-authored-by: Julien Lind --- catalog-info.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/catalog-info.yaml b/catalog-info.yaml index 1d211e27e2f..ee8c8fdbb4e 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -37,7 +37,7 @@ spec: name: elastic-agent description: Buildkite pipeline for the Elastic Agent project spec: - branch_configuration: "main 7.* 8.* v7.* v8.*" + branch_configuration: "main 7.* 8.* 9.* v7.* v8.* v9.*" repository: elastic/elastic-agent pipeline_file: ".buildkite/pipeline.yml" provider_settings: @@ -216,7 +216,7 @@ spec: name: elastic-agent-binary-dra description: Buildkite pipeline for packaging Elastic Agent core binary and publish it to DRA spec: - branch_configuration: "main 7.* 8.* v7.* v8.*" + branch_configuration: "main 7.* 8.* 9.* v7.* v8.* v9.*" pipeline_file: ".buildkite/pipeline.elastic-agent-binary-dra.yml" provider_settings: build_pull_request_forks: false