diff --git a/.buildkite/auditbeat/auditbeat-pipeline.yml b/.buildkite/auditbeat/auditbeat-pipeline.yml index 34321b61161b..147ca45ced16 100644 --- a/.buildkite/auditbeat/auditbeat-pipeline.yml +++ b/.buildkite/auditbeat/auditbeat-pipeline.yml @@ -1,5 +1,137 @@ # yaml-language-server: $schema=https://raw.githubusercontent.com/buildkite/pipeline-schema/main/schema.json +env: + IMAGE_UBUNTU_X86_64: "family/core-ubuntu-2204" + IMAGE_UBUNTU_ARM_64: "core-ubuntu-2004-aarch64" + IMAGE_WIN_2016: "family/core-windows-2016" + IMAGE_WIN_2019: "family/core-windows-2019" + IMAGE_WIN_2022: "family/core-windows-2022" + IMAGE_RHEL9: "family/core-rhel-9" + IMAGE_MACOS_X86_64: "generic-13-ventura-x64" + steps: - - label: "Example test" - command: echo "Hello!" + - group: "Auditbeat Mandatory Testing" + key: "mandatory-tests" + if: build.env("GITHUB_PR_TRIGGER_COMMENT") == "auditbeat" || build.env("BUILDKITE_PULL_REQUEST") != "false" + + steps: + - label: ":ubuntu: Unit Tests" + command: + - ".buildkite/auditbeat/scripts/unit-tests.sh" + notify: + - github_commit_status: + context: "auditbeat: Unit Tests" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + artifact_paths: + - "auditbeat/build/*.xml" + - "auditbeat/build/*.json" + + - label: ":rhel: Unit Tests" + command: + - ".buildkite/auditbeat/scripts/unit-tests.sh" + notify: + - github_commit_status: + context: "auditbeat: Unit Tests" + agents: + provider: "gcp" + image: "${IMAGE_RHEL9}" + artifact_paths: + - "auditbeat/build/*.xml" + - "auditbeat/build/*.json" + + - label: ":windows:-{{matrix.image}} Unit Tests" + command: ".buildkite/auditbeat/scripts/unit-tests-win.ps1" + notify: + - github_commit_status: + context: "auditbeat: Unit Tests" + agents: + provider: "gcp" + image: "{{matrix.image}}" + machine_type: "n2-standard-8" + disk_size: 200 + disk_type: "pd-ssd" + matrix: + setup: + image: + - "${IMAGE_WIN_2016}" + - "${IMAGE_WIN_2022}" + artifact_paths: + - "auditbeat/build/*.xml" + - "auditbeat/build/*.json" + 
+ - label: ":linux: Crosscompile" + command: + - ".buildkite/auditbeat/scripts/crosscompile.sh" + env: + GOX_FLAGS: "-arch amd64" + notify: + - github_commit_status: + context: "auditbeat: Unit Tests" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + + - group: "Extended Testing" + key: "extended-tests" + if: build.env("BUILDKITE_PULL_REQUEST") != "false" || build.env("GITHUB_PR_TRIGGER_COMMENT") == "auditbeat for extended support" + + steps: + - label: ":linux: ARM64 Unit Tests" + key: "arm-extended" + if: build.env("GITHUB_PR_TRIGGER_COMMENT") == "auditbeat for arm" || build.env("GITHUB_PR_LABELS") =~ /.*arm.*/ + command: + - ".buildkite/auditbeat/scripts/unit-tests.sh" + notify: + - github_commit_status: + context: "auditbeat/Extended: Unit Tests ARM" + agents: + provider: "aws" + imagePrefix: "${IMAGE_UBUNTU_ARM_64}" + instanceType: "t4g.large" + artifact_paths: "auditbeat/build/*.xml" + + - label: ":mac: MacOS Unit Tests" + key: "macos-extended" + if: build.env("GITHUB_PR_TRIGGER_COMMENT") == "auditbeat for macos" || build.env("GITHUB_PR_LABELS") =~ /.*macOS.*/ + command: + - ".buildkite/auditbeat/scripts/unit-tests.sh" + notify: + - github_commit_status: + context: "auditbeat/Extended: MacOS Unit Tests" + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_X86_64}" + artifact_paths: "auditbeat/build/*.xml" + + - group: "Windows Extended Testing" + key: "extended-tests-win" + if: build.env("GITHUB_PR_TRIGGER_COMMENT") == "auditbeat for windows" || build.env("GITHUB_PR_LABELS") =~ /.*windows.*/ + + steps: + - label: ":windows: Win 2019 Unit Tests" + key: "win-extended-2019" + command: ".buildkite/auditbeat/scripts/unit-tests-win.ps1" + notify: + - github_commit_status: + context: "auditbeat/Extended: Win-2019 Unit Tests" + agents: + provider: "gcp" + image: "${IMAGE_WIN_2019}" + machine_type: "n2-standard-8" + disk_size: 200 + disk_type: "pd-ssd" + artifact_paths: + - "auditbeat/build/*.xml" + - "auditbeat/build/*.json" + + - group: 
"Packaging" + key: "packaging" + if: build.env("BUILDKITE_PULL_REQUEST") != "false" + depends_on: + - "mandatory-tests" + + steps: + - label: Package pipeline + commands: ".buildkite/auditbeat/scripts/package-step.sh | buildkite-agent pipeline upload" diff --git a/.buildkite/auditbeat/scripts/crosscompile.sh b/.buildkite/auditbeat/scripts/crosscompile.sh new file mode 100755 index 000000000000..866d6be42239 --- /dev/null +++ b/.buildkite/auditbeat/scripts/crosscompile.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +set -euo pipefail + +source .buildkite/env-scripts/linux-env.sh + +echo "--- Executing Crosscompile" +make -C auditbeat crosscompile diff --git a/.buildkite/auditbeat/scripts/package-step.sh b/.buildkite/auditbeat/scripts/package-step.sh new file mode 100755 index 000000000000..021240589923 --- /dev/null +++ b/.buildkite/auditbeat/scripts/package-step.sh @@ -0,0 +1,46 @@ +#!/usr/bin/env bash + +set -euo pipefail + +source .buildkite/env-scripts/util.sh + +changeset="^auditbeat/ +^go.mod +^pytest.ini +^dev-tools/ +^libbeat/ +^testing/ +^\.buildkite/auditbeat/" + +if are_files_changed "$changeset"; then + cat <<-EOF + steps: + - label: ":ubuntu: Packaging Linux X86" + key: "package-linux-x86" + env: + PLATFORMS: "+all linux/amd64 linux/arm64 windows/amd64 darwin/amd64 darwin/arm64" + command: + - ".buildkite/auditbeat/scripts/package.sh" + notify: + - github_commit_status: + context: "Auditbeat/Packaging: Linux X86" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + + - label: ":linux: Packaging Linux ARM" + key: "package-linux-arm" + env: + PLATFORMS: "linux/arm64" + PACKAGES: "docker" + command: + - ".buildkite/auditbeat/scripts/package.sh" + notify: + - github_commit_status: + context: "Auditbeat/Packaging: ARM" + agents: + provider: "aws" + imagePrefix: "${IMAGE_UBUNTU_ARM_64}" + instanceType: "t4g.large" +EOF +fi diff --git a/.buildkite/auditbeat/scripts/package.sh b/.buildkite/auditbeat/scripts/package.sh new file mode 100755 index 
000000000000..71872ca15a35 --- /dev/null +++ b/.buildkite/auditbeat/scripts/package.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +set -euo pipefail + +source .buildkite/env-scripts/linux-env.sh + +echo "--- Docker Version: $(docker --version)" + +echo "--- Start Packaging" +cd auditbeat +umask 0022 +mage package + diff --git a/.buildkite/auditbeat/scripts/unit-tests-win.ps1 b/.buildkite/auditbeat/scripts/unit-tests-win.ps1 new file mode 100644 index 000000000000..200627d518f0 --- /dev/null +++ b/.buildkite/auditbeat/scripts/unit-tests-win.ps1 @@ -0,0 +1,51 @@ +$ErrorActionPreference = "Stop" # set -e +$GoVersion = $env:GOLANG_VERSION # If Choco doesn't have the version specified in .go-version file, should be changed manually + +# Forcing to checkout again all the files with a correct autocrlf. +# Doing this here because we cannot set git clone options before. +function fixCRLF() { + Write-Host "--- Fixing CRLF in git checkout --" + git config core.autocrlf false + git rm --quiet --cached -r . + git reset --quiet --hard +} + +function withGolang() { + Write-Host "--- Install golang $GoVersion --" + choco install golang -y --version $GoVersion + + $choco = Convert-Path "$((Get-Command choco).Path)\..\.." 
+ Import-Module "$choco\helpers\chocolateyProfile.psm1" + refreshenv + go version + go env +} + +function installGoDependencies() { + $installPackages = @( + "github.com/magefile/mage" + "github.com/elastic/go-licenser" + "golang.org/x/tools/cmd/goimports" + "github.com/jstemmer/go-junit-report" + "github.com/tebeka/go2xunit" + ) + foreach ($pkg in $installPackages) { + go install "$pkg" + } +} + +fixCRLF + +$ErrorActionPreference = "Continue" # set +e + +Set-Location -Path auditbeat +New-Item -ItemType Directory -Force -Path "build" +withGolang +installGoDependencies + +mage build unitTest + +$EXITCODE=$LASTEXITCODE +$ErrorActionPreference = "Stop" + +Exit $EXITCODE diff --git a/.buildkite/auditbeat/scripts/unit-tests.sh b/.buildkite/auditbeat/scripts/unit-tests.sh new file mode 100755 index 000000000000..c1f5685c77fe --- /dev/null +++ b/.buildkite/auditbeat/scripts/unit-tests.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -euo pipefail + +source .buildkite/env-scripts/linux-env.sh + +echo "--- Running Unit Tests" +sudo chmod -R go-w auditbeat/ + +cd auditbeat +umask 0022 +mage build unitTest diff --git a/.buildkite/env-scripts/env.sh b/.buildkite/env-scripts/env.sh new file mode 100644 index 000000000000..4dfc01bafc34 --- /dev/null +++ b/.buildkite/env-scripts/env.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +SETUP_GVM_VERSION="v0.5.1" +WORKSPACE="$(pwd)" +BIN="${WORKSPACE}/bin" +HW_TYPE="$(uname -m)" +PLATFORM_TYPE="$(uname)" +REPO="beats" +TMP_FOLDER="tmp.${REPO}" +DOCKER_REGISTRY="docker.elastic.co" + +export SETUP_GVM_VERSION +export WORKSPACE +export BIN +export HW_TYPE +export PLATFORM_TYPE +export REPO +export TMP_FOLDER +export DOCKER_REGISTRY diff --git a/.buildkite/env-scripts/linux-env.sh b/.buildkite/env-scripts/linux-env.sh new file mode 100644 index 000000000000..5e6e5f7cbf05 --- /dev/null +++ b/.buildkite/env-scripts/linux-env.sh @@ -0,0 +1,47 @@ +#!/usr/bin/env bash + +set -euo pipefail + +source .buildkite/env-scripts/util.sh + 
+DEBIAN_FRONTEND="noninteractive" + +sudo mkdir -p /etc/needrestart +echo "\$nrconf{restart} = 'a';" | sudo tee -a /etc/needrestart/needrestart.conf > /dev/null + +echo "--- PLATFORM TYPE $PLATFORM_TYPE" + +if [[ $PLATFORM_TYPE == "Linux" ]]; then + # Remove this code once beats specific agent is set up + if grep -q 'Ubuntu' /etc/*release; then + export DEBIAN_FRONTEND + + echo "--- Ubuntu - Installing libs" + sudo apt-get update + sudo apt-get install -y libsystemd-dev + sudo apt install -y python3-pip + sudo apt-get install -y python3-venv + fi + + # Remove this code once beats specific agent is set up + if grep -q 'Red Hat' /etc/*release; then + echo "--- RHL - Installing libs" + sudo yum update -y + sudo yum install -y systemd-devel + sudo yum install -y python3-pip + sudo yum install -y python3 + pip3 install virtualenv + fi +fi + +if [[ $PLATFORM_TYPE == Darwin* ]]; then + echo "--- Setting larger ulimit on MacOS" + # To bypass file descriptor errors like "Too many open files error" on MacOS + ulimit -Sn 50000 + echo "--- ULIMIT: $(ulimit -n)" +fi + +echo "--- Setting up environment" +add_bin_path +with_go +with_mage diff --git a/.buildkite/env-scripts/util.sh b/.buildkite/env-scripts/util.sh new file mode 100644 index 000000000000..7aef69cff389 --- /dev/null +++ b/.buildkite/env-scripts/util.sh @@ -0,0 +1,105 @@ +#!/usr/bin/env bash + +set -euo pipefail + +add_bin_path() { + echo "Adding PATH to the environment variables..." + create_bin + export PATH="${PATH}:${BIN}" +} + +with_go() { + local go_version="${GOLANG_VERSION}" + echo "Setting up the Go environment..." 
+ create_bin + check_platform_architecture + retry 5 curl -sL -o ${BIN}/gvm "https://github.com/andrewkroh/gvm/releases/download/${SETUP_GVM_VERSION}/gvm-${PLATFORM_TYPE}-${arch_type}" + export PATH="${PATH}:${BIN}" + chmod +x ${BIN}/gvm + eval "$(gvm "$go_version")" + go version + which go + export PATH="${PATH}:$(go env GOPATH):$(go env GOPATH)/bin" +} + +with_mage() { + local install_packages=( + "github.com/magefile/mage" + "github.com/elastic/go-licenser" + "golang.org/x/tools/cmd/goimports" + "github.com/jstemmer/go-junit-report" + "gotest.tools/gotestsum" + ) + create_bin + for pkg in "${install_packages[@]}"; do + go install "${pkg}@latest" + done +} + +create_bin() { + if [[ ! -d "${BIN}" ]]; then + mkdir -p ${BIN} + fi +} + +check_platform_architecture() { +# for downloading the GVM and Terraform packages + case "${HW_TYPE}" in + "x86_64") + arch_type="amd64" + ;; + "aarch64") + arch_type="arm64" + ;; + "arm64") + arch_type="arm64" + ;; + *) + echo "The current platform/OS type is unsupported yet" + ;; + esac +} + +retry() { + local retries=$1 + shift + local count=0 + until "$@"; do + exit=$? + wait=$((2 ** count)) + count=$((count + 1)) + if [ $count -lt "$retries" ]; then + >&2 echo "Retry $count/$retries exited $exit, retrying in $wait seconds..." + sleep $wait + else + >&2 echo "Retry $count/$retries exited $exit, no more retries left." + return $exit + fi + done + return 0 +} + +are_files_changed() { + local changeset=$1 + + if git diff --name-only HEAD@{1} HEAD | grep -qE "$changeset"; then + return 0; + else + echo "WARN! No files changed in $changeset" + return 1; + fi +} + +cleanup() { + echo "Deleting temporary files..." + rm -rf ${BIN}/${TMP_FOLDER}.* + echo "Done." 
+} + +unset_secrets () { + for var in $(printenv | sed 's;=.*;;' | sort); do + if [[ "$var" == *_SECRET || "$var" == *_TOKEN ]]; then + unset "$var" + fi + done +} diff --git a/.buildkite/env-scripts/win-env.sh b/.buildkite/env-scripts/win-env.sh new file mode 100644 index 000000000000..ccf5479b46e1 --- /dev/null +++ b/.buildkite/env-scripts/win-env.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +echo "--- PLATFORM TYPE: ${PLATFORM_TYPE}" +if [[ ${PLATFORM_TYPE} = MINGW* ]]; then + echo "--- Installing Python on Win" + choco install mingw -y + choco install python --version=3.11.0 -y +fi diff --git a/.buildkite/filebeat/filebeat-pipeline.yml b/.buildkite/filebeat/filebeat-pipeline.yml index 34321b61161b..eda9fb93a669 100644 --- a/.buildkite/filebeat/filebeat-pipeline.yml +++ b/.buildkite/filebeat/filebeat-pipeline.yml @@ -1,5 +1,140 @@ # yaml-language-server: $schema=https://raw.githubusercontent.com/buildkite/pipeline-schema/main/schema.json +env: + IMAGE_UBUNTU_X86_64: "family/core-ubuntu-2204" + IMAGE_UBUNTU_ARM_64: "core-ubuntu-2004-aarch64" + IMAGE_WIN_2016: "family/core-windows-2016" + IMAGE_WIN_2019: "family/core-windows-2019" + IMAGE_WIN_2022: "family/core-windows-2022" + IMAGE_MACOS_X86_64: "generic-13-ventura-x64" + steps: - - label: "Example test" - command: echo "Hello!" 
+ - group: "Filebeat Mandatory Testing" + key: "mandatory-tests" + if: build.env("GITHUB_PR_TRIGGER_COMMENT") == "filebeat" || build.env("BUILDKITE_PULL_REQUEST") != "false" + + steps: + - label: ":ubuntu: Unit Tests" + command: + - ".buildkite/filebeat/scripts/unit-tests.sh" + notify: + - github_commit_status: + context: "Filebeat: Unit Tests" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + machineType: "c2-standard-16" + artifact_paths: + - "filebeat/build/*.xml" + - "filebeat/build/*.json" + + - label: ":ubuntu: Go Integration Tests" + command: + - ".buildkite/filebeat/scripts/integration-gotests.sh" + notify: + - github_commit_status: + context: "Filebeat: Integration Tests" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + machineType: "c2-standard-16" + artifact_paths: + - "filebeat/build/*.xml" + - "filebeat/build/*.json" + + - label: ":ubuntu: Python Integration Tests" + command: + - ".buildkite/filebeat/scripts/integration-pytests.sh" + notify: + - github_commit_status: + context: "Filebeat: Python Integration Tests" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + machineType: "c2-standard-16" + artifact_paths: + - "filebeat/build/*.xml" + - "filebeat/build/*.json" + + - label: ":windows:-{{matrix.image}} Unit Tests" + command: ".buildkite/filebeat/scripts/unit-tests-win.ps1" + notify: + - github_commit_status: + context: "Filebeat: Unit Tests" + agents: + provider: "gcp" + image: "{{matrix.image}}" + machine_type: "n2-standard-8" + disk_size: 200 + disk_type: "pd-ssd" + matrix: + setup: + image: + - "${IMAGE_WIN_2016}" + - "${IMAGE_WIN_2022}" + artifact_paths: + - "filebeat/build/*.xml" + - "filebeat/build/*.json" + + - group: "Extended Testing" + key: "extended-tests" + if: build.env("BUILDKITE_PULL_REQUEST") != "false" || build.env("GITHUB_PR_TRIGGER_COMMENT") == "filebeat for extended support" + + steps: + - label: ":linux: ARM64 Unit Tests" + key: "arm-extended" + if: build.env("GITHUB_PR_TRIGGER_COMMENT") 
== "filebeat for arm" || build.env("GITHUB_PR_LABELS") =~ /.*arm.*/ + command: + - ".buildkite/filebeat/scripts/unit-tests.sh" + notify: + - github_commit_status: + context: "Filebeat/Extended: Unit Tests ARM" + agents: + provider: "aws" + imagePrefix: "${IMAGE_UBUNTU_ARM_64}" + instanceType: "t4g.large" + artifact_paths: "filebeat/build/*.xml" + + - label: ":mac: MacOS Unit Tests" + key: "macos-extended" + if: build.env("GITHUB_PR_TRIGGER_COMMENT") == "filebeat for macos" || build.env("GITHUB_PR_LABELS") =~ /.*macOS.*/ + command: + - ".buildkite/filebeat/scripts/unit-tests.sh" + notify: + - github_commit_status: + context: "Filebeat/Extended: MacOS Unit Tests" + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_X86_64}" + artifact_paths: "filebeat/build/*.xml" + + - group: "Windows Extended Testing" + key: "extended-tests-win" + if: build.env("GITHUB_PR_TRIGGER_COMMENT") == "filebeat for windows" || build.env("GITHUB_PR_LABELS") =~ /.*windows.*/ + + steps: + - label: ":windows: Win 2019 Unit Tests" + key: "win-extended-2019" + command: ".buildkite/filebeat/scripts/unit-tests-win.ps1" + notify: + - github_commit_status: + context: "Filebeat/Extended: Win-2019 Unit Tests" + agents: + provider: "gcp" + image: "${IMAGE_WIN_2019}" + machine_type: "n2-standard-8" + disk_size: 200 + disk_type: "pd-ssd" + artifact_paths: + - "filebeat/build/*.xml" + - "filebeat/build/*.json" + + - group: "Packaging" + key: "packaging" + if: build.env("BUILDKITE_PULL_REQUEST") != "false" + depends_on: + - "mandatory-tests" + + steps: + - label: Package pipeline + commands: ".buildkite/filebeat/scripts/package-step.sh | buildkite-agent pipeline upload" diff --git a/.buildkite/filebeat/scripts/integration-gotests.sh b/.buildkite/filebeat/scripts/integration-gotests.sh new file mode 100755 index 000000000000..d64ce7c98eb2 --- /dev/null +++ b/.buildkite/filebeat/scripts/integration-gotests.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -euo pipefail + +source 
.buildkite/env-scripts/linux-env.sh + +echo "--- Executing Integration Tests" +sudo chmod -R go-w filebeat/ + +cd filebeat +umask 0022 +mage goIntegTest diff --git a/.buildkite/filebeat/scripts/integration-pytests.sh b/.buildkite/filebeat/scripts/integration-pytests.sh new file mode 100755 index 000000000000..b51e8ae18a68 --- /dev/null +++ b/.buildkite/filebeat/scripts/integration-pytests.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -euo pipefail + +source .buildkite/env-scripts/linux-env.sh + +echo "--- Executing Integration Tests" +sudo chmod -R go-w filebeat/ + +cd filebeat +umask 0022 +mage pythonIntegTest diff --git a/.buildkite/filebeat/scripts/package-step.sh b/.buildkite/filebeat/scripts/package-step.sh new file mode 100755 index 000000000000..985125433cec --- /dev/null +++ b/.buildkite/filebeat/scripts/package-step.sh @@ -0,0 +1,46 @@ +#!/usr/bin/env bash + +set -euo pipefail + +source .buildkite/env-scripts/util.sh + +changeset="^filebeat/ +^go.mod +^pytest.ini +^dev-tools/ +^libbeat/ +^testing/ +^\.buildkite/filebeat/" + +if are_files_changed "$changeset"; then + cat <<-EOF + steps: + - label: ":ubuntu: Packaging Linux X86" + key: "package-linux-x86" + env: + PLATFORMS: "+all linux/amd64 linux/arm64 windows/amd64 darwin/amd64 darwin/arm64" + command: + - ".buildkite/filebeat/scripts/package.sh" + notify: + - github_commit_status: + context: "Filebeat/Packaging: Linux X86" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + + - label: ":linux: Packaging Linux ARM" + key: "package-linux-arm" + env: + PLATFORMS: "linux/arm64" + PACKAGES: "docker" + command: + - ".buildkite/filebeat/scripts/package.sh" + notify: + - github_commit_status: + context: "Filebeat/Packaging: ARM" + agents: + provider: "aws" + imagePrefix: "${IMAGE_UBUNTU_ARM_64}" + instanceType: "t4g.large" +EOF +fi diff --git a/.buildkite/filebeat/scripts/package.sh b/.buildkite/filebeat/scripts/package.sh new file mode 100755 index 000000000000..0bb03250348c --- /dev/null +++ 
b/.buildkite/filebeat/scripts/package.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +set -euo pipefail + +source .buildkite/env-scripts/linux-env.sh + +echo "--- Start Packaging" +cd filebeat +umask 0022 +mage package diff --git a/.buildkite/filebeat/scripts/unit-tests-win.ps1 b/.buildkite/filebeat/scripts/unit-tests-win.ps1 new file mode 100644 index 000000000000..8990eb30a093 --- /dev/null +++ b/.buildkite/filebeat/scripts/unit-tests-win.ps1 @@ -0,0 +1,51 @@ +$ErrorActionPreference = "Stop" # set -e +$GoVersion = $env:GOLANG_VERSION # If Choco doesn't have the version specified in .go-version file, should be changed manually + +# Forcing to checkout again all the files with a correct autocrlf. +# Doing this here because we cannot set git clone options before. +function fixCRLF() { + Write-Host "-- Fixing CRLF in git checkout --" + git config core.autocrlf false + git rm --quiet --cached -r . + git reset --quiet --hard +} + +function withGolang() { + Write-Host "-- Install golang $GoVersion --" + choco install golang -y --version $GoVersion + + $choco = Convert-Path "$((Get-Command choco).Path)\..\.." 
+ Import-Module "$choco\helpers\chocolateyProfile.psm1" + refreshenv + go version + go env +} + +function installGoDependencies() { + $installPackages = @( + "github.com/magefile/mage" + "github.com/elastic/go-licenser" + "golang.org/x/tools/cmd/goimports" + "github.com/jstemmer/go-junit-report" + "github.com/tebeka/go2xunit" + ) + foreach ($pkg in $installPackages) { + go install "$pkg" + } +} + +fixCRLF + +$ErrorActionPreference = "Continue" # set +e + +Set-Location -Path filebeat +New-Item -ItemType Directory -Force -Path "build" +withGolang +installGoDependencies + +mage build unitTest + +$EXITCODE=$LASTEXITCODE +$ErrorActionPreference = "Stop" + +Exit $EXITCODE diff --git a/.buildkite/filebeat/scripts/unit-tests.sh b/.buildkite/filebeat/scripts/unit-tests.sh new file mode 100755 index 000000000000..08ce9d4ea1c6 --- /dev/null +++ b/.buildkite/filebeat/scripts/unit-tests.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +set -euo pipefail + +source .buildkite/env-scripts/linux-env.sh + +echo "--- Executing Unit Tests" +sudo chmod -R go-w filebeat/ + +umask 0022 +mage -d filebeat unitTest diff --git a/.buildkite/hooks/post-checkout b/.buildkite/hooks/post-checkout new file mode 100644 index 000000000000..b6cc7ad60bda --- /dev/null +++ b/.buildkite/hooks/post-checkout @@ -0,0 +1,53 @@ +#!/usr/bin/env bash + +set -euo pipefail + +checkout_merge() { + local target_branch=$1 + local pr_commit=$2 + local merge_branch=$3 + + if [[ -z "${target_branch}" ]]; then + echo "No pull request target branch" + exit 1 + fi + + git fetch -v origin "${target_branch}" + git checkout FETCH_HEAD + echo "Current branch: $(git rev-parse --abbrev-ref HEAD)" + + # create temporal branch to merge the PR with the target branch + git checkout -b ${merge_branch} + echo "New branch created: $(git rev-parse --abbrev-ref HEAD)" + + # set author identity so it can be run git merge + git config user.name "github-merged-pr-post-checkout" + git config user.email "auto-merge@buildkite" + + git merge 
--no-edit "${BUILDKITE_COMMIT}" || { + local merge_result=$? + echo "Merge failed: ${merge_result}" + git merge --abort + exit ${merge_result} + } +} + +pull_request="${BUILDKITE_PULL_REQUEST:-false}" + +if [[ "${pull_request}" == "false" ]]; then + echo "Not a pull request, skipping" + exit 0 +fi + +TARGET_BRANCH="${BUILDKITE_PULL_REQUEST_BASE_BRANCH:-master}" +PR_COMMIT="${BUILDKITE_COMMIT}" +PR_ID=${BUILDKITE_PULL_REQUEST} +MERGE_BRANCH="pr_merge_${PR_ID}" + +checkout_merge "${TARGET_BRANCH}" "${PR_COMMIT}" "${MERGE_BRANCH}" + +echo "Commit information" +git --no-pager log --format=%B -n 1 + +# Ensure buildkite groups are rendered +echo "" diff --git a/.buildkite/hooks/pre-command b/.buildkite/hooks/pre-command new file mode 100644 index 000000000000..d4b33be7690c --- /dev/null +++ b/.buildkite/hooks/pre-command @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +set -euo pipefail + +if [[ "$BUILDKITE_PIPELINE_SLUG" == "filebeat" || "$BUILDKITE_PIPELINE_SLUG" == "auditbeat" ]]; then + source .buildkite/env-scripts/env.sh + source .buildkite/env-scripts/util.sh + source .buildkite/env-scripts/win-env.sh + + if [[ -z "${GOLANG_VERSION-""}" ]]; then + export GOLANG_VERSION=$(cat "${WORKSPACE}/.go-version") + fi +fi + +if [[ "$BUILDKITE_PIPELINE_SLUG" == "beats-metricbeat" || "$BUILDKITE_PIPELINE_SLUG" == "beats-libbeat" ]]; then + source .buildkite/scripts/setenv.sh + if [[ "${BUILDKITE_COMMAND}" =~ ^buildkite-agent ]]; then + echo "Skipped pre-command when running the Upload pipeline" + exit 0 + fi +fi diff --git a/.buildkite/libbeat/pipeline.libbeat.yml b/.buildkite/libbeat/pipeline.libbeat.yml index 34321b61161b..1fb185b59d19 100644 --- a/.buildkite/libbeat/pipeline.libbeat.yml +++ b/.buildkite/libbeat/pipeline.libbeat.yml @@ -1,5 +1,45 @@ # yaml-language-server: $schema=https://raw.githubusercontent.com/buildkite/pipeline-schema/main/schema.json +env: + IMAGE_UBUNTU_X86_64: "family/core-ubuntu-2204" + IMAGE_UBUNTU_ARM_64: "core-ubuntu-2004-aarch64" + 
GCP_DEFAULT_MACHINE_TYPE: "c2d-highcpu-8" + GCP_HI_PERF_MASHINE_TYPE: "c2d-highcpu-16" + GCP_WIN_MACHINE_TYPE: "n2-standard-8" + AWS_ARM_INSTANCE_TYPE: "t4g.xlarge" + BEATS_PROJECT_NAME: "libbeat" + steps: - - label: "Example test" - command: echo "Hello!" + + - input: "Input Parameters" + key: "input-run-all-stages" + fields: + - select: "Libbeat - runLibbeat" + key: "runLibbeat" + options: + - label: "True" + value: "true" + - label: "False" + value: "false" + default: "false" + - select: "Libbeat - runLibBeatArmTest" + key: "runLibBeatArmTest" + options: + - label: "True" + value: "true" + - label: "False" + value: "false" + default: "false" + if: "build.source == 'ui'" + + - wait: ~ + if: "build.source == 'ui'" + allow_dependency_failure: false + + - label: ":linux: Load dynamic Libbeat pipeline" + key: "libbeat-pipeline" + command: ".buildkite/scripts/generate_libbeat_pipeline.sh" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + machineType: "${GCP_DEFAULT_MACHINE_TYPE}" diff --git a/.buildkite/metricbeat/pipeline.yml b/.buildkite/metricbeat/pipeline.yml index 34321b61161b..c42f17d2a363 100644 --- a/.buildkite/metricbeat/pipeline.yml +++ b/.buildkite/metricbeat/pipeline.yml @@ -1,5 +1,52 @@ # yaml-language-server: $schema=https://raw.githubusercontent.com/buildkite/pipeline-schema/main/schema.json +env: + IMAGE_UBUNTU_X86_64: "family/core-ubuntu-2204" + IMAGE_UBUNTU_ARM_64: "core-ubuntu-2004-aarch64" + IMAGE_WIN_10: "family/general-windows-10" + IMAGE_WIN_11: "family/general-windows-11" + IMAGE_WIN_2016: "family/core-windows-2016" + IMAGE_WIN_2019: "family/core-windows-2019" + IMAGE_WIN_2022: "family/core-windows-2022" + IMAGE_MACOS_X86_64: "generic-13-ventura-x64" + GO_AGENT_IMAGE: "golang:${GO_VERSION}" + BEATS_PROJECT_NAME: "metricbeat" + GCP_DEFAULT_MACHINE_TYPE: "c2d-highcpu-8" + GCP_HI_PERF_MASHINE_TYPE: "c2d-highcpu-16" + GCP_WIN_MACHINE_TYPE: "n2-standard-8" + AWS_ARM_INSTANCE_TYPE: "t4g.xlarge" + steps: - - label: "Example test" - 
command: echo "Hello!" + + - input: "Input Parameters" + key: "runMetricbeat" + fields: + - select: "Metricbeat - runMetricbeat" + key: "runMetricbeat" + options: + - label: "True" + value: "true" + - label: "False" + value: "false" + default: "false" + - select: "Metricbeat - runMetricbeatMacOsTests" + key: "runMetricbeatMacOsTests" + options: + - label: "True" + value: "true" + - label: "False" + value: "false" + default: "false" + if: "build.source == 'ui'" + + - wait: ~ + if: "build.source == 'ui'" + allow_dependency_failure: false + + - label: ":linux: Load dynamic metricbeat pipeline" + key: "metricbeat-pipeline" + command: ".buildkite/scripts/generate_metricbeat_pipeline.sh" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + machineType: "${GCP_DEFAULT_MACHINE_TYPE}" diff --git a/.buildkite/pull-requests.json b/.buildkite/pull-requests.json index cc8ff9ab7a52..66c508e252c9 100644 --- a/.buildkite/pull-requests.json +++ b/.buildkite/pull-requests.json @@ -25,12 +25,12 @@ "set_commit_status": true, "build_on_commit": true, "build_on_comment": true, - "trigger_comment_regex": "^(?:(?:buildkite\\W+)?(?:build|test)\\W+(?:this|it))|^/test filebeat$", - "always_trigger_comment_regex": "^(?:(?:buildkite\\W+)?(?:build|test)\\W+(?:this|it))|^/test filebeat$", + "trigger_comment_regex": "^/test filebeat( for (arm|macos|windows|extended support))?$|^/packag(e|ing)$", + "always_trigger_comment_regex": "^/test filebeat( for (arm|macos|windows|extended support))?$|^/package filebeat$", "skip_ci_labels": [ ], "skip_target_branches": [ ], "skip_ci_on_only_changed": [ ], - "always_require_ci_on_changed": [ ] + "always_require_ci_on_changed": ["^filebeat/.*", ".buildkite/filebeat/.*", "^go.mod", "^pytest.ini", "^dev-tools/.*", "^libbeat/.*", "^testing/.*" ] }, { "enabled": true, @@ -57,8 -57,8 @@ "set_commit_status": true, "build_on_commit": true, "build_on_comment": true, - "trigger_comment_regex": "^/test auditbeat$", - "always_trigger_comment_regex": "^/test 
auditbeat$", + "trigger_comment_regex": "^/test auditbeat( for (arm|macos|windows|extended support))?$", + "always_trigger_comment_regex": "^/test auditbeat( for (arm|macos|windows|extended support))?$", "skip_ci_labels": [ ], "skip_target_branches": [ ], "skip_ci_on_only_changed": [ ], @@ -127,6 +127,38 @@ "skip_target_branches": [ ], "skip_ci_on_only_changed": [ ], "always_require_ci_on_changed": ["^packetbeat/.*", ".buildkite/packetbeat/.*", "^go.mod", "^pytest.ini", "^dev-tools/.*", "^libbeat/.*", "^testing/.*"] + }, + { + "enabled": true, + "pipelineSlug": "beats-xpack-elastic-agent", + "allow_org_users": true, + "allowed_repo_permissions": ["admin", "write"], + "allowed_list": [ ], + "set_commit_status": true, + "build_on_commit": true, + "build_on_comment": true, + "trigger_comment_regex": "^/test elastic-agent$", + "always_trigger_comment_regex": "^/test elastic-agent$", + "skip_ci_labels": [ ], + "skip_target_branches": [ ], + "skip_ci_on_only_changed": ["^x-pack/elastic-agent/README.md", "^x-pack/elastic-agent/docs/.*", "^x-pack/elastic-agent/devtools/.*" ], + "always_require_ci_on_changed": ["^x-pack/elastic-agent/.*", ".buildkite/x-pack/elastic-agent/.*", "^go.mod", "^pytest.ini", "^dev-tools/.*", "^libbeat/.*", "^testing/.*"] + }, + { + "enabled": true, + "pipelineSlug": "beats-winlogbeat", + "allow_org_users": true, + "allowed_repo_permissions": ["admin", "write"], + "allowed_list": [ ], + "set_commit_status": true, + "build_on_commit": true, + "build_on_comment": true, + "trigger_comment_regex": "^/test winlogbeat$", + "always_trigger_comment_regex": "^/test winlogbeat$", + "skip_ci_labels": [ ], + "skip_target_branches": [ ], + "skip_ci_on_only_changed": [ ], + "always_require_ci_on_changed": ["^winlogbeat/.*", ".buildkite/winlogbeat/.*", "^go.mod", "^pytest.ini", "^dev-tools/.*", "^libbeat/.*", "^testing/.*"] } ] } diff --git a/.buildkite/scripts/common.sh b/.buildkite/scripts/common.sh new file mode 100755 index 000000000000..e3dd2ec4ac41 --- 
/dev/null +++ b/.buildkite/scripts/common.sh @@ -0,0 +1,245 @@ +#!/usr/bin/env bash +set -euo pipefail + +WORKSPACE=${WORKSPACE:-"$(pwd)"} +BIN="${WORKSPACE}/bin" +platform_type="$(uname)" +platform_type_lowercase=$(echo "$platform_type" | tr '[:upper:]' '[:lower:]') +arch_type="$(uname -m)" +GITHUB_PR_TRIGGER_COMMENT=${GITHUB_PR_TRIGGER_COMMENT:-""} +ONLY_DOCS=${ONLY_DOCS:-"true"} +runLibbeat="$(buildkite-agent meta-data get runLibbeat --default ${runLibbeat:-"false"})" +runMetricbeat="$(buildkite-agent meta-data get runMetricbeat --default ${runMetricbeat:-"false"})" +runLibBeatArmTest="$(buildkite-agent meta-data get runLibBeatArmTest --default ${runLibBeatArmTest:-"false"})" +runMetricbeatMacOsTests="$(buildkite-agent meta-data get runMetricbeatMacOsTests --default ${runMetricbeatMacOsTests:-"false"})" + +metricbeat_changeset=( + "^metricbeat/.*" + "^go.mod" + "^pytest.ini" + "^dev-tools/.*" + "^libbeat/.*" + "^testing/.*" + ) +oss_changeset=( + "^go.mod" + "^pytest.ini" + "^dev-tools/.*" + "^libbeat/.*" + "^testing/.*" +) +ci_changeset=( + "^.buildkite/.*" +) +go_mod_changeset=( + "^go.mod" + ) +docs_changeset=( + ".*\\.(asciidoc|md)" + "deploy/kubernetes/.*-kubernetes\\.yaml" + ) +packaging_changeset=( + "^dev-tools/packaging/.*" + ".go-version" + ) + +with_docker_compose() { + local version=$1 + echo "Setting up the Docker-compose environment..." + create_workspace + retry 3 curl -sSL -o ${BIN}/docker-compose "https://github.com/docker/compose/releases/download/${version}/docker-compose-${platform_type_lowercase}-${arch_type}" + chmod +x ${BIN}/docker-compose + export PATH="${BIN}:${PATH}" + docker-compose version +} + +create_workspace() { + if [[ ! -d "${BIN}" ]]; then + mkdir -p "${BIN}" + fi +} + +add_bin_path() { + echo "Adding PATH to the environment variables..." 
+ create_workspace + export PATH="${BIN}:${PATH}" +} + +check_platform_architeture() { + case "${arch_type}" in + "x86_64") + go_arch_type="amd64" + ;; + "aarch64") + go_arch_type="arm64" + ;; + "arm64") + go_arch_type="arm64" + ;; + *) + echo "The current platform/OS type is unsupported yet" + ;; + esac +} + +with_mage() { + local install_packages=( + "github.com/magefile/mage" + "github.com/elastic/go-licenser" + "golang.org/x/tools/cmd/goimports" + "github.com/jstemmer/go-junit-report" + "gotest.tools/gotestsum" + ) + create_workspace + for pkg in "${install_packages[@]}"; do + go install "${pkg}@latest" + done +} + +with_go() { + echo "Setting up the Go environment..." + create_workspace + check_platform_architeture + retry 5 curl -sL -o "${BIN}/gvm" "https://github.com/andrewkroh/gvm/releases/download/${SETUP_GVM_VERSION}/gvm-${platform_type_lowercase}-${go_arch_type}" + chmod +x "${BIN}/gvm" + eval "$(gvm $GO_VERSION)" + go version + which go + local go_path="$(go env GOPATH):$(go env GOPATH)/bin" + export PATH="${go_path}:${PATH}" +} + +with_python() { + if [ "${platform_type}" == "Linux" ]; then + #sudo command doesn't work at the "pre-command" hook because of another user environment (root with strange permissions) + sudo apt-get update + sudo apt-get install -y python3-pip python3-venv + elif [ "${platform_type}" == "Darwin" ]; then + brew update + pip3 install virtualenv + ulimit -Sn 10000 + fi +} + +with_dependencies() { + if [ "${platform_type}" == "Linux" ]; then + #sudo command doesn't work at the "pre-command" hook because of another user environment (root with strange permissions) + sudo apt-get update + sudo apt-get install -y libsystemd-dev libpcap-dev + elif [ "${platform_type}" == "Darwin" ]; then + pip3 install libpcap + fi +} + +retry() { + local retries=$1 + shift + local count=0 + until "$@"; do + exit=$? 
+ wait=$((2 ** count)) + count=$((count + 1)) + if [ $count -lt "$retries" ]; then + >&2 echo "Retry $count/$retries exited $exit, retrying in $wait seconds..." + sleep $wait + else + >&2 echo "Retry $count/$retries exited $exit, no more retries left." + return $exit + fi + done + return 0 +} + +are_paths_changed() { + local patterns=("${@}") + local changelist=() + + for pattern in "${patterns[@]}"; do + changed_files=($(git diff --name-only HEAD@{1} HEAD | grep -E "$pattern")) + if [ "${#changed_files[@]}" -gt 0 ]; then + changelist+=("${changed_files[@]}") + fi + done + + if [ "${#changelist[@]}" -gt 0 ]; then + echo "Files changed:" + echo "${changelist[*]}" + return 0 + else + echo "No files changed within specified changeset:" + echo "${patterns[*]}" + return 1 + fi +} + +are_changed_only_paths() { + local patterns=("${@}") + local changelist=() + local changed_files=$(git diff --name-only HEAD@{1} HEAD) + if [ -z "$changed_files" ] || grep -qE "$(IFS=\|; echo "${patterns[*]}")" <<< "$changed_files"; then + return 0 + fi + return 1 +} + +are_conditions_met_mandatory_tests() { + if are_paths_changed "${metricbeat_changeset[@]}" || are_paths_changed "${oss_changeset[@]}" || are_paths_changed "${ci_changeset[@]}"; then # from https://github.com/elastic/beats/blob/c5e79a25d05d5bdfa9da4d187fe89523faa42afc/metricbeat/Jenkinsfile.yml#L3-L12 + if [[ "$BUILDKITE_PIPELINE_SLUG" == "beats-metricbeat" ]]; then + if [[ "${GITHUB_PR_TRIGGER_COMMENT}" == "/test metricbeat" || "${GITHUB_PR_LABELS}" =~ Metricbeat || "${runMetricbeat}" == "true" ]]; then + return 0 + fi + elif [[ "$BUILDKITE_PIPELINE_SLUG" == "beats-libbeat" ]]; then + if [[ "${GITHUB_PR_TRIGGER_COMMENT}" == "/test libbeat" || "${GITHUB_PR_LABELS}" =~ libbeat || "${runLibbeat}" == "true" ]]; then + return 0 + fi + fi + fi + return 1 +} + +are_conditions_met_libbeat_arm_tests() { + if are_conditions_met_mandatory_tests; then #from 
https://github.com/elastic/beats/blob/c5e79a25d05d5bdfa9da4d187fe89523faa42afc/Jenkinsfile#L145-L171 + if [[ "$BUILDKITE_PIPELINE_SLUG" == "beats-libbeat" ]]; then + if [[ "${GITHUB_PR_TRIGGER_COMMENT}" == "/test libbeat for arm" || "${GITHUB_PR_LABELS}" =~ arm || "${runLibBeatArmTest}" == "true" ]]; then + return 0 + fi + fi + fi + return 1 +} + +are_conditions_met_metricbeat_macos_tests() { + if [[ "${runMetricbeatMacOsTests}" == true ]] || [[ "${GITHUB_PR_TRIGGER_COMMENT}" == "/test metricbeat for macos" ]] || [[ "${GITHUB_PR_LABELS}" =~ macOS ]]; then # from https://github.com/elastic/beats/blob/c5e79a25d05d5bdfa9da4d187fe89523faa42afc/metricbeat/Jenkinsfile.yml#L3-L12 + return 0 + fi + return 1 +} + +are_conditions_met_packaging() { + if are_paths_changed "${metricbeat_changeset[@]}" || are_paths_changed "${oss_changeset[@]}" || [[ "${BUILDKITE_TAG}" == "" ]] || [[ "${BUILDKITE_PULL_REQUEST}" != "" ]]; then # from https://github.com/elastic/beats/blob/c5e79a25d05d5bdfa9da4d187fe89523faa42afc/metricbeat/Jenkinsfile.yml#L101-L103 + return 0 + fi + return 1 +} + +config_git() { + if [ -z "$(git config --get user.email)" ]; then + git config --global user.email "beatsmachine@users.noreply.github.com" + git config --global user.name "beatsmachine" + fi +} + +if ! are_changed_only_paths "${docs_changeset[@]}" ; then + ONLY_DOCS="false" + echo "Changes include files outside the docs_changeset variable. ONLY_DOCS=$ONLY_DOCS." +else + echo "All changes are related to DOCS. ONLY_DOCS=$ONLY_DOCS." 
+fi + +if are_paths_changed "${go_mod_changeset[@]}" ; then + GO_MOD_CHANGES="true" +fi + +if are_paths_changed "${packaging_changeset[@]}" ; then + PACKAGING_CHANGES="true" +fi \ No newline at end of file diff --git a/.buildkite/scripts/crosscompile.sh b/.buildkite/scripts/crosscompile.sh new file mode 100755 index 000000000000..12f0f6574ca9 --- /dev/null +++ b/.buildkite/scripts/crosscompile.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +source .buildkite/scripts/install_tools.sh + +set -euo pipefail + +echo "--- Run Crosscompile for $BEATS_PROJECT_NAME" +make -C "${BEATS_PROJECT_NAME}" crosscompile diff --git a/.buildkite/scripts/generate_libbeat_pipeline.sh b/.buildkite/scripts/generate_libbeat_pipeline.sh new file mode 100755 index 000000000000..6da1bef711dc --- /dev/null +++ b/.buildkite/scripts/generate_libbeat_pipeline.sh @@ -0,0 +1,89 @@ +#!/usr/bin/env bash + +source .buildkite/scripts/common.sh + +set -euo pipefail + +pipelineName="pipeline.libbeat-dynamic.yml" + +echo "Add the mandatory and extended tests without additional conditions into the pipeline" +if are_conditions_met_mandatory_tests; then + cat > $pipelineName <<- YAML + +steps: + + - group: "Mandatory Tests" + key: "mandatory-tests" + steps: + - label: ":linux: Ubuntu Unit Tests" + key: "mandatory-linux-unit-test" + command: ".buildkite/scripts/unit_tests.sh" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + machineType: "${GCP_DEFAULT_MACHINE_TYPE}" + artifact_paths: "${BEATS_PROJECT_NAME}/build/*.xml" + + - label: ":go: Go Integration Tests" + key: "mandatory-int-test" + command: ".buildkite/scripts/go_int_tests.sh" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + machineType: "${GCP_HI_PERF_MASHINE_TYPE}" + artifact_paths: "${BEATS_PROJECT_NAME}/build/*.xml" + + - label: ":python: Python Integration Tests" + key: "mandatory-python-int-test" + command: ".buildkite/scripts/py_int_tests.sh" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + machineType: 
"${GCP_HI_PERF_MASHINE_TYPE}" + artifact_paths: "${BEATS_PROJECT_NAME}/build/*.xml" + + - label: ":negative_squared_cross_mark: Cross compile" + key: "mandatory-cross-compile" + command: ".buildkite/scripts/crosscompile.sh" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + machineType: "${GCP_HI_PERF_MASHINE_TYPE}" + artifact_paths: " ${BEATS_PROJECT_NAME}/build/*.xml" + + - label: ":testengine: Stress Tests" + key: "mandatory-stress-test" + command: ".buildkite/scripts/stress_tests.sh" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + machineType: "${GCP_DEFAULT_MACHINE_TYPE}" + artifact_paths: "${BEATS_PROJECT_NAME}/libbeat-stress-test.xml" + +YAML +fi + +echo "Check and add the Extended Tests into the pipeline" +if are_conditions_met_libbeat_arm_tests; then + cat >> $pipelineName <<- YAML + + - group: "Extended Tests" + key: "extended-tests" + steps: + - label: ":linux: Arm64 Unit Tests" + key: "extended-arm64-unit-tests" + command: ".buildkite/scripts/unit_tests.sh" + agents: + provider: "aws" + imagePrefix: "${IMAGE_UBUNTU_ARM_64}" + instanceType: "${AWS_ARM_INSTANCE_TYPE}" + artifact_paths: "${BEATS_PROJECT_NAME}/build/*.xml" + +YAML +fi + +echo "--- Printing dynamic steps" #TODO: remove if the pipeline is public +cat $pipelineName + +echo "--- Loading dynamic steps" +buildkite-agent pipeline upload $pipelineName diff --git a/.buildkite/scripts/generate_metricbeat_pipeline.sh b/.buildkite/scripts/generate_metricbeat_pipeline.sh new file mode 100755 index 000000000000..0ea19734c4fd --- /dev/null +++ b/.buildkite/scripts/generate_metricbeat_pipeline.sh @@ -0,0 +1,170 @@ +#!/usr/bin/env bash + +source .buildkite/scripts/common.sh + +set -euo pipefail + +pipelineName="pipeline.metricbeat-dynamic.yml" + +echo "Add the mandatory and extended tests without additional conditions into the pipeline" +if are_conditions_met_mandatory_tests; then + cat > $pipelineName <<- YAML + +steps: + + - group: "Mandatory Tests" + key: "mandatory-tests" + 
steps: + - label: ":linux: Ubuntu Unit Tests" + key: "mandatory-linux-unit-test" + command: ".buildkite/scripts/unit_tests.sh" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + machineType: "${GCP_DEFAULT_MACHINE_TYPE}" + artifact_paths: "${BEATS_PROJECT_NAME}/build/*.*" + + - label: ":go: Go Intergration Tests" + key: "mandatory-int-test" + command: ".buildkite/scripts/go_int_tests.sh" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + machineType: "${GCP_HI_PERF_MASHINE_TYPE}" + artifact_paths: "${BEATS_PROJECT_NAME}/build/*.*" + + - label: ":python: Python Integration Tests" + key: "mandatory-python-int-test" + command: ".buildkite/scripts/py_int_tests.sh" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + machineType: "${GCP_HI_PERF_MASHINE_TYPE}" + artifact_paths: "${BEATS_PROJECT_NAME}/build/*.*" + + - label: ":negative_squared_cross_mark: Cross compile" + key: "mandatory-cross-compile" + command: ".buildkite/scripts/crosscompile.sh" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + machineType: "${GCP_DEFAULT_MACHINE_TYPE}" + artifact_paths: "${BEATS_PROJECT_NAME}/build/*.*" + + - label: ":windows: Windows 2016/2022 Unit Tests - {{matrix.image}}" + command: ".buildkite/scripts/win_unit_tests.ps1" + key: "mandatory-win-unit-tests" + agents: + provider: "gcp" + image: "{{matrix.image}}" + machine_type: "${GCP_WIN_MACHINE_TYPE}" + disk_size: 100 + disk_type: "pd-ssd" + matrix: + setup: + image: + - "${IMAGE_WIN_2016}" + - "${IMAGE_WIN_2022}" + artifact_paths: "${BEATS_PROJECT_NAME}/build/*.*" + +# echo "Add the extended windows tests into the pipeline" +# TODO: ADD conditions from the main pipeline + + - group: "Extended Windows Tests" + key: "extended-win-tests" + steps: + - label: ":windows: Windows 2019 Unit Tests" + key: "extended-win-2019-unit-tests" + command: ".buildkite/scripts/win_unit_tests.ps1" + agents: + provider: "gcp" + image: "${IMAGE_WIN_2019}" + machine_type: "${GCP_WIN_MACHINE_TYPE}" + 
disk_size: 100 + disk_type: "pd-ssd" + artifact_paths: "${BEATS_PROJECT_NAME}/build/*.*" + + # Temporary disabled https://github.com/elastic/beats/issues/37841 + - label: ":windows: Windows 10 Unit Tests" + key: "extended-win-10-unit-tests" + command: ".buildkite/scripts/win_unit_tests.ps1" + agents: + provider: "gcp" + image: "${IMAGE_WIN_10}" + machine_type: "${GCP_WIN_MACHINE_TYPE}" + disk_size: 100 + disk_type: "pd-ssd" + artifact_paths: "${BEATS_PROJECT_NAME}/build/*.*" + + - label: ":windows: Windows 11 Unit Tests" + key: "extended-win-11-unit-tests" + command: ".buildkite/scripts/win_unit_tests.ps1" + agents: + provider: "gcp" + image: "${IMAGE_WIN_11}" + machine_type: "${GCP_WIN_MACHINE_TYPE}" + disk_size: 100 + disk_type: "pd-ssd" + artifact_paths: "${BEATS_PROJECT_NAME}/build/*.*" +YAML +fi + +echo "Check and add the Extended Tests into the pipeline" +if are_conditions_met_metricbeat_macos_tests; then + cat >> $pipelineName <<- YAML + + - group: "Extended Tests" + key: "extended-tests" + steps: + - label: ":mac: MacOS Unit Tests" + key: "extended-macos-unit-tests" + command: ".buildkite/scripts/unit_tests.sh" + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_X86_64}" + artifact_paths: "${BEATS_PROJECT_NAME}/build/*.*" +YAML + +fi + +echo "Check and add the Packaging into the pipeline" +if are_conditions_met_mandatory_tests && are_conditions_met_packaging; then + cat >> $pipelineName <<- YAML + + - wait: ~ + depends_on: + - step: "mandatory-tests" + allow_failure: false + + - group: "Packaging" # TODO: check conditions for future the main pipeline migration: https://github.com/elastic/beats/pull/28589 + key: "packaging" + steps: + - label: ":linux: Packaging Linux" + key: "packaging-linux" + command: ".buildkite/scripts/packaging.sh" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + machineType: "${GCP_HI_PERF_MASHINE_TYPE}" + env: + PLATFORMS: "+all linux/amd64 linux/arm64 windows/amd64 darwin/amd64 darwin/arm64" + + - label: 
":linux: Packaging ARM" + key: "packaging-arm" + command: ".buildkite/scripts/packaging.sh" + agents: + provider: "aws" + imagePrefix: "${IMAGE_UBUNTU_ARM_64}" + instanceType: "${AWS_ARM_INSTANCE_TYPE}" + env: + PLATFORMS: "linux/arm64" + PACKAGES: "docker" + +YAML +fi + +echo "--- Printing dynamic steps" #TODO: remove if the pipeline is public +cat $pipelineName + +echo "--- Loading dynamic steps" +buildkite-agent pipeline upload $pipelineName diff --git a/.buildkite/scripts/go_int_tests.sh b/.buildkite/scripts/go_int_tests.sh new file mode 100755 index 000000000000..b4c519f45126 --- /dev/null +++ b/.buildkite/scripts/go_int_tests.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +source .buildkite/scripts/install_tools.sh + +set -euo pipefail + +echo "--- Run Go Intergration Tests for $BEATS_PROJECT_NAME" +pushd "${BEATS_PROJECT_NAME}" > /dev/null + +mage goIntegTest + +popd > /dev/null diff --git a/.buildkite/scripts/install_tools.sh b/.buildkite/scripts/install_tools.sh new file mode 100644 index 000000000000..1a1e3a29f54e --- /dev/null +++ b/.buildkite/scripts/install_tools.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash + +source .buildkite/scripts/common.sh + +set -euo pipefail + +echo "--- Env preparation" + +# Temporary solution to fix the issues with "sudo apt get...." https://elastic.slack.com/archives/C0522G6FBNE/p1706003603442859?thread_ts=1706003209.424539&cid=C0522G6FBNE +# It could be removed when we use our own image for the BK agent. +if [ "${platform_type}" == "Linux" ]; then + DEBIAN_FRONTEND="noninteractive" + #sudo command doesn't work at the "pre-command" hook because of another user environment (root with strange permissions) + sudo mkdir -p /etc/needrestart + echo "\$nrconf{restart} = 'a';" | sudo tee -a /etc/needrestart/needrestart.conf > /dev/null +fi + +add_bin_path + +if command -v docker-compose &> /dev/null +then + echo "Found docker-compose. Checking version.." 
+ FOUND_DOCKER_COMPOSE_VERSION=$(docker-compose --version | awk '{print $4}'|sed s/\,//) + if [ $FOUND_DOCKER_COMPOSE_VERSION == $DOCKER_COMPOSE_VERSION ]; then + echo "Versions match. No need to install docker-compose. Exiting." + elif [[ "${platform_type}" == "Linux" && "${arch_type}" == "aarch64" ]]; then + with_docker_compose "${DOCKER_COMPOSE_VERSION_AARCH64}" + elif [[ "${platform_type}" == "Linux" && "${arch_type}" == "x86_64" ]]; then + with_docker_compose "${DOCKER_COMPOSE_VERSION}" + fi +else + with_docker_compose "${DOCKER_COMPOSE_VERSION}" +fi + +with_go "${GO_VERSION}" +with_mage +with_python +with_dependencies +config_git +mage dumpVariables + +#sudo command doesn't work at the "pre-command" hook because of another user environment (root with strange permissions) +sudo chmod -R go-w "${BEATS_PROJECT_NAME}/" #TODO: Remove when the issue is solved https://github.com/elastic/beats/issues/37838 + +pushd "${BEATS_PROJECT_NAME}" > /dev/null + +#TODO "umask 0022" has to be removed after our own image is ready (it has to be moved to the image) +umask 0022 # fix the filesystem permissions issue like this: https://buildkite.com/elastic/beats-metricbeat/builds/1329#018d3179-25a9-475b-a2c8-64329dfe092b/320-1696 + +popd > /dev/null diff --git a/.buildkite/scripts/packaging.sh b/.buildkite/scripts/packaging.sh new file mode 100755 index 000000000000..1539d3ab430c --- /dev/null +++ b/.buildkite/scripts/packaging.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +source .buildkite/scripts/install_tools.sh + +set -euo pipefail + +echo "--- Run Packaging for $BEATS_PROJECT_NAME" +pushd "${BEATS_PROJECT_NAME}" > /dev/null + +mage package + +popd > /dev/null diff --git a/.buildkite/scripts/py_int_tests.sh b/.buildkite/scripts/py_int_tests.sh new file mode 100755 index 000000000000..19fa8796c3e7 --- /dev/null +++ b/.buildkite/scripts/py_int_tests.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +source .buildkite/scripts/install_tools.sh + +set -euo pipefail + +echo "--- Run Python 
Intergration Tests for $BEATS_PROJECT_NAME" + +pushd "${BEATS_PROJECT_NAME}" > /dev/null + +mage pythonIntegTest + +popd > /dev/null diff --git a/.buildkite/scripts/setenv.sh b/.buildkite/scripts/setenv.sh new file mode 100755 index 000000000000..901ba9891c20 --- /dev/null +++ b/.buildkite/scripts/setenv.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +set -euo pipefail + +SETUP_GVM_VERSION="v0.5.1" +DOCKER_COMPOSE_VERSION="1.21.0" +DOCKER_COMPOSE_VERSION_AARCH64="v2.21.0" +SETUP_WIN_PYTHON_VERSION="3.11.0" +GO_VERSION=$(cat .go-version) + +export SETUP_GVM_VERSION +export DOCKER_COMPOSE_VERSION +export DOCKER_COMPOSE_VERSION_AARCH64 +export SETUP_WIN_PYTHON_VERSION +export GO_VERSION diff --git a/.buildkite/scripts/stress_tests.sh b/.buildkite/scripts/stress_tests.sh new file mode 100755 index 000000000000..b177eb53ea6b --- /dev/null +++ b/.buildkite/scripts/stress_tests.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +source .buildkite/scripts/install_tools.sh + +set -euo pipefail + +echo "--- Run Stress Tests for $BEATS_PROJECT_NAME" + +pushd "${BEATS_PROJECT_NAME}" > /dev/null + +make STRESS_TEST_OPTIONS='-timeout=20m -race -v -parallel 1' GOTEST_OUTPUT_OPTIONS='| go-junit-report > libbeat-stress-test.xml' stress-tests + +popd > /dev/null diff --git a/.buildkite/scripts/unit_tests.sh b/.buildkite/scripts/unit_tests.sh new file mode 100755 index 000000000000..059b4166e296 --- /dev/null +++ b/.buildkite/scripts/unit_tests.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +source .buildkite/scripts/install_tools.sh + +set -euo pipefail + +echo "--- Run Unit Tests" +pushd "${BEATS_PROJECT_NAME}" > /dev/null + +mage build unitTest + +popd > /dev/null diff --git a/.buildkite/scripts/win_unit_tests.ps1 b/.buildkite/scripts/win_unit_tests.ps1 new file mode 100644 index 000000000000..34833d183ffa --- /dev/null +++ b/.buildkite/scripts/win_unit_tests.ps1 @@ -0,0 +1,70 @@ +$ErrorActionPreference = "Stop" # set -e +$WorkFolder = "metricbeat" +# Forcing to checkout again all the files with 
a correct autocrlf. +# Doing this here because we cannot set git clone options before. +function fixCRLF { + Write-Host "-- Fixing CRLF in git checkout --" + git config core.autocrlf false + git rm --quiet --cached -r . + git reset --quiet --hard +} +function withChoco { + Write-Host "-- Configure Choco --" + $env:ChocolateyInstall = Convert-Path "$((Get-Command choco).Path)\..\.." + Import-Module "$env:ChocolateyInstall\helpers\chocolateyProfile.psm1" +} +function withGolang($version) { + Write-Host "-- Install golang $version --" + choco install -y golang --version=$version + refreshenv + go version +} +function withPython($version) { + Write-Host "-- Install Python $version --" + choco install python --version=$version + refreshenv + python --version +} +function withMinGW { + Write-Host "-- Install MinGW --" + choco install mingw -y + refreshenv +} +function installGoDependencies { + $installPackages = @( + "github.com/magefile/mage" + "github.com/elastic/go-licenser" + "golang.org/x/tools/cmd/goimports" + "github.com/jstemmer/go-junit-report/v2" + "gotest.tools/gotestsum" + ) + foreach ($pkg in $installPackages) { + go install "$pkg@latest" + } +} + +fixCRLF + +withChoco + +withGolang $env:GO_VERSION + +installGoDependencies + +withPython $env:SETUP_WIN_PYTHON_VERSION + +withMinGW + +$ErrorActionPreference = "Continue" # set +e + +Push-Location $WorkFolder + +New-Item -ItemType Directory -Force -Path "build" +mage build unitTest + +Pop-Location + +$EXITCODE=$LASTEXITCODE +$ErrorActionPreference = "Stop" + +Exit $EXITCODE diff --git a/.buildkite/winlogbeat/pipeline.winlogbeat.yml b/.buildkite/winlogbeat/pipeline.winlogbeat.yml new file mode 100644 index 000000000000..34321b61161b --- /dev/null +++ b/.buildkite/winlogbeat/pipeline.winlogbeat.yml @@ -0,0 +1,5 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/buildkite/pipeline-schema/main/schema.json + +steps: + - label: "Example test" + command: echo "Hello!" 
diff --git a/.buildkite/x-pack/elastic-agent/pipeline.xpack.elastic-agent.yml b/.buildkite/x-pack/elastic-agent/pipeline.xpack.elastic-agent.yml new file mode 100644 index 000000000000..58d61a367a4a --- /dev/null +++ b/.buildkite/x-pack/elastic-agent/pipeline.xpack.elastic-agent.yml @@ -0,0 +1,6 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/buildkite/pipeline-schema/main/schema.json + +# This pipeline is only for 7.17 branch. See catalog-info.yml +steps: + - label: "Example test" + command: echo "Hello!" diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 140ccf9d73f7..0df9a9a58779 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -20,7 +20,7 @@ CHANGELOG* /.ci/ @elastic/elastic-agent-data-plane /.github/ @elastic/elastic-agent-data-plane -/auditbeat/ @elastic/security-external-integrations +/auditbeat/ @elastic/sec-linux-platform /deploy/ @elastic/elastic-agent-data-plane /deploy/kubernetes @elastic/elastic-agent-data-plane @elastic/obs-cloudnative-monitoring /dev-tools/ @elastic/elastic-agent-data-plane @@ -28,26 +28,26 @@ CHANGELOG* /docs/ @elastic/elastic-agent-data-plane /filebeat @elastic/elastic-agent-data-plane /filebeat/docs/ # Listed without an owner to avoid maintaining doc ownership for each input and module. 
-/filebeat/input/syslog/ @elastic/security-external-integrations -/filebeat/input/winlog/ @elastic/security-external-integrations +/filebeat/input/syslog/ @elastic/sec-deployment-and-devices +/filebeat/input/winlog/ @elastic/sec-windows-platform /filebeat/module/apache @elastic/obs-infraobs-integrations -/filebeat/module/auditd @elastic/security-external-integrations -/filebeat/module/elasticsearch/ @elastic/infra-monitoring-ui +/filebeat/module/auditd @elastic/sec-linux-platform +/filebeat/module/elasticsearch/ @elastic/stack-monitoring /filebeat/module/haproxy @elastic/obs-infraobs-integrations /filebeat/module/icinga # TODO: find right team /filebeat/module/iis @elastic/obs-infraobs-integrations /filebeat/module/kafka @elastic/obs-infraobs-integrations -/filebeat/module/kibana/ @elastic/infra-monitoring-ui -/filebeat/module/logstash/ @elastic/infra-monitoring-ui +/filebeat/module/kibana/ @elastic/stack-monitoring +/filebeat/module/logstash/ @elastic/stack-monitoring /filebeat/module/mongodb @elastic/obs-infraobs-integrations -/filebeat/module/mysql @elastic/security-external-integrations +/filebeat/module/mysql @elastic/obs-infraobs-integrations /filebeat/module/nats @elastic/obs-infraobs-integrations /filebeat/module/nginx @elastic/obs-infraobs-integrations -/filebeat/module/osquery @elastic/security-external-integrations -/filebeat/module/pensando @elastic/security-external-integrations +/filebeat/module/osquery @elastic/sec-deployment-and-devices +/filebeat/module/pensando @elastic/sec-deployment-and-devices /filebeat/module/postgresql @elastic/obs-infraobs-integrations /filebeat/module/redis @elastic/obs-infraobs-integrations -/filebeat/module/santa @elastic/security-external-integrations +/filebeat/module/santa @elastic/security-service-integrations /filebeat/module/system @elastic/elastic-agent-data-plane /filebeat/module/traefik # TODO: find right team /heartbeat/ @elastic/obs-ds-hosted-services @@ -55,13 +55,13 @@ CHANGELOG* /libbeat/ 
@elastic/elastic-agent-data-plane /libbeat/docs/processors-list.asciidoc @elastic/ingest-docs /libbeat/management @elastic/elastic-agent-control-plane -/libbeat/processors/cache/ @elastic/security-external-integrations -/libbeat/processors/community_id/ @elastic/security-external-integrations -/libbeat/processors/decode_xml/ @elastic/security-external-integrations -/libbeat/processors/decode_xml_wineventlog/ @elastic/security-external-integrations -/libbeat/processors/dns/ @elastic/security-external-integrations -/libbeat/processors/registered_domain/ @elastic/security-external-integrations -/libbeat/processors/translate_sid/ @elastic/security-external-integrations +/libbeat/processors/cache/ @elastic/security-service-integrations +/libbeat/processors/community_id/ @elastic/sec-deployment-and-devices +/libbeat/processors/decode_xml/ @elastic/security-service-integrations +/libbeat/processors/decode_xml_wineventlog/ @elastic/sec-windows-platform +/libbeat/processors/dns/ @elastic/sec-deployment-and-devices +/libbeat/processors/registered_domain/ @elastic/sec-deployment-and-devices +/libbeat/processors/translate_sid/ @elastic/sec-windows-platform /libbeat/processors/add_cloud_metadata @elastic/obs-cloud-monitoring /libbeat/processors/add_kubernetes_metadata @elastic/obs-cloudnative-monitoring /licenses/ @elastic/elastic-agent-data-plane @@ -69,20 +69,20 @@ CHANGELOG* /metricbeat/docs/ # Listed without an owner to avoid maintaining doc ownership for each input and module. 
/metricbeat/helper/kubernetes @elastic/obs-cloudnative-monitoring /metricbeat/module/apache @elastic/obs-infraobs-integrations -/metricbeat/module/beat/ @elastic/infra-monitoring-ui +/metricbeat/module/beat/ @elastic/stack-monitoring /metricbeat/module/ceph @elastic/obs-infraobs-integrations /metricbeat/module/couchbase @elastic/obs-infraobs-integrations /metricbeat/module/couchdb @elastic/obs-infraobs-integrations -/metricbeat/module/elasticsearch/ @elastic/infra-monitoring-ui +/metricbeat/module/elasticsearch/ @elastic/stack-monitoring /metricbeat/module/etcd @elastic/obs-infraobs-integrations /metricbeat/module/golang @elastic/obs-infraobs-integrations /metricbeat/module/haproxy @elastic/obs-infraobs-integrations /metricbeat/module/http @elastic/obs-infraobs-integrations /metricbeat/module/jolokia @elastic/obs-infraobs-integrations /metricbeat/module/kafka @elastic/obs-infraobs-integrations -/metricbeat/module/kibana/ @elastic/infra-monitoring-ui +/metricbeat/module/kibana/ @elastic/stack-monitoring /metricbeat/module/kubernetes/ @elastic/obs-cloudnative-monitoring -/metricbeat/module/logstash/ @elastic/infra-monitoring-ui +/metricbeat/module/logstash/ @elastic/stack-monitoring /metricbeat/module/memcached @elastic/obs-infraobs-integrations /metricbeat/module/mongodb @elastic/obs-infraobs-integrations /metricbeat/module/mysql @elastic/obs-infraobs-integrations @@ -96,83 +96,85 @@ CHANGELOG* /metricbeat/module/system/ @elastic/elastic-agent-data-plane /metricbeat/module/vsphere @elastic/obs-infraobs-integrations /metricbeat/module/zookeeper @elastic/obs-infraobs-integrations -/packetbeat/ @elastic/security-external-integrations +/packetbeat/ @elastic/sec-linux-platform /script/ @elastic/elastic-agent-data-plane /testing/ @elastic/elastic-agent-data-plane /tools/ @elastic/elastic-agent-data-plane -/winlogbeat/ @elastic/security-external-integrations -/x-pack/auditbeat/ @elastic/security-external-integrations +/winlogbeat/ @elastic/sec-windows-platform 
+/x-pack/auditbeat/ @elastic/sec-linux-platform /x-pack/elastic-agent/ @elastic/elastic-agent-control-plane /x-pack/filebeat @elastic/elastic-agent-data-plane /x-pack/filebeat/docs/ # Listed without an owner to avoid maintaining doc ownership for each input and module. /x-pack/filebeat/input/awscloudwatch/ @elastic/obs-cloud-monitoring /x-pack/filebeat/input/awss3/ @elastic/obs-cloud-monitoring -/x-pack/filebeat/input/azureblobstorage/ @elastic/security-external-integrations +/x-pack/filebeat/input/azureblobstorage/ @elastic/security-service-integrations /x-pack/filebeat/input/azureeventhub/ @elastic/obs-cloud-monitoring -/x-pack/filebeat/input/cel/ @elastic/security-external-integrations +/x-pack/filebeat/input/cel/ @elastic/security-service-integrations /x-pack/filebeat/input/cometd/ @elastic/obs-infraobs-integrations -/x-pack/filebeat/input/entityanalytics/ @elastic/security-external-integrations -/x-pack/filebeat/input/gcppubsub/ @elastic/security-external-integrations -/x-pack/filebeat/input/gcs/ @elastic/security-external-integrations -/x-pack/filebeat/input/http_endpoint/ @elastic/security-external-integrations -/x-pack/filebeat/input/httpjson/ @elastic/security-external-integrations -/x-pack/filebeat/input/internal/httplog @elastic/security-external-integrations -/x-pack/filebeat/input/internal/httpmon @elastic/security-external-integrations -/x-pack/filebeat/input/lumberjack/ @elastic/security-external-integrations -/x-pack/filebeat/input/netflow/ @elastic/security-external-integrations -/x-pack/filebeat/input/o365audit/ @elastic/security-external-integrations +/x-pack/filebeat/input/entityanalytics/ @elastic/security-service-integrations +/x-pack/filebeat/input/gcppubsub/ @elastic/security-service-integrations +/x-pack/filebeat/input/gcs/ @elastic/security-service-integrations +/x-pack/filebeat/input/http_endpoint/ @elastic/security-service-integrations +/x-pack/filebeat/input/httpjson/ @elastic/security-service-integrations 
+/x-pack/filebeat/input/internal/httplog @elastic/security-service-integrations +/x-pack/filebeat/input/internal/httpmon @elastic/security-service-integrations +/x-pack/filebeat/input/lumberjack/ @elastic/security-service-integrations +/x-pack/filebeat/input/netflow/ @elastic/sec-deployment-and-devices +/x-pack/filebeat/input/o365audit/ @elastic/security-service-integrations /x-pack/filebeat/module/activemq @elastic/obs-infraobs-integrations /x-pack/filebeat/module/aws @elastic/obs-cloud-monitoring /x-pack/filebeat/module/awsfargate @elastic/obs-cloud-monitoring /x-pack/filebeat/module/azure @elastic/obs-cloud-monitoring -/x-pack/filebeat/module/barracuda @elastic/security-external-integrations -/x-pack/filebeat/module/bluecoat @elastic/security-external-integrations -/x-pack/filebeat/module/cef @elastic/security-external-integrations -/x-pack/filebeat/module/checkpoint @elastic/security-external-integrations -/x-pack/filebeat/module/cisco @elastic/security-external-integrations -/x-pack/filebeat/module/coredns @elastic/security-external-integrations -/x-pack/filebeat/module/crowdstrike @elastic/security-external-integrations -/x-pack/filebeat/module/cyberarkpas @elastic/security-external-integrations -/x-pack/filebeat/module/cylance @elastic/security-external-integrations -/x-pack/filebeat/module/envoyproxy @elastic/security-external-integrations -/x-pack/filebeat/module/f5 @elastic/security-external-integrations -/x-pack/filebeat/module/fortinet @elastic/security-external-integrations -/x-pack/filebeat/module/gcp @elastic/security-external-integrations -/x-pack/filebeat/module/google_workspace @elastic/security-external-integrations +/x-pack/filebeat/module/barracuda @elastic/security-service-integrations +/x-pack/filebeat/module/bluecoat @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/cef @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/checkpoint @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/cisco 
@elastic/sec-deployment-and-devices +/x-pack/filebeat/module/coredns @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/crowdstrike @elastic/security-service-integrations +/x-pack/filebeat/module/cyberarkpas @elastic/security-service-integrations +/x-pack/filebeat/module/cylance @elastic/security-service-integrations +/x-pack/filebeat/module/envoyproxy @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/f5 @elastic/security-service-integrations +/x-pack/filebeat/module/fortinet @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/gcp @elastic/security-service-integrations +/x-pack/filebeat/module/google_workspace @elastic/security-service-integrations /x-pack/filebeat/module/ibmmq @elastic/obs-infraobs-integrations -/x-pack/filebeat/module/imperva @elastic/security-external-integrations -/x-pack/filebeat/module/infoblox @elastic/security-external-integrations -/x-pack/filebeat/module/iptables @elastic/security-external-integrations -/x-pack/filebeat/module/juniper @elastic/security-external-integrations -/x-pack/filebeat/module/microsoft @elastic/security-external-integrations -/x-pack/filebeat/module/misp @elastic/security-external-integrations +/x-pack/filebeat/module/imperva @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/infoblox @elastic/security-service-integrations +/x-pack/filebeat/module/iptables @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/juniper @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/microsoft @elastic/sec-windows-platform +/x-pack/filebeat/module/misp @elastic/security-service-integrations /x-pack/filebeat/module/mssql @elastic/obs-infraobs-integrations -/x-pack/filebeat/module/mysqlenterprise @elastic/security-external-integrations -/x-pack/filebeat/module/netflow @elastic/security-external-integrations -/x-pack/filebeat/module/netscout @elastic/security-external-integrations -/x-pack/filebeat/module/o365 @elastic/security-external-integrations -/x-pack/filebeat/module/okta 
@elastic/security-external-integrations -/x-pack/filebeat/module/oracle @elastic/security-external-integrations -/x-pack/filebeat/module/panw @elastic/security-external-integrations -/x-pack/filebeat/module/proofpoint @elastic/security-external-integrations +/x-pack/filebeat/module/mysqlenterprise @elastic/sec-windows-platform +/x-pack/filebeat/module/netflow @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/netscout @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/o365 @elastic/security-service-integrations +/x-pack/filebeat/module/okta @elastic/security-service-integrations +/x-pack/filebeat/module/oracle @elastic/obs-infraobs-integrations +/x-pack/filebeat/module/panw @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/proofpoint @elastic/security-service-integrations /x-pack/filebeat/module/rabbitmq @elastic/obs-infraobs-integrations -/x-pack/filebeat/module/radware @elastic/security-external-integrations +/x-pack/filebeat/module/radware @elastic/sec-deployment-and-devices /x-pack/filebeat/module/salesforce @elastic/obs-infraobs-integrations -/x-pack/filebeat/module/snort @elastic/security-external-integrations -/x-pack/filebeat/module/snyk @elastic/security-external-integrations -/x-pack/filebeat/module/sonicwall @elastic/security-external-integrations -/x-pack/filebeat/module/sophos @elastic/security-external-integrations -/x-pack/filebeat/module/squid @elastic/security-external-integrations -/x-pack/filebeat/module/suricata @elastic/security-external-integrations -/x-pack/filebeat/module/threatintel @elastic/security-external-integrations -/x-pack/filebeat/module/tomcat @elastic/security-external-integrations -/x-pack/filebeat/module/zeek @elastic/security-external-integrations +/x-pack/filebeat/module/snort @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/snyk @elastic/security-service-integrations +/x-pack/filebeat/module/sonicwall @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/sophos 
@elastic/sec-deployment-and-devices +/x-pack/filebeat/module/squid @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/suricata @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/threatintel @elastic/security-service-integrations +/x-pack/filebeat/module/tomcat @elastic/obs-infraobs-integrations +/x-pack/filebeat/module/zeek @elastic/sec-deployment-and-devices /x-pack/filebeat/module/zookeeper @elastic/obs-infraobs-integrations -/x-pack/filebeat/module/zoom @elastic/security-external-integrations -/x-pack/filebeat/module/zscaler @elastic/security-external-integrations -/x-pack/filebeat/modules.d/zoom.yml.disabled @elastic/security-external-integrations -/x-pack/filebeat/processors/decode_cef/ @elastic/security-external-integrations +/x-pack/filebeat/module/zoom @elastic/security-service-integrations +/x-pack/filebeat/module/zscaler @elastic/security-service-integrations +/x-pack/filebeat/modules.d/zoom.yml.disabled @elastic/security-service-integrations +/x-pack/filebeat/processors/decode_cef/ @elastic/sec-deployment-and-devices /x-pack/heartbeat/ @elastic/obs-ds-hosted-services +/x-pack/libbeat/reader/parquet/ @elastic/security-service-integrations +/x-pack/libbeat/reader/etw/ @elastic/sec-windows-platform /x-pack/metricbeat/ @elastic/elastic-agent-data-plane /x-pack/metricbeat/docs/ # Listed without an owner to avoid maintaining doc ownership for each input and module. 
/x-pack/metricbeat/module/activemq @elastic/obs-infraobs-integrations @@ -186,7 +188,7 @@ CHANGELOG* /x-pack/metricbeat/module/containerd/ @elastic/obs-cloudnative-monitoring /x-pack/metricbeat/module/coredns @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/enterprisesearch @elastic/ent-search-application-backend -/x-pack/metricbeat/module/gcp @elastic/obs-ds-hosted-services @elastic/obs-infraobs-integrations @elastic/security-external-integrations +/x-pack/metricbeat/module/gcp @elastic/obs-ds-hosted-services @elastic/obs-infraobs-integrations @elastic/security-service-integrations /x-pack/metricbeat/module/gcp/billing @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/gcp/cloudrun_metrics @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/gcp/cloudsql_mysql @elastic/obs-infraobs-integrations @@ -195,16 +197,16 @@ CHANGELOG* /x-pack/metricbeat/module/gcp/carbon @elastic/obs-ds-hosted-services /x-pack/metricbeat/module/gcp/compute @elastic/obs-ds-hosted-services /x-pack/metricbeat/module/gcp/dataproc @elastic/obs-infraobs-integrations -/x-pack/metricbeat/module/gcp/dns @elastic/security-external-integrations +/x-pack/metricbeat/module/gcp/dns @elastic/security-service-integrations /x-pack/metricbeat/module/gcp/firestore @elastic/obs-infraobs-integrations -/x-pack/metricbeat/module/gcp/firewall @elastic/security-external-integrations +/x-pack/metricbeat/module/gcp/firewall @elastic/security-service-integrations /x-pack/metricbeat/module/gcp/gke @elastic/obs-ds-hosted-services /x-pack/metricbeat/module/gcp/loadbalancing_logs @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/gcp/loadbalancing_metrics @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/gcp/pubsub @elastic/obs-ds-hosted-services /x-pack/metricbeat/module/gcp/redis @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/gcp/storage @elastic/obs-ds-hosted-services -/x-pack/metricbeat/module/gcp/vpcflow @elastic/security-external-integrations 
+/x-pack/metricbeat/module/gcp/vpcflow @elastic/security-service-integrations /x-pack/metricbeat/module/ibmmq @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/iis @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/istio/ @elastic/obs-cloudnative-monitoring @@ -216,7 +218,6 @@ CHANGELOG* /x-pack/metricbeat/module/statsd @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/stan/ @elastic/obs-cloudnative-monitoring /x-pack/metricbeat/module/tomcat @elastic/obs-infraobs-integrations -/x-pack/osquerybeat/ @elastic/security-external-integrations -/x-pack/packetbeat/ @elastic/security-external-integrations -/x-pack/winlogbeat/ @elastic/security-external-integrations -/x-pack/libbeat/reader/parquet/ @elastic/security-external-integrations +/x-pack/osquerybeat/ @elastic/sec-deployment-and-devices +/x-pack/packetbeat/ @elastic/sec-linux-platform +/x-pack/winlogbeat/ @elastic/sec-windows-platform diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 994a24bfb490..304f3add387e 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -29,7 +29,7 @@ updates: - dependency-name: go.elastic.co/go-licence-detector # Team:Service-Integrations - dependency-name: github.com/elastic/bayeux - # Team:Security-External Integrations + # Team:Security-Linux Platform - dependency-name: github.com/elastic/go-libaudit/* - dependency-name: github.com/elastic/go-perf - dependency-name: github.com/elastic/go-seccomp-bpf diff --git a/.github/workflows/opentelemetry.yml b/.github/workflows/opentelemetry.yml index 4cdb1e2197eb..84a6209ff2c9 100644 --- a/.github/workflows/opentelemetry.yml +++ b/.github/workflows/opentelemetry.yml @@ -1,46 +1,16 @@ +--- +# Look up results at https://ela.st/oblt-ci-cd-stats. +# There will be one service per GitHub repository, including the org name, and one Transaction per Workflow. 
name: OpenTelemetry Export Trace on: workflow_run: - workflows: - - bump-elastic-stack-snapshot - - bump-golang - - check-auditbeat - - check-default - - check-dev-tools - - check-docs - - check-filebeat - - check-heartbeat - - check-libbeat - - check-metricbeat - - check-packetbeat - - check-winlogbeat - - check-x-pack-auditbeat - - check-x-pack-dockerlogbeat - - check-x-pack-filebeat - - check-x-pack-functionbeat - - check-x-pack-heartbeat - - check-x-pack-libbeat - - check-x-pack-metricbeat - - check-x-pack-osquerybeat - - check-x-pack-packetbeat - - check-x-pack-winlogbeat - - golangci-lint - - notify-stalled-snapshots - - auditbeat - - filebeat - - heartbeat - - metricbeat - - packetbeat - - x-pack-auditbeat - - x-pack-filebeat - - x-pack-functionbeat - - x-pack-heartbeat - - x-pack-metricbeat - - x-pack-osquerybeat - - x-pack-packetbeat + workflows: [ "*" ] types: [completed] +permissions: + contents: read + jobs: otel-export-trace: runs-on: ubuntu-latest diff --git a/CHANGELOG-developer.next.asciidoc b/CHANGELOG-developer.next.asciidoc index 4e650a193d16..d27a957b0f3e 100644 --- a/CHANGELOG-developer.next.asciidoc +++ b/CHANGELOG-developer.next.asciidoc @@ -87,6 +87,7 @@ The list below covers the major changes between 7.0.0-rc2 and main only. - Fix ingest pipeline for panw module to parse url scheme correctly {pull}35757[35757] - Renamed an httpjson input metric to follow naming conventions. `httpjson_interval_pages_total` was renamed to `httpjson_interval_pages` because the `_total` suffix is reserved for counters. {issue}35933[35933] {pull}36169[36169] - Fixed some race conditions in tests {pull}36185[36185] +- Fix Stringer implementation of fingerprint processor {issue}35174[35174] - Re-enable HTTPJSON fixed flakey test. {issue}34929[34929] {pull}36525[36525] - Make winlogbeat/sys/wineventlog follow the unsafe.Pointer rules. {pull}36650[36650] - Cleaned up documentation errors & fixed a minor bug in Filebeat Azure blob storage input. 
{pull}36714[36714] diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index 3305b1989b69..cb5aa3b6354b 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -3,10 +3,73 @@ :issue: https://github.com/elastic/beats/issues/ :pull: https://github.com/elastic/beats/pull/ +[[release-notes-8.12.1]] +=== Beats version 8.12.1 +https://github.com/elastic/beats/compare/v8.12.0\...v8.12.1[View commits] + +==== Known Issues + +*Affecting all Beats* + +Performance regression in AWS S3 inputs using SQS notification. + +In 8.12 the default memory queue flush interval was raised from 1 second to 10 seconds. In many configurations this improves performance because it allows the output to batch more events per round trip, which improves efficiency. However, the SQS input has an extra bottleneck that interacts badly with the new value. For more details see {issue}37754[37754]. + +If you are using the Elasticsearch output, and your output configuration uses a performance preset, switch it to `preset: latency`. If you use no preset or use `preset: custom`, then set `queue.mem.flush.timeout: 1s` in your queue or output configuration. + +==== Breaking changes + +*Affecting all Beats* + +- add_cloud_metadata processor: `huawei` provider is now treated as `openstack`. Huawei cloud runs on OpenStack +platform, and when viewed from a metadata API standpoint, it is impossible to differentiate it from OpenStack. If you +know that your deployments run on Huawei Cloud exclusively, and you wish to have `cloud.provider` value as `huawei`, +you can achieve this by overwriting the value using an `add_fields` processor. {pull}35184[35184] + +==== Bugfixes + +*Affecting all Beats* + +- aws: Add credential caching for `AssumeRole` session tokens. {issue}37787[37787] +- Lower logging level to debug when attempting to configure beats with unknown fields from autodiscovered events/environments. {pull}37816[37816] + +*Filebeat* + +- Fix nil pointer dereference in the httpjson input.
{pull}37591[37591] +- Fix TCP/UDP metric queue length parsing base. {pull}37714[37714] +- Fix m365_defender cursor value and query building. {pull}37116[37116] +- Update github.com/lestrrat-go/jwx dependency. {pull}37799[37799] + +*Heartbeat* + +- Fix setuid root when running under cgroups v2. {pull}37794[37794] + +*Metricbeat* + +- Fix Azure Resource Metrics missing metrics (min and max aggregations) after upgrade to 8.11.3. {issue}37642[37642] {pull}37643[37643] + +==== Added + +*Filebeat* + +- Relax TCP/UDP metric polling expectations to improve metric collection. {pull}37714[37714] + [[release-notes-8.12.0]] === Beats version 8.12.0 https://github.com/elastic/beats/compare/v8.11.4\...v8.12.0[View commits] +==== Known Issues + +*Affecting all Beats* + +Performance regression in AWS S3 inputs using SQS notification. + +In 8.12 the default memory queue flush interval was raised from 1 second to 10 seconds. In many configurations this improves performance because it allows the output to batch more events per round trip, which improves efficiency. However, the SQS input has an extra bottleneck that interacts badly with the new value. For more details see {issue}37754[37754]. + +If you are using the Elasticsearch output, and your output configuration uses a performance preset, switch it to `preset: latency`. If you use no preset or use `preset: custom`, then set `queue.mem.flush.timeout: 1s` in your queue or output configuration. + +If you are not using the Elasticsearch output, set `queue.mem.flush.timeout: 1s` in your queue or output configuration. + ==== Breaking changes *Heartbeat* diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index c00d18691e6e..1a0454f3eb44 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -11,6 +11,13 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] *Affecting all Beats* - Upgrade to Go 1.21.7. Removes support for Windows 8.1. See https://tip.golang.org/doc/go1.21#windows. 
{pull}37913[37913] +- add_cloud_metadata processor: `huawei` provider is now treated as `openstack`. Huawei cloud runs on OpenStack +platform, and when viewed from a metadata API standpoint, it is impossible to differentiate it from OpenStack. If you +know that your deployments run on Huawei Cloud exclusively, and you wish to have `cloud.provider` value as `huawei`, +you can achieve this by overwriting the value using an `add_fields` processor. {pull}35184[35184] + - In managed mode, Beats running under Elastic Agent will report the package +version of Elastic Agent as their own version. This includes all additional +fields added to events containing the Beats version. {pull}37553[37553] *Auditbeat* @@ -55,6 +62,11 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Fix panic when MaxRetryInterval is specified, but RetryInterval is not {pull}35820[35820] - Support build of projects outside of beats directory {pull}36126[36126] - Support Elastic Agent control protocol chunking support {pull}37343[37343] +- Upgrade elastic-agent-libs to v0.7.5. Removes obsolete "Treating the CommonName field on X.509 certificates as a host name..." deprecation warning for 8.0. {pull}37755[37755] +- aws: Add credential caching for `AssumeRole` session tokens. {issue}37787[37787] +- Lower logging level to debug when attempting to configure beats with unknown fields from autodiscovered events/environments {pull}37816[37816] +- Set timeout of 1 minute for FQDN requests {pull}37756[37756] + *Auditbeat* @@ -74,27 +86,15 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Fix handling of Juniper SRX structured data when there is no leading junos element. {issue}36270[36270] {pull}36308[36308] - Fix Filebeat Cisco module with missing escape character {issue}36325[36325] {pull}36326[36326] - Added a fix for Crowdstrike pipeline handling process arrays {pull}36496[36496] +- Fix m365_defender cursor value and query building.
{pull}37116[37116] +- Fix TCP/UDP metric queue length parsing base. {pull}37714[37714] +- Update github.com/lestrrat-go/jwx dependency. {pull}37799[37799] *Heartbeat* -- Fix panics when parsing dereferencing invalid parsed url. {pull}34702[34702] *Metricbeat* -- in module/windows/perfmon, changed collection method of the second counter value required to create a displayable value {pull}32305[32305] -- Fix and improve AWS metric period calculation to avoid zero-length intervals {pull}32724[32724] -- Add missing cluster metadata to k8s module metricsets {pull}32979[32979] {pull}33032[33032] -- Add GCP CloudSQL region filter {pull}32943[32943] -- Fix logstash cgroup mappings {pull}33131[33131] -- Remove unused `elasticsearch.node_stats.indices.bulk.avg_time.bytes` mapping {pull}33263[33263] -- Make generic SQL GA {pull}34637[34637] -- Collect missing remote_cluster in elasticsearch ccr metricset {pull}34957[34957] -- Add context with timeout in AWS API calls {pull}35425[35425] -- Fix EC2 host.cpu.usage {pull}35717[35717] -- Add option in SQL module to execute queries for all dbs. {pull}35688[35688] -- Add remaining dimensions for azure storage account to make them available for tsdb enablement. {pull}36331[36331] -- Add log error when statsd server fails to start {pull}36477[36477] -- Fix Azure Resource Metrics missing metrics (min and max aggregations) after upgrade to 8.11.3 {issue}37642[37642] {pull}37643[37643] *Osquerybeat* @@ -105,7 +105,6 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] *Winlogbeat* - *Elastic Logging Plugin* @@ -129,9 +128,11 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d *Auditbeat* +- Add linux capabilities to processes in the system/process. {pull}37453[37453] *Filebeat* +- Update SQL input documentation regarding Oracle DSNs {pull}37590[37590] - add documentation for decode_xml_wineventlog processor field mappings. 
{pull}32456[32456] - httpjson input: Add request tracing logger. {issue}32402[32402] {pull}32412[32412] - Add cloudflare R2 to provider list in AWS S3 input. {pull}32620[32620] @@ -167,15 +168,24 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d - Update CEL extensions library to v1.7.0. {pull}37172[37172] - Add support for complete URL replacement in HTTPJSON chain steps. {pull}37486[37486] - Add support for user-defined query selection in EntraID entity analytics provider. {pull}37653[37653] +- Update CEL extensions library to v1.8.0 to provide runtime error location reporting. {issue}37304[37304] {pull}37718[37718] +- Add request trace logging for chained API requests. {issue}36551[36551] {pull}37682[37682] +- Relax TCP/UDP metric polling expectations to improve metric collection. {pull}37714[37714] +- Add support for PEM-based Okta auth in HTTPJSON. {pull}37772[37772] +- Prevent complete loss of long request trace data. {issue}37826[37826] {pull}37836[37836] +- Add support for PEM-based Okta auth in CEL. {pull}37813[37813] *Auditbeat* *Libbeat* +- Add watcher that can be used to monitor Linux kernel events. {pull}37833[37833] + +- Added support for ETW reader. {pull}36914[36914] *Heartbeat* - Added status to monitor run log report. - +- Upgrade github.com/elastic/go-elasticsearch/v8 to v8.12.0. {pull}37673[37673] *Metricbeat* @@ -191,11 +201,19 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d - Add linux IO metrics to system/process {pull}37213[37213] - Add new memory/cgroup metrics to Kibana module {pull}37232[37232] + +*Metricbeat* + +- Update `getOpTimestamp` in `replstatus` to fix sort and temp files generation issue in mongodb. {pull}37688[37688] + *Osquerybeat* *Packetbeat* +- Bump Windows Npcap version to v1.79. {pull}37733[37733] +- Add metrics for TCP flags. {issue}36992[36992] {pull}36975[36975] +- Add support for pipeline loading.
{pull}37291[37291] *Packetbeat* @@ -283,6 +301,9 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d + + + diff --git a/NOTICE.txt b/NOTICE.txt index c7e77209c2c2..573e544bb2e8 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -12255,6 +12255,32 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +-------------------------------------------------------------------------------- +Dependency : github.com/elastic/ebpfevents +Version: v0.3.2 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/ebpfevents@v0.3.2/LICENSE.txt: + +The https://github.com/elastic/ebpfevents repository contains source code under +various licenses: + +- Source code in the 'headers/bpf' directory, is dual-licensed under the GNU Lesser General + Public License version 2.1 (LICENSES/LGPL-2.1-only.txt) OR BSD-2-Clause license + (LICENSES/BSD-2-Clause.txt) + +- Source code in the 'ebpf' submodule is licensed with multiple licenses. Read more at + https://github.com/elastic/ebpf/blob/main/LICENSE.txt. + +- The binary files 'bpf_bpfel_x86.o' and 'bpf_bpfel_amd64.o' are compiled + from dual-licensed GPL-2.0-only OR BSD-2-Clause licensed code, and are distributed with + the GPL-2.0-only License (LICENSES/GPL-2.0-only.txt). + +- Source code not listed in the previous points is licensed under the Apache License, + version 2 (LICENSES/Apache-2.0.txt). 
+ + -------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-autodiscover Version: v0.6.7 @@ -12468,11 +12494,11 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-a -------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-client/v7 -Version: v7.6.0 +Version: v7.8.0 Licence type (autodetected): Elastic -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-client/v7@v7.6.0/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-client/v7@v7.8.0/LICENSE.txt: ELASTIC LICENSE AGREEMENT @@ -12701,11 +12727,11 @@ SOFTWARE -------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-libs -Version: v0.7.3 +Version: v0.7.5 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.7.3/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.7.5/LICENSE: Apache License Version 2.0, January 2004 @@ -13437,11 +13463,11 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/go-concert@v0.2 -------------------------------------------------------------------------------- Dependency : github.com/elastic/go-elasticsearch/v8 -Version: v8.11.1 +Version: v8.12.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/go-elasticsearch/v8@v8.11.1/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-elasticsearch/v8@v8.12.0/LICENSE: Apache License Version 2.0, 
January 2004 @@ -13648,11 +13674,11 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/go-elasticsearc -------------------------------------------------------------------------------- Dependency : github.com/elastic/go-libaudit/v2 -Version: v2.4.0 +Version: v2.5.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/go-libaudit/v2@v2.4.0/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-libaudit/v2@v2.5.0/LICENSE.txt: Apache License @@ -14955,11 +14981,11 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/go-structform@v -------------------------------------------------------------------------------- Dependency : github.com/elastic/go-sysinfo -Version: v1.11.2 +Version: v1.12.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/go-sysinfo@v1.11.2/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-sysinfo@v1.12.0/LICENSE.txt: Apache License @@ -15589,11 +15615,11 @@ limitations under the License. 
-------------------------------------------------------------------------------- Dependency : github.com/elastic/mito -Version: v1.7.0 +Version: v1.8.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/mito@v1.7.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/mito@v1.8.0/LICENSE: Apache License @@ -17538,11 +17564,11 @@ Contents of probable licence file $GOMODCACHE/github.com/gomodule/redigo@v1.8.3/ -------------------------------------------------------------------------------- Dependency : github.com/google/cel-go -Version: v0.17.7 +Version: v0.19.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/google/cel-go@v0.17.7/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/google/cel-go@v0.19.0/LICENSE: Apache License @@ -20363,11 +20389,11 @@ SOFTWARE. 
-------------------------------------------------------------------------------- Dependency : github.com/lestrrat-go/jwx/v2 -Version: v2.0.11 +Version: v2.0.19 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/lestrrat-go/jwx/v2@v2.0.11/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/lestrrat-go/jwx/v2@v2.0.19/LICENSE: The MIT License (MIT) @@ -25520,11 +25546,11 @@ Contents of probable licence file $GOMODCACHE/google.golang.org/grpc@v1.58.3/LIC -------------------------------------------------------------------------------- Dependency : google.golang.org/protobuf -Version: v1.31.0 +Version: v1.32.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/google.golang.org/protobuf@v1.31.0/LICENSE: +Contents of probable licence file $GOMODCACHE/google.golang.org/protobuf@v1.32.0/LICENSE: Copyright (c) 2018 The Go Authors. All rights reserved. @@ -31307,39 +31333,41 @@ THE SOFTWARE. -------------------------------------------------------------------------------- -Dependency : github.com/antlr/antlr4/runtime/Go/antlr/v4 -Version: v4.0.0-20230305170008-8188dc5388df +Dependency : github.com/antlr4-go/antlr/v4 +Version: v4.13.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/antlr/antlr4/runtime/!go/antlr/v4@v4.0.0-20230305170008-8188dc5388df/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/antlr4-go/antlr/v4@v4.13.0/LICENSE: -Copyright 2021 The ANTLR Project +Copyright (c) 2012-2023 The ANTLR Project. All rights reserved. 
-Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: - 1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. - 3. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from this - software without specific prior written permission. +3. Neither name of copyright holders nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- @@ -36165,6 +36193,39 @@ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+-------------------------------------------------------------------------------- +Dependency : github.com/cilium/ebpf +Version: v0.12.3 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/cilium/ebpf@v0.12.3/LICENSE: + +MIT License + +Copyright (c) 2017 Nathan Sweet +Copyright (c) 2018, 2019 Cloudflare +Copyright (c) 2019 Authors of Cilium + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + -------------------------------------------------------------------------------- Dependency : github.com/codegangsta/inject Version: v0.0.0-20150114235600-33e0aa1cb7c0 @@ -37380,11 +37441,11 @@ SOFTWARE. 
-------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-transport-go/v8 -Version: v8.3.0 +Version: v8.4.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-transport-go/v8@v8.3.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-transport-go/v8@v8.4.0/LICENSE: Apache License Version 2.0, January 2004 @@ -38170,11 +38231,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : github.com/frankban/quicktest -Version: v1.14.3 +Version: v1.14.5 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/frankban/quicktest@v1.14.3/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/frankban/quicktest@v1.14.5/LICENSE: MIT License @@ -38199,6 +38260,37 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+-------------------------------------------------------------------------------- +Dependency : github.com/go-faker/faker/v4 +Version: v4.2.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/go-faker/faker/v4@v4.2.0/LICENSE: + +MIT License + +Copyright (c) 2017 Iman Tumorang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + -------------------------------------------------------------------------------- Dependency : github.com/go-logfmt/logfmt Version: v0.5.1 @@ -38233,11 +38325,11 @@ SOFTWARE. 
-------------------------------------------------------------------------------- Dependency : github.com/go-logr/logr -Version: v1.2.4 +Version: v1.3.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/go-logr/logr@v1.2.4/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/go-logr/logr@v1.3.0/LICENSE: Apache License Version 2.0, January 2004 @@ -45641,11 +45733,11 @@ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -------------------------------------------------------------------------------- Dependency : github.com/kr/pretty -Version: v0.3.0 +Version: v0.3.1 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/kr/pretty@v0.3.0/License: +Contents of probable licence file $GOMODCACHE/github.com/kr/pretty@v0.3.1/License: Copyright 2012 Keith Rarick @@ -45911,11 +46003,11 @@ Contents of probable licence file $GOMODCACHE/github.com/kylelemons/godebug@v1.1 -------------------------------------------------------------------------------- Dependency : github.com/lestrrat-go/blackmagic -Version: v1.0.1 +Version: v1.0.2 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/lestrrat-go/blackmagic@v1.0.1/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/lestrrat-go/blackmagic@v1.0.2/LICENSE: MIT License @@ -52424,11 +52516,11 @@ Contents of probable licence file $GOMODCACHE/go.opencensus.io@v0.24.0/LICENSE: -------------------------------------------------------------------------------- Dependency : go.opentelemetry.io/otel -Version: v1.19.0 +Version: v1.21.0 Licence type (autodetected): Apache-2.0 
-------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel@v1.19.0/LICENSE: +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel@v1.21.0/LICENSE: Apache License Version 2.0, January 2004 @@ -52635,11 +52727,222 @@ Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel@v1.19.0/L -------------------------------------------------------------------------------- Dependency : go.opentelemetry.io/otel/metric -Version: v1.19.0 +Version: v1.21.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/metric@v1.21.0/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : go.opentelemetry.io/otel/sdk +Version: v1.21.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/metric@v1.19.0/LICENSE: +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/sdk@v1.21.0/LICENSE: Apache License Version 2.0, January 2004 @@ -52846,11 +53149,11 @@ Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/metric@v1 -------------------------------------------------------------------------------- Dependency : go.opentelemetry.io/otel/trace -Version: v1.19.0 +Version: v1.21.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/trace@v1.19.0/LICENSE: +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/trace@v1.21.0/LICENSE: Apache License Version 2.0, January 2004 diff --git a/auditbeat/Jenkinsfile.yml b/auditbeat/Jenkinsfile.yml index 4ea656f174ea..a68f7e1094a4 100644 --- a/auditbeat/Jenkinsfile.yml +++ b/auditbeat/Jenkinsfile.yml @@ -30,6 +30,24 @@ stages: unitTest: mage: "mage build unitTest" stage: mandatory + integTest: + mage: "mage build 
integTest" + when: + comments: + - "/test auditbeat integTest" + branches: false + tags: false + stage: extended + integTest-arm: + mage: "mage build integTest" + platforms: + - "ubuntu-2204-aarch64" + when: + comments: + - "/test auditbeat integTest arm" + branches: false + tags: false + stage: extended crosscompile: make: "make -C auditbeat crosscompile" stage: mandatory diff --git a/auditbeat/docs/fields.asciidoc b/auditbeat/docs/fields.asciidoc index bd4db4ce5b6c..9eee5f008fc1 100644 --- a/auditbeat/docs/fields.asciidoc +++ b/auditbeat/docs/fields.asciidoc @@ -18925,6 +18925,28 @@ type: keyword -- +*`process.thread.capabilities.effective`*:: ++ +-- +This is the set of capabilities used by the kernel to perform permission checks for the thread. + +type: keyword + +example: ["CAP_BPF", "CAP_SYS_ADMIN"] + +-- + +*`process.thread.capabilities.permitted`*:: ++ +-- +This is a limiting superset for the effective capabilities that the thread may assume. + +type: keyword + +example: ["CAP_BPF", "CAP_SYS_ADMIN"] + +-- + [float] === hash diff --git a/auditbeat/tests/system/test_show_command.py b/auditbeat/tests/system/test_show_command.py index 3aa15c0aec24..843ab7e829dc 100644 --- a/auditbeat/tests/system/test_show_command.py +++ b/auditbeat/tests/system/test_show_command.py @@ -98,6 +98,7 @@ def test_show_auditd_status(self): 'lost', 'backlog', 'backlog_wait_time', + 'backlog_wait_time_actual', 'features', ] diff --git a/x-pack/auditbeat/tracing/cpu.go b/auditbeat/tracing/cpu.go similarity index 73% rename from x-pack/auditbeat/tracing/cpu.go rename to auditbeat/tracing/cpu.go index e0fd15e09ceb..280cc395bf10 100644 --- a/x-pack/auditbeat/tracing/cpu.go +++ b/auditbeat/tracing/cpu.go @@ -1,6 +1,19 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. //go:build linux @@ -9,7 +22,7 @@ package tracing import ( "bytes" "fmt" - "io/ioutil" + "os" "strconv" "strings" ) @@ -72,7 +85,7 @@ func (s CPUSet) AsList() []int { // NewCPUSetFromFile creates a new CPUSet from the contents of a file. 
func NewCPUSetFromFile(path string) (cpus CPUSet, err error) { - contents, err := ioutil.ReadFile(path) + contents, err := os.ReadFile(path) if err != nil { return cpus, err } @@ -84,9 +97,12 @@ func NewCPUSetFromFile(path string) (cpus CPUSet, err error) { // Where: // RANGE := | - func NewCPUSetFromExpression(contents string) (CPUSet, error) { - var ranges [][]int - var max, count int - for _, expr := range strings.Split(contents, ",") { + expressions := strings.Split(contents, ",") + + ranges := make([][]int, 0, len(expressions)) + + var maximum, count int + for _, expr := range expressions { if len(expr) == 0 { continue } @@ -99,16 +115,16 @@ func NewCPUSetFromExpression(contents string) (CPUSet, error) { } num := int(num16) r = append(r, num) - if num+1 > max { - max = num + 1 + if num+1 > maximum { + maximum = num + 1 } } ranges = append(ranges, r) } - if max == 0 { + if maximum == 0 { return CPUSet{}, nil } - mask := make([]bool, max) + mask := make([]bool, maximum) for _, r := range ranges { from, to := -1, -1 switch len(r) { diff --git a/x-pack/auditbeat/tracing/cpu_test.go b/auditbeat/tracing/cpu_test.go similarity index 76% rename from x-pack/auditbeat/tracing/cpu_test.go rename to auditbeat/tracing/cpu_test.go index 3f6921895daf..bfce3a72de0d 100644 --- a/x-pack/auditbeat/tracing/cpu_test.go +++ b/auditbeat/tracing/cpu_test.go @@ -1,6 +1,19 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. //go:build linux diff --git a/x-pack/auditbeat/tracing/decoder.go b/auditbeat/tracing/decoder.go similarity index 90% rename from x-pack/auditbeat/tracing/decoder.go rename to auditbeat/tracing/decoder.go index 8755b25f5dd9..d669e8c8e982 100644 --- a/x-pack/auditbeat/tracing/decoder.go +++ b/auditbeat/tracing/decoder.go @@ -1,6 +1,19 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
//go:build linux @@ -183,9 +196,13 @@ func NewStructDecoder(desc ProbeFormat, allocFn AllocateFn) (Decoder, error) { } var name string + var allowUndefined bool var greedy bool for idx, param := range strings.Split(values, ",") { switch param { + case "allowundefined": + // it is okay not to find it in the desc.Fields + allowUndefined = true case "greedy": greedy = true default: @@ -214,6 +231,9 @@ func NewStructDecoder(desc ProbeFormat, allocFn AllocateFn) (Decoder, error) { inField, found := desc.Fields[name] if !found { + if allowUndefined { + continue + } return nil, fmt.Errorf("field '%s' not found in kprobe format description", name) } @@ -326,14 +346,14 @@ func (d *structDecoder) Decode(raw []byte, meta Metadata) (s interface{}, err er case FieldTypeString: offset := uintptr(MachineEndian.Uint16(raw[dec.src:])) - len := uintptr(MachineEndian.Uint16(raw[dec.src+2:])) - if offset+len > n { + length := uintptr(MachineEndian.Uint16(raw[dec.src+2:])) + if offset+length > n { return nil, fmt.Errorf("perf event string data for field %s overflows message of size %d", dec.name, n) } - if len > 0 && raw[offset+len-1] == 0 { - len-- + if length > 0 && raw[offset+length-1] == 0 { + length-- } - *(*string)(unsafe.Add(destPtr, dec.dst)) = string(raw[offset : offset+len]) + *(*string)(unsafe.Add(destPtr, dec.dst)) = string(raw[offset : offset+length]) case FieldTypeMeta: *(*Metadata)(unsafe.Add(destPtr, dec.dst)) = meta @@ -357,7 +377,8 @@ type dumpDecoder struct { // - integer of 64bit (u64 / s64). // - dump consecutive memory. func NewDumpDecoder(format ProbeFormat) (Decoder, error) { - var fields []Field + fields := make([]Field, 0, len(format.Fields)) + for name, field := range format.Fields { if strings.Index(name, "arg") != 0 { continue diff --git a/auditbeat/tracing/doc.go b/auditbeat/tracing/doc.go new file mode 100644 index 000000000000..5f4e8b92331e --- /dev/null +++ b/auditbeat/tracing/doc.go @@ -0,0 +1,22 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Package tracing provides a set of tools built on top of +// golang.org/x/sys/unix/linux/perf that simplify working with KProbes and +// UProbes, using tracing perf channels to receive events from the kernel and +// decoding of this raw events into more useful types. +package tracing diff --git a/auditbeat/tracing/endian.go b/auditbeat/tracing/endian.go new file mode 100644 index 000000000000..d7fa00c6fa20 --- /dev/null +++ b/auditbeat/tracing/endian.go @@ -0,0 +1,28 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build linux + +package tracing + +import ( + "encoding/binary" +) + +// MachineEndian is either binary.BigEndian or binary.LittleEndian, depending +// on the current architecture. +var MachineEndian = binary.NativeEndian diff --git a/x-pack/auditbeat/tracing/events_test.go b/auditbeat/tracing/events_test.go similarity index 90% rename from x-pack/auditbeat/tracing/events_test.go rename to auditbeat/tracing/events_test.go index d89f4946ca19..0b5efaec53a5 100644 --- a/x-pack/auditbeat/tracing/events_test.go +++ b/auditbeat/tracing/events_test.go @@ -1,6 +1,19 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. //go:build linux @@ -9,7 +22,6 @@ package tracing import ( "fmt" "io" - "io/ioutil" "os" "path/filepath" "strings" @@ -301,7 +313,7 @@ func TestKProbeReal(t *testing.T) { func TestKProbeEventsList(t *testing.T) { // Make dir to monitor. 
- tmpDir, err := ioutil.TempDir("", "events_test") + tmpDir, err := os.MkdirTemp("", "events_test") if err != nil { t.Fatal(err) } @@ -358,7 +370,7 @@ w:future feature func TestKProbeEventsAddRemoveKProbe(t *testing.T) { // Make dir to monitor. - tmpDir, err := ioutil.TempDir("", "events_test") + tmpDir, err := os.MkdirTemp("", "events_test") if err != nil { t.Fatal(err) } @@ -397,7 +409,7 @@ w:future feature off, err := file.Seek(int64(0), io.SeekStart) assert.NoError(t, err) assert.Equal(t, int64(0), off) - contents, err := ioutil.ReadAll(file) + contents, err := io.ReadAll(file) assert.NoError(t, err) expected := append([]byte(baseContents), []byte( `p:kprobe/myprobe sys_open path=+0(%di):string mode=%si diff --git a/auditbeat/tracing/int_aligned.go b/auditbeat/tracing/int_aligned.go new file mode 100644 index 000000000000..cbcadf96f324 --- /dev/null +++ b/auditbeat/tracing/int_aligned.go @@ -0,0 +1,71 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build linux && !386 && !amd64 && !amd64p32 + +// Alignment-safe integer reading and writing functions. 
+ +package tracing + +import ( + "errors" + "unsafe" +) + +var errBadSize = errors.New("bad size for integer") + +func copyInt(dst unsafe.Pointer, src unsafe.Pointer, len uint8) error { + copy(unsafe.Slice((*byte)(dst), len), unsafe.Slice((*byte)(src), len)) + return nil +} + +func readInt(ptr unsafe.Pointer, len uint8, signed bool) (any, error) { + var value any + asSlice := unsafe.Slice((*byte)(ptr), len) + switch len { + case 1: + if signed { + value = int8(asSlice[0]) + } else { + value = asSlice[0] + } + case 2: + if signed { + value = int16(MachineEndian.Uint16(asSlice)) + } else { + value = MachineEndian.Uint16(asSlice) + } + + case 4: + if signed { + value = int32(MachineEndian.Uint32(asSlice)) + } else { + value = MachineEndian.Uint32(asSlice) + } + + case 8: + if signed { + value = int64(MachineEndian.Uint64(asSlice)) + } else { + value = MachineEndian.Uint64(asSlice) + } + + default: + return nil, errBadSize + } + return value, nil +} diff --git a/x-pack/auditbeat/tracing/int_unaligned.go b/auditbeat/tracing/int_unaligned.go similarity index 52% rename from x-pack/auditbeat/tracing/int_unaligned.go rename to auditbeat/tracing/int_unaligned.go index 38a767dd6421..d4c1a3f6b167 100644 --- a/x-pack/auditbeat/tracing/int_unaligned.go +++ b/auditbeat/tracing/int_unaligned.go @@ -1,6 +1,19 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. //go:build linux && (386 || amd64 || amd64p32) @@ -35,7 +48,9 @@ func copyInt(dst unsafe.Pointer, src unsafe.Pointer, len uint8) error { return nil } -func readInt(ptr unsafe.Pointer, len uint8, signed bool) (value interface{}, err error) { +func readInt(ptr unsafe.Pointer, len uint8, signed bool) (any, error) { + var value any + switch len { case 1: if signed { @@ -67,5 +82,5 @@ func readInt(ptr unsafe.Pointer, len uint8, signed bool) (value interface{}, err default: return nil, errBadSize } - return + return value, nil } diff --git a/x-pack/auditbeat/tracing/perfevent.go b/auditbeat/tracing/perfevent.go similarity index 88% rename from x-pack/auditbeat/tracing/perfevent.go rename to auditbeat/tracing/perfevent.go index 4b97772b18fc..36f595aa6761 100644 --- a/x-pack/auditbeat/tracing/perfevent.go +++ b/auditbeat/tracing/perfevent.go @@ -1,6 +1,19 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. //go:build linux @@ -57,14 +70,14 @@ type PerfChannel struct { cpus CPUSet // Settings - attr perf.Attr - mappedPages int - pid int - pollTimeout time.Duration - sizeSampleC int - sizeErrC int - sizeLostC int - withTime bool + attr perf.Attr + mappedPages int + pid int + pollTimeout time.Duration + sizeSampleC int + sizeErrC int + sizeLostC int + wakeUpEvents uint32 } // PerfChannelConf instances change the configuration of a perf channel. @@ -89,14 +102,15 @@ func NewPerfChannel(cfg ...PerfChannelConf) (channel *PerfChannel, err error) { // Defaults channel = &PerfChannel{ - sizeSampleC: 1024, - sizeErrC: 8, - sizeLostC: 64, - mappedPages: 64, - pollTimeout: time.Millisecond * 200, - done: make(chan struct{}, 0), - streams: make(map[uint64]stream), - pid: perf.AllThreads, + sizeSampleC: 1024, + sizeErrC: 8, + sizeLostC: 64, + mappedPages: 64, + wakeUpEvents: 1, + pollTimeout: time.Millisecond * 200, + done: make(chan struct{}), + streams: make(map[uint64]stream), + pid: perf.AllThreads, attr: perf.Attr{ Type: perf.TracepointEvent, ClockID: unix.CLOCK_MONOTONIC, @@ -108,8 +122,6 @@ func NewPerfChannel(cfg ...PerfChannelConf) (channel *PerfChannel, err error) { }, }, } - channel.attr.SetSamplePeriod(1) - channel.attr.SetWakeupEvents(1) // Load the list of online CPUs from /sys/devices/system/cpu/online. // This is necessary in order to to install each kprobe on all online CPUs. 
@@ -130,6 +142,10 @@ func NewPerfChannel(cfg ...PerfChannelConf) (channel *PerfChannel, err error) { return nil, err } } + + channel.attr.SetSamplePeriod(1) + channel.attr.SetWakeupEvents(channel.wakeUpEvents) + return channel, nil } @@ -157,6 +173,18 @@ func WithErrBufferSize(size int) PerfChannelConf { } } +// WithWakeUpEvents configures sets how many samples happen before an overflow +// notification happens. Setting wakeUpEvents to 0 is equivalent to 1. +func WithWakeUpEvents(wakeUpEvents uint32) PerfChannelConf { + return func(channel *PerfChannel) error { + if wakeUpEvents == 0 { + wakeUpEvents = 1 + } + channel.wakeUpEvents = wakeUpEvents + return nil + } +} + // WithLostBufferSize configures the capacity of the channel used to pass lost // event notifications (PerfChannel.LostC()). func WithLostBufferSize(size int) PerfChannelConf { @@ -462,7 +490,7 @@ func (m *recordMerger) readSampleNonBlock(ev *perf.Event, ctx context.Context) ( return nil, false } if err != nil { - if err == perf.ErrBadRecord { + if errors.Is(err, perf.ErrBadRecord) { m.channel.lostC <- ^uint64(0) continue } @@ -503,7 +531,7 @@ func pollAll(evs []*perf.Event, timeout time.Duration) (active int, closed int, } ts := unix.NsecToTimespec(timeout.Nanoseconds()) - for err = unix.EINTR; err == unix.EINTR; { + for err = unix.EINTR; errors.Is(err, unix.EINTR); { _, err = unix.Ppoll(pollfds, &ts, nil) } if err != nil { @@ -518,5 +546,5 @@ func pollAll(evs []*perf.Event, timeout time.Duration) (active int, closed int, closed++ } } - return + return active, closed, err } diff --git a/x-pack/auditbeat/tracing/probe.go b/auditbeat/tracing/probe.go similarity index 80% rename from x-pack/auditbeat/tracing/probe.go rename to auditbeat/tracing/probe.go index 61bf353ef5f5..5bfd5977c075 100644 --- a/x-pack/auditbeat/tracing/probe.go +++ b/auditbeat/tracing/probe.go @@ -1,6 +1,19 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. 
Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. //go:build linux diff --git a/x-pack/auditbeat/tracing/tracefs.go b/auditbeat/tracing/tracefs.go similarity index 89% rename from x-pack/auditbeat/tracing/tracefs.go rename to auditbeat/tracing/tracefs.go index b26eb17312c3..532eb75ca459 100644 --- a/x-pack/auditbeat/tracing/tracefs.go +++ b/auditbeat/tracing/tracefs.go @@ -1,6 +1,19 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. //go:build linux @@ -26,9 +39,9 @@ const ( var ( // p[:[GRP/]EVENT] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS] : Set a probe // r[MAXACTIVE][:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS] : Set a return probe - kprobeRegexp *regexp.Regexp = regexp.MustCompile("^([pr])[0-9]*:(?:([^/ ]*)/)?([^/ ]+) ([^ ]+) ?(.*)") + kprobeRegexp *regexp.Regexp = regexp.MustCompile(`^([pr])[0-9]*:(?:([^/ ]*)/)?([^/ ]+) ([^ ]+) ?(.*)`) - formatRegexp *regexp.Regexp = regexp.MustCompile("\\s+([^:]+):([^;]*);") + formatRegexp *regexp.Regexp = regexp.MustCompile(`\s+([^:]+):([^;]*);`) ) // TraceFS is an accessor to manage event tracing via tracefs or debugfs. @@ -72,13 +85,14 @@ func IsTraceFSAvailableAt(path string) error { // IsTraceFSAvailable returns nil if a tracefs or debugfs supporting KProbes // is available at the well-known paths. Otherwise returns an error. -func IsTraceFSAvailable() (err error) { +func IsTraceFSAvailable() error { + var err error for _, path := range []string{traceFSPath, debugFSTracingPath} { if err = IsTraceFSAvailableAt(path); err == nil { - break + return nil } } - return + return err } // ListKProbes lists the currently installed kprobes / kretprobes @@ -122,7 +136,7 @@ func (dfs *TraceFS) listProbes(filename string) (probes []Probe, err error) { } // AddKProbe installs a new kprobe/kretprobe. 
-func (dfs *TraceFS) AddKProbe(probe Probe) (err error) { +func (dfs *TraceFS) AddKProbe(probe Probe) error { return dfs.appendToFile(kprobeCfgFile, probe.String()) } diff --git a/catalog-info.yaml b/catalog-info.yaml index 4d6c956f1f3f..fb0395d20277 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -99,9 +99,8 @@ spec: cancel_intermediate_builds_branch_filter: '!main !7.* !8.*' skip_intermediate_builds: true skip_intermediate_builds_branch_filter: '!main !7.* !8.*' - # TODO uncomment this environment variable when pipeline definition is updated - # env: - # ELASTIC_PR_COMMENTS_ENABLED: 'true' + env: + ELASTIC_PR_COMMENTS_ENABLED: 'true' teams: ingest-fp: access_level: MANAGE_BUILD_AND_READ @@ -130,9 +129,9 @@ spec: name: filebeat description: "Filebeat pipeline" spec: -# branch_configuration: "main 7.* 8.* v7.* v8.*" TODO: temporarily commented to build PRs from forks + branch_configuration: "main 7.* 8.* v7.* v8.*" pipeline_file: ".buildkite/filebeat/filebeat-pipeline.yml" -# maximum_timeout_in_minutes: 120 TODO: uncomment when pipeline is ready + maximum_timeout_in_minutes: 120 provider_settings: build_pull_request_forks: false build_pull_requests: true # requires filter_enabled and filter_condition settings as below when used with buildkite-pr-bot @@ -145,8 +144,8 @@ spec: cancel_intermediate_builds_branch_filter: "!main !7.* !8.*" skip_intermediate_builds: true skip_intermediate_builds_branch_filter: "!main !7.* !8.*" - # env: - # ELASTIC_PR_COMMENTS_ENABLED: "true" TODO: uncomment when pipeline is ready + env: + ELASTIC_PR_COMMENTS_ENABLED: "true" teams: ingest-fp: access_level: MANAGE_BUILD_AND_READ @@ -310,9 +309,9 @@ spec: name: beats-libbeat description: "Beats libbeat pipeline" spec: -# branch_configuration: "main 7.17 8.* v7.17 v8.*" TODO: temporarily commented to build PRs from forks + branch_configuration: "main 7.17 8.* pipeline_file: ".buildkite/libbeat/pipeline.libbeat.yml" -# maximum_timeout_in_minutes: 120 TODO: uncomment when pipeline is 
ready + maximum_timeout_in_minutes: 120 provider_settings: build_pull_request_forks: false build_pull_requests: true # requires filter_enabled and filter_condition settings as below when used with buildkite-pr-bot @@ -322,11 +321,11 @@ spec: build.pull_request.id == null || (build.creator.name == 'elasticmachine' && build.pull_request.id != null) repository: elastic/beats cancel_intermediate_builds: true - cancel_intermediate_builds_branch_filter: "!main !7.17 !8.*" + cancel_intermediate_builds_branch_filter: "!main !7.* !8.*" skip_intermediate_builds: true skip_intermediate_builds_branch_filter: "!main !7.17 !8.*" - # env: - # ELASTIC_PR_COMMENTS_ENABLED: "true" TODO: uncomment when pipeline is ready + env: + ELASTIC_PR_COMMENTS_ENABLED: "true" teams: ingest-fp: access_level: MANAGE_BUILD_AND_READ @@ -377,3 +376,92 @@ spec: access_level: MANAGE_BUILD_AND_READ everyone: access_level: READ_ONLY + +--- +# yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json +apiVersion: backstage.io/v1alpha1 +kind: Resource +metadata: + name: buildkite-pipeline-beats-xpack-elastic-agent + description: "Beats xpack elastic agent" + links: + - title: Pipeline + url: https://buildkite.com/elastic/beats-xpack-elastic-agent + +spec: + type: buildkite-pipeline + owner: group:ingest-fp + system: buildkite + implementation: + apiVersion: buildkite.elastic.dev/v1 + kind: Pipeline + metadata: + name: beats-xpack-elastic-agent + description: "Beats xpack elastic agent pipeline" + spec: + branch_configuration: "7.17" + pipeline_file: ".buildkite/x-pack/elastic-agent/pipeline.xpack.elastic-agent.yml" + provider_settings: + build_pull_request_forks: false + build_pull_requests: true # requires filter_enabled and filter_condition settings as below when used with buildkite-pr-bot + build_tags: true + filter_enabled: true + filter_condition: >- + build.pull_request.id == null || 
(build.creator.name == 'elasticmachine' && build.pull_request.id != null) + repository: elastic/beats + cancel_intermediate_builds: true + cancel_intermediate_builds_branch_filter: "!main !7.17 !8.*" + skip_intermediate_builds: true + skip_intermediate_builds_branch_filter: "!main !7.17 !8.*" + # env: + # ELASTIC_PR_COMMENTS_ENABLED: "true" TODO: uncomment when pipeline is ready + teams: + ingest-fp: + access_level: MANAGE_BUILD_AND_READ + everyone: + access_level: READ_ONLY + +--- +# yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json +apiVersion: backstage.io/v1alpha1 +kind: Resource +metadata: + name: buildkite-pipeline-beats-winlogbeat + description: "Beats winlogbeat pipeline" + links: + - title: Pipeline + url: https://buildkite.com/elastic/beats-winlogbeat + +spec: + type: buildkite-pipeline + owner: group:ingest-fp + system: buildkite + implementation: + apiVersion: buildkite.elastic.dev/v1 + kind: Pipeline + metadata: + name: beats-winlogbeat + description: "Beats winlogbeat pipeline" + spec: +# branch_configuration: "main 7.17 8.*" TODO: temporarily commented to build PRs from forks + pipeline_file: ".buildkite/winlogbeat/pipeline.winlogbeat.yml" +# maximum_timeout_in_minutes: 120 TODO: uncomment when pipeline is ready + provider_settings: + build_pull_request_forks: false + build_pull_requests: true # requires filter_enabled and filter_condition settings as below when used with buildkite-pr-bot + build_tags: true + filter_enabled: true + filter_condition: >- + build.pull_request.id == null || (build.creator.name == 'elasticmachine' && build.pull_request.id != null) + repository: elastic/beats + cancel_intermediate_builds: true + cancel_intermediate_builds_branch_filter: "!main !7.17 !8.*" + skip_intermediate_builds: true + skip_intermediate_builds_branch_filter: "!main !7.17 !8.*" + # env: + # ELASTIC_PR_COMMENTS_ENABLED: "true" TODO: 
uncomment when pipeline is ready + teams: + ingest-fp: + access_level: MANAGE_BUILD_AND_READ + everyone: + access_level: READ_ONLY diff --git a/dev-tools/notice/overrides.json b/dev-tools/notice/overrides.json index 1484fcde52a0..eee18acc0de5 100644 --- a/dev-tools/notice/overrides.json +++ b/dev-tools/notice/overrides.json @@ -17,3 +17,4 @@ {"name": "github.com/awslabs/kinesis-aggregation/go/v2", "licenceType": "Apache-2.0", "url": "https://github.com/awslabs/kinesis-aggregation/blob/master/LICENSE.txt"} {"name": "github.com/dnaeon/go-vcr", "licenceType": "BSD-2-Clause"} {"name": "github.com/JohnCGriffin/overflow", "licenceType": "MIT"} +{"name": "github.com/elastic/ebpfevents", "licenceType": "Apache-2.0"} diff --git a/dev-tools/packaging/package_test.go b/dev-tools/packaging/package_test.go index e01b6c566e5a..fff920b429c2 100644 --- a/dev-tools/packaging/package_test.go +++ b/dev-tools/packaging/package_test.go @@ -714,9 +714,11 @@ func readZip(t *testing.T, zipFile string, inspectors ...inspector) (*packageFil } func readDocker(dockerFile string) (*packageFile, *dockerInfo, error) { - // Read the manifest file first so that the config file and layer - // names are known in advance. 
- manifest, err := getDockerManifest(dockerFile) + var manifest *dockerManifest + var info *dockerInfo + layers := make(map[string]*packageFile) + + manifest, err := readManifest(dockerFile) if err != nil { return nil, nil, err } @@ -727,9 +729,6 @@ func readDocker(dockerFile string) (*packageFile, *dockerInfo, error) { } defer file.Close() - var info *dockerInfo - layers := make(map[string]*packageFile) - gzipReader, err := gzip.NewReader(file) if err != nil { return nil, nil, err @@ -770,11 +769,7 @@ func readDocker(dockerFile string) (*packageFile, *dockerInfo, error) { // Read layers in order and for each file keep only the entry seen in the later layer p := &packageFile{Name: filepath.Base(dockerFile), Contents: map[string]packageEntry{}} - for _, layer := range manifest.Layers { - layerFile, found := layers[layer] - if !found { - return nil, nil, fmt.Errorf("layer not found: %s", layer) - } + for _, layerFile := range layers { for name, entry := range layerFile.Contents { // Check only files in working dir and entrypoint if strings.HasPrefix("/"+name, workingDir) || "/"+name == entrypoint { @@ -799,22 +794,21 @@ func readDocker(dockerFile string) (*packageFile, *dockerInfo, error) { return p, info, nil } -// getDockerManifest opens a gzipped tar file to read the Docker manifest.json -// that it is expected to contain. 
-func getDockerManifest(file string) (*dockerManifest, error) { - f, err := os.Open(file) +func readManifest(dockerFile string) (*dockerManifest, error) { + var manifest *dockerManifest + + file, err := os.Open(dockerFile) if err != nil { return nil, err } - defer f.Close() + defer file.Close() - gzipReader, err := gzip.NewReader(f) + gzipReader, err := gzip.NewReader(file) if err != nil { return nil, err } defer gzipReader.Close() - var manifest *dockerManifest tarReader := tar.NewReader(gzipReader) for { header, err := tarReader.Next() @@ -833,8 +827,7 @@ func getDockerManifest(file string) (*dockerManifest, error) { break } } - - return manifest, nil + return manifest, err } type dockerManifest struct { diff --git a/docs/devguide/testing.asciidoc b/docs/devguide/testing.asciidoc index 49d2366c920a..9488fe47dcee 100644 --- a/docs/devguide/testing.asciidoc +++ b/docs/devguide/testing.asciidoc @@ -50,11 +50,11 @@ In Metricbeat, run the command from within a module like this: `go test --tags i A note about tags: the `--data` flag is a custom flag added by Metricbeat and Packetbeat frameworks. It will not be present in case tags do not match, as the relevant code will not be run and silently skipped (without the tag the test file is ignored by Go compiler so the framework doesn't load). This may happen if there are different tags in the build tags of the metricset under test (i.e. the GCP billing metricset requires the `billing` tag too). -==== Running Python Tests +==== Running System (integration) Tests (Python and Go) -Python system tests are defined in the `tests/system` directory. They require a testing binary to be available and the python environment to be set up. +The system tests are defined in the `tests/system` (for legacy Python test) and on `tests/integration` (for Go tests) directory. They require a testing binary to be available and the python environment to be set up. -To create the testing binary run `mage buildSystemTestBinary`. 
This will create the test binary in the beat directory. To setup the testing environment run `mage pythonVirtualEnv` which will create a virtual environment with all test dependencies and print its location. To activate it, the instructions depend on your operating system. See the https://packaging.python.org/en/latest/guides/installing-using-pip-and-virtual-environments/#activating-a-virtual-environment[virtualenv documentation]. +To create the testing binary run `mage buildSystemTestBinary`. This will create the test binary in the beat directory. To set up the Python testing environment run `mage pythonVirtualEnv` which will create a virtual environment with all test dependencies and print its location. To activate it, the instructions depend on your operating system. See the https://packaging.python.org/en/latest/guides/installing-using-pip-and-virtual-environments/#activating-a-virtual-environment[virtualenv documentation]. To run the system and integration tests use the `mage pythonIntegTest` target, which will start the required services using https://docs.docker.com/compose/[docker-compose] and run all integration tests. Similar to Go integration tests, the individual steps can be done manually to allow selecting which tests should be run: @@ -62,12 +62,16 @@ To run the system and integration tests use the `mage pythonIntegTest` target, w ---- # Create and activate the system test virtual environment (assumes a Unix system). source $(mage pythonVirtualEnv)/bin/activate + # Pull and build the containers. Only needs to be done once unless you change the containers. mage docker:composeBuild + # Bring up all containers, wait until they are healthy, and put them in the background. mage docker:composeUp + # Run all system and integration tests. INTEGRATION_TESTS=1 pytest ./tests/system + # Stop all started containers. 
mage docker:composeDown ---- diff --git a/filebeat/filebeat_windows_amd64.syso b/filebeat/filebeat_windows_amd64.syso new file mode 100644 index 000000000000..c52af94f8e05 Binary files /dev/null and b/filebeat/filebeat_windows_amd64.syso differ diff --git a/filebeat/fileset/flags.go b/filebeat/fileset/flags.go index a8ef562d757d..674c6fe7fd9f 100644 --- a/filebeat/fileset/flags.go +++ b/filebeat/fileset/flags.go @@ -23,7 +23,6 @@ import ( "strings" "github.com/elastic/elastic-agent-libs/config" - conf "github.com/elastic/elastic-agent-libs/config" ) // Modules related command line flags. @@ -32,11 +31,11 @@ var ( moduleOverrides = config.SettingFlag(nil, "M", "Module configuration overwrite") ) -type ModuleOverrides map[string]map[string]*conf.C // module -> fileset -> Config +type ModuleOverrides map[string]map[string]*config.C // module -> fileset -> Config // Get returns an array of configuration overrides that should be merged in order. -func (mo *ModuleOverrides) Get(module, fileset string) []*conf.C { - ret := []*conf.C{} +func (mo *ModuleOverrides) Get(module, fileset string) []*config.C { + ret := []*config.C{} moduleWildcard := (*mo)["*"]["*"] if moduleWildcard != nil { diff --git a/filebeat/generator/fields/fields.go b/filebeat/generator/fields/fields.go index 4727990b2332..ba3216c04f36 100644 --- a/filebeat/generator/fields/fields.go +++ b/filebeat/generator/fields/fields.go @@ -179,15 +179,12 @@ func addNewField(fs []field, f field) []field { return append(fs, f) } -func getSemanticElementsFromPatterns(patterns []string) ([]field, error) { - r, err := regexp.Compile("{[\\.\\w\\:]*}") - if err != nil { - return nil, err - } +var semanticElementsRegex = regexp.MustCompile(`{[\.\w\:]*}`) +func getSemanticElementsFromPatterns(patterns []string) ([]field, error) { var fs []field for _, lp := range patterns { - pp := r.FindAllString(lp, -1) + pp := semanticElementsRegex.FindAllString(lp, -1) for _, p := range pp { f := newField(p) if f.SemanticElements == nil 
{ @@ -221,9 +218,7 @@ func accumulateRemoveFields(remove interface{}, out []string) []string { case string: return append(out, vs) case []string: - for _, vv := range vs { - out = append(out, vv) - } + out = append(out, vs...) case []interface{}: for _, vv := range vs { vvs := vv.(string) diff --git a/filebeat/input/filestream/internal/task/group_test.go b/filebeat/input/filestream/internal/task/group_test.go index 553070e5ec73..5ce15d455e3e 100644 --- a/filebeat/input/filestream/internal/task/group_test.go +++ b/filebeat/input/filestream/internal/task/group_test.go @@ -241,12 +241,14 @@ func TestGroup_Go(t *testing.T) { want := uint64(2) g := NewGroup(want, time.Second, logger, "errorPrefix") - wg.Add(2) + wg.Add(1) err := g.Go(workload(1)) require.NoError(t, err) + wg.Wait() + + wg.Add(1) err = g.Go(workload(2)) require.NoError(t, err) - wg.Wait() err = g.Stop() diff --git a/filebeat/input/journald/pkg/journalfield/conv.go b/filebeat/input/journald/pkg/journalfield/conv.go index bd7403ae142f..94447b773b7e 100644 --- a/filebeat/input/journald/pkg/journalfield/conv.go +++ b/filebeat/input/journald/pkg/journalfield/conv.go @@ -19,11 +19,11 @@ package journalfield import ( "fmt" - "math/bits" "regexp" "strconv" "strings" + "github.com/elastic/beats/v7/libbeat/common/capabilities" "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" ) @@ -190,72 +190,13 @@ func expandCapabilities(fields mapstr.M) { if !ok { return } - w, err := strconv.ParseUint(c, 16, 64) - if err != nil { - return - } - if w == 0 { + caps, err := capabilities.FromString(c, 16) + if err != nil || len(caps) == 0 { return } - caps := make([]string, 0, bits.OnesCount64(w)) - for i := 0; w != 0; i++ { - if w&1 != 0 { - if i < len(capTable) { - caps = append(caps, capTable[i]) - } else { - caps = append(caps, strconv.Itoa(i)) - } - } - w >>= 1 - } fields.Put("process.thread.capabilities.effective", caps) } -// include/uapi/linux/capability.h -var capTable = 
[...]string{ - 0: "CAP_CHOWN", - 1: "CAP_DAC_OVERRIDE", - 2: "CAP_DAC_READ_SEARCH", - 3: "CAP_FOWNER", - 4: "CAP_FSETID", - 5: "CAP_KILL", - 6: "CAP_SETGID", - 7: "CAP_SETUID", - 8: "CAP_SETPCAP", - 9: "CAP_LINUX_IMMUTABLE", - 10: "CAP_NET_BIND_SERVICE", - 11: "CAP_NET_BROADCAST", - 12: "CAP_NET_ADMIN", - 13: "CAP_NET_RAW", - 14: "CAP_IPC_LOCK", - 15: "CAP_IPC_OWNER", - 16: "CAP_SYS_MODULE", - 17: "CAP_SYS_RAWIO", - 18: "CAP_SYS_CHROOT", - 19: "CAP_SYS_PTRACE", - 20: "CAP_SYS_PACCT", - 21: "CAP_SYS_ADMIN", - 22: "CAP_SYS_BOOT", - 23: "CAP_SYS_NICE", - 24: "CAP_SYS_RESOURCE", - 25: "CAP_SYS_TIME", - 26: "CAP_SYS_TTY_CONFIG", - 27: "CAP_MKNOD", - 28: "CAP_LEASE", - 29: "CAP_AUDIT_WRITE", - 30: "CAP_AUDIT_CONTROL", - 31: "CAP_SETFCAP", - 32: "CAP_MAC_OVERRIDE", - 33: "CAP_MAC_ADMIN", - 34: "CAP_SYSLOG", - 35: "CAP_WAKE_ALARM", - 36: "CAP_BLOCK_SUSPEND", - 37: "CAP_AUDIT_READ", - 38: "CAP_PERFMON", - 39: "CAP_BPF", - 40: "CAP_CHECKPOINT_RESTORE", -} - func getStringFromFields(key string, fields mapstr.M) string { value, _ := fields.GetValue(key) str, _ := value.(string) diff --git a/filebeat/input/journald/pkg/journalfield/conv_expand_test.go b/filebeat/input/journald/pkg/journalfield/conv_expand_test.go index c43e57a1c494..09daf7c8f5b6 100644 --- a/filebeat/input/journald/pkg/journalfield/conv_expand_test.go +++ b/filebeat/input/journald/pkg/journalfield/conv_expand_test.go @@ -15,6 +15,8 @@ // specific language governing permissions and limitations // under the License. +//go:build linux && cgo + package journalfield import ( @@ -228,8 +230,8 @@ var expandCapabilitiesTests = []struct { "CAP_PERFMON", "CAP_BPF", "CAP_CHECKPOINT_RESTORE", - "41", - "42", + "CAP_41", + "CAP_42", }, }, }, diff --git a/filebeat/input/tcp/input.go b/filebeat/input/tcp/input.go index 762c6b6ba355..1b3ffa7c2aa4 100644 --- a/filebeat/input/tcp/input.go +++ b/filebeat/input/tcp/input.go @@ -238,31 +238,50 @@ func (m *inputMetrics) poll(addr, addr6 []string, each time.Duration, log *logp. 
// base level for the rx_queue values and ensures that if the // constructed address values are malformed we panic early // within the period of system testing. + want4 := true rx, err := procNetTCP("/proc/net/tcp", addr, hasUnspecified, addrIsUnspecified) if err != nil { - log.Warnf("failed to get initial tcp stats from /proc: %v", err) + want4 = false + log.Infof("did not get initial tcp stats from /proc: %v", err) } + want6 := true rx6, err := procNetTCP("/proc/net/tcp6", addr6, hasUnspecified6, addrIsUnspecified6) if err != nil { - log.Warnf("failed to get initial tcp6 stats from /proc: %v", err) + want6 = false + log.Infof("did not get initial tcp6 stats from /proc: %v", err) + } + if !want4 && !want6 { + log.Warnf("failed to get initial tcp or tcp6 stats from /proc: %v", err) + } else { + m.rxQueue.Set(uint64(rx + rx6)) } - m.rxQueue.Set(uint64(rx + rx6)) t := time.NewTicker(each) for { select { case <-t.C: + var found bool rx, err := procNetTCP("/proc/net/tcp", addr, hasUnspecified, addrIsUnspecified) if err != nil { - log.Warnf("failed to get tcp stats from /proc: %v", err) - continue + if want4 { + log.Warnf("failed to get tcp stats from /proc: %v", err) + } + } else { + found = true + want4 = true } rx6, err := procNetTCP("/proc/net/tcp6", addr6, hasUnspecified6, addrIsUnspecified6) if err != nil { - log.Warnf("failed to get tcp6 stats from /proc: %v", err) - continue + if want6 { + log.Warnf("failed to get tcp6 stats from /proc: %v", err) + } + } else { + found = true + want6 = true + } + if found { + m.rxQueue.Set(uint64(rx + rx6)) } - m.rxQueue.Set(uint64(rx + rx6)) case <-m.done: t.Stop() return @@ -323,10 +342,10 @@ func procNetTCP(path string, addr []string, hasUnspecified bool, addrIsUnspecifi } found = true - // queue lengths are decimal, e.g.: + // queue lengths are hex, e.g.: // - https://elixir.bootlin.com/linux/v6.2.11/source/net/ipv4/tcp_ipv4.c#L2643 // - https://elixir.bootlin.com/linux/v6.2.11/source/net/ipv6/tcp_ipv6.c#L1987 - v, err := 
strconv.ParseInt(string(r), 10, 64) + v, err := strconv.ParseInt(string(r), 16, 64) if err != nil { return 0, fmt.Errorf("failed to parse rx_queue: %w", err) } diff --git a/filebeat/input/udp/input.go b/filebeat/input/udp/input.go index 831fb41c2ee6..cd7ca0c56051 100644 --- a/filebeat/input/udp/input.go +++ b/filebeat/input/udp/input.go @@ -231,33 +231,52 @@ func (m *inputMetrics) poll(addr, addr6 []string, each time.Duration, log *logp. // base level for the rx_queue and drops values and ensures that // if the constructed address values are malformed we panic early // within the period of system testing. + want4 := true rx, drops, err := procNetUDP("/proc/net/udp", addr, hasUnspecified, addrIsUnspecified) if err != nil { - log.Warnf("failed to get initial udp stats from /proc: %v", err) + want4 = false + log.Infof("did not get initial udp stats from /proc: %v", err) } + want6 := true rx6, drops6, err := procNetUDP("/proc/net/udp6", addr6, hasUnspecified6, addrIsUnspecified6) if err != nil { - log.Warnf("failed to get initial udp6 stats from /proc: %v", err) + want6 = false + log.Infof("did not get initial udp6 stats from /proc: %v", err) + } + if !want4 && !want6 { + log.Warnf("failed to get initial udp or udp6 stats from /proc: %v", err) + } else { + m.rxQueue.Set(uint64(rx + rx6)) + m.drops.Set(uint64(drops + drops6)) } - m.rxQueue.Set(uint64(rx + rx6)) - m.drops.Set(uint64(drops + drops6)) t := time.NewTicker(each) for { select { case <-t.C: + var found bool rx, drops, err := procNetUDP("/proc/net/udp", addr, hasUnspecified, addrIsUnspecified) if err != nil { - log.Warnf("failed to get udp stats from /proc: %v", err) - continue + if want4 { + log.Warnf("failed to get udp stats from /proc: %v", err) + } + } else { + found = true + want4 = true } rx6, drops6, err := procNetUDP("/proc/net/udp6", addr6, hasUnspecified6, addrIsUnspecified6) if err != nil { - log.Warnf("failed to get udp6 stats from /proc: %v", err) - continue + if want6 { + log.Warnf("failed to get 
udp6 stats from /proc: %v", err) + } + } else { + found = true + want6 = true + } + if found { + m.rxQueue.Set(uint64(rx + rx6)) + m.drops.Set(uint64(drops + drops6)) } - m.rxQueue.Set(uint64(rx + rx6)) - m.drops.Set(uint64(drops + drops6)) case <-m.done: t.Stop() return @@ -321,10 +340,10 @@ func procNetUDP(path string, addr []string, hasUnspecified bool, addrIsUnspecifi } found = true - // queue lengths and drops are decimal, e.g.: + // queue lengths and drops are hex, e.g.: // - https://elixir.bootlin.com/linux/v6.2.11/source/net/ipv4/udp.c#L3110 // - https://elixir.bootlin.com/linux/v6.2.11/source/net/ipv6/datagram.c#L1048 - v, err := strconv.ParseInt(string(r), 10, 64) + v, err := strconv.ParseInt(string(r), 16, 64) if err != nil { return 0, 0, fmt.Errorf("failed to parse rx_queue: %w", err) } diff --git a/filebeat/tests/system/test_crawler.py b/filebeat/tests/system/test_crawler.py index fba8debcaea6..2bea57223fe8 100644 --- a/filebeat/tests/system/test_crawler.py +++ b/filebeat/tests/system/test_crawler.py @@ -197,7 +197,10 @@ def test_file_renaming(self): # expecting 6 more events self.wait_until( - lambda: self.output_has(lines=iterations1 + iterations2), max_timeout=10) + lambda: self.output_has( + lines=iterations1 + + iterations2), + max_timeout=10) filebeat.check_kill_and_wait() @@ -247,7 +250,10 @@ def test_file_disappear(self): # Let it read the file self.wait_until( - lambda: self.output_has(lines=iterations1 + iterations2), max_timeout=10) + lambda: self.output_has( + lines=iterations1 + + iterations2), + max_timeout=10) filebeat.check_kill_and_wait() @@ -317,7 +323,10 @@ def test_file_disappear_appear(self): # Let it read the file self.wait_until( - lambda: self.output_has(lines=iterations1 + iterations2), max_timeout=10) + lambda: self.output_has( + lines=iterations1 + + iterations2), + max_timeout=10) filebeat.check_kill_and_wait() @@ -468,7 +477,8 @@ def test_tail_files(self): f.write("hello world 2\n") f.flush() - # Sleep 1 second to make sure 
the file is persisted on disk and timestamp is in the past + # Sleep 1 second to make sure the file is persisted on disk and + # timestamp is in the past time.sleep(1) filebeat = self.start_beat() @@ -569,6 +579,7 @@ def test_encodings(self): with codecs.open(self.working_dir + "/log/test-{}".format(enc_py), "w", enc_py) as f: f.write(text + "\n") + f.close() # create the config file inputs = [] @@ -592,10 +603,11 @@ def test_encodings(self): with codecs.open(self.working_dir + "/log/test-{}".format(enc_py), "a", enc_py) as f: f.write(text + " 2" + "\n") + f.close() # wait again self.wait_until(lambda: self.output_has(lines=len(encodings) * 2), - max_timeout=15) + max_timeout=60) filebeat.check_kill_and_wait() # check that all outputs are present in the JSONs in UTF-8 diff --git a/go.mod b/go.mod index af936d4063e7..ee391fb43d20 100644 --- a/go.mod +++ b/go.mod @@ -69,16 +69,16 @@ require ( github.com/dustin/go-humanize v1.0.1 github.com/eapache/go-resiliency v1.2.0 github.com/eclipse/paho.mqtt.golang v1.3.5 - github.com/elastic/elastic-agent-client/v7 v7.6.0 + github.com/elastic/elastic-agent-client/v7 v7.8.0 github.com/elastic/go-concert v0.2.0 - github.com/elastic/go-libaudit/v2 v2.4.0 + github.com/elastic/go-libaudit/v2 v2.5.0 github.com/elastic/go-licenser v0.4.1 github.com/elastic/go-lookslike v1.0.1 github.com/elastic/go-lumber v0.1.2-0.20220819171948-335fde24ea0f github.com/elastic/go-perf v0.0.0-20191212140718-9c656876f595 github.com/elastic/go-seccomp-bpf v1.4.0 github.com/elastic/go-structform v0.0.10 - github.com/elastic/go-sysinfo v1.11.2 + github.com/elastic/go-sysinfo v1.12.0 github.com/elastic/go-ucfg v0.8.6 github.com/elastic/gosigar v0.14.2 github.com/fatih/color v1.15.0 @@ -164,7 +164,7 @@ require ( google.golang.org/api v0.128.0 google.golang.org/genproto v0.0.0-20230920204549-e6e6cdab5c13 // indirect google.golang.org/grpc v1.58.3 - google.golang.org/protobuf v1.31.0 + google.golang.org/protobuf v1.32.0 gopkg.in/inf.v0 v0.9.1 
gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect gopkg.in/jcmturner/dnsutils.v1 v1.0.1 // indirect @@ -200,20 +200,21 @@ require ( github.com/aws/smithy-go v1.13.5 github.com/awslabs/kinesis-aggregation/go/v2 v2.0.0-20220623125934-28468a6701b5 github.com/elastic/bayeux v1.0.5 + github.com/elastic/ebpfevents v0.3.2 github.com/elastic/elastic-agent-autodiscover v0.6.7 - github.com/elastic/elastic-agent-libs v0.7.3 + github.com/elastic/elastic-agent-libs v0.7.5 github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b666f3 github.com/elastic/elastic-agent-system-metrics v0.9.1 - github.com/elastic/go-elasticsearch/v8 v8.11.1 - github.com/elastic/mito v1.7.0 + github.com/elastic/go-elasticsearch/v8 v8.12.0 + github.com/elastic/mito v1.8.0 github.com/elastic/toutoumomoma v0.0.0-20221026030040-594ef30cb640 github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15 - github.com/google/cel-go v0.17.7 + github.com/google/cel-go v0.19.0 github.com/googleapis/gax-go/v2 v2.12.0 github.com/gorilla/handlers v1.5.1 github.com/gorilla/mux v1.8.0 github.com/icholy/digest v0.1.22 - github.com/lestrrat-go/jwx/v2 v2.0.11 + github.com/lestrrat-go/jwx/v2 v2.0.19 github.com/otiai10/copy v1.12.0 github.com/pierrec/lz4/v4 v4.1.18 github.com/pkg/xattr v0.4.9 @@ -247,7 +248,7 @@ require ( github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0 // indirect github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c // indirect github.com/andybalholm/brotli v1.0.5 // indirect - github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect + github.com/antlr4-go/antlr/v4 v4.13.0 // indirect github.com/apache/arrow/go/v12 v12.0.0 // indirect github.com/apache/thrift v0.19.0 // indirect github.com/armon/go-radix v1.0.0 // indirect @@ -265,6 +266,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect + 
github.com/cilium/ebpf v0.12.3 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect @@ -273,14 +275,14 @@ require ( github.com/docker/go-metrics v0.0.1 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect github.com/eapache/queue v1.1.0 // indirect - github.com/elastic/elastic-transport-go/v8 v8.3.0 // indirect + github.com/elastic/elastic-transport-go/v8 v8.4.0 // indirect github.com/elastic/go-windows v1.0.1 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/fearful-symmetry/gomsr v0.0.1 // indirect github.com/felixge/httpsnoop v1.0.1 // indirect github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect github.com/go-logfmt/logfmt v0.5.1 // indirect - github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-stack/stack v1.8.0 // indirect @@ -321,7 +323,7 @@ require ( github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/kortschak/utter v1.5.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect - github.com/lestrrat-go/blackmagic v1.0.1 // indirect + github.com/lestrrat-go/blackmagic v1.0.2 // indirect github.com/lestrrat-go/httpcc v1.0.1 // indirect github.com/lestrrat-go/httprc v1.0.4 // indirect github.com/lestrrat-go/iter v1.0.2 // indirect @@ -365,9 +367,9 @@ require ( github.com/zeebo/xxh3 v1.0.2 // indirect go.elastic.co/fastjson v1.1.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel v1.19.0 // indirect - go.opentelemetry.io/otel/metric v1.19.0 // indirect - go.opentelemetry.io/otel/trace v1.19.0 // indirect + go.opentelemetry.io/otel v1.21.0 // indirect + go.opentelemetry.io/otel/metric v1.21.0 // indirect + go.opentelemetry.io/otel/trace v1.21.0 // indirect golang.org/x/exp 
v0.0.0-20231127185646-65229373498e // indirect golang.org/x/term v0.15.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect diff --git a/go.sum b/go.sum index e238a72691a5..52051e7f4cf6 100644 --- a/go.sum +++ b/go.sum @@ -248,8 +248,8 @@ github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/ github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= -github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= +github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= +github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= github.com/apache/arrow/go/arrow v0.0.0-20200923215132-ac86123a3f01/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0= @@ -430,6 +430,8 @@ github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLI github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.12.3 h1:8ht6F9MquybnY97at+VDZb3eQQr8ev79RueWeVaEcG4= +github.com/cilium/ebpf v0.12.3/go.mod h1:TctK1ivibvI3znr66ljgi4hqOT8EYQjz1KWBfb1UVgM= github.com/circonus-labs/circonus-gometrics 
v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= @@ -572,7 +574,6 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892 h1:qg9VbHo1TlL0KDM0vYvBG9EY0X0Yku5WYIPoFWt8f6o= github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892/go.mod h1:CTDl0pzVzE5DEzZhPfvhY/9sPFMQIxaJ9VAMs9AagrE= -github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= @@ -658,27 +659,29 @@ github.com/elastic/bayeux v1.0.5 h1:UceFq01ipmT3S8DzFK+uVAkbCdiPR0Bqei8qIGmUeY0= github.com/elastic/bayeux v1.0.5/go.mod h1:CSI4iP7qeo5MMlkznGvYKftp8M7qqP/3nzmVZoXHY68= github.com/elastic/dhcp v0.0.0-20200227161230-57ec251c7eb3 h1:lnDkqiRFKm0rxdljqrj3lotWinO9+jFmeDXIC4gvIQs= github.com/elastic/dhcp v0.0.0-20200227161230-57ec251c7eb3/go.mod h1:aPqzac6AYkipvp4hufTyMj5PDIphF3+At8zr7r51xjY= +github.com/elastic/ebpfevents v0.3.2 h1:UJ8kW5jw2TpUR5MEMaZ1O62sK9JQ+5xTlj+YpQC6BXc= +github.com/elastic/ebpfevents v0.3.2/go.mod h1:o21z5xup/9dK8u0Hg9bZRflSqqj1Zu5h2dg2hSTcUPQ= github.com/elastic/elastic-agent-autodiscover v0.6.7 h1:+KVjltN0rPsBrU8b156gV4lOTBgG/vt0efFCFARrf3g= github.com/elastic/elastic-agent-autodiscover v0.6.7/go.mod h1:hFeFqneS2r4jD0/QzGkrNk0YVdN0JGh7lCWdsH7zcI4= -github.com/elastic/elastic-agent-client/v7 v7.6.0 
h1:FEn6FjzynW4TIQo5G096Tr7xYK/P5LY9cSS6wRbXZTc= -github.com/elastic/elastic-agent-client/v7 v7.6.0/go.mod h1:GlUKrbVd/O1CRAZonpBeN3J0RlVqP6VGcrBjFWca+aM= -github.com/elastic/elastic-agent-libs v0.7.3 h1:tc6JDXYR+2XFMHJVv+7+M0OwAbZPxm3caLJEd943dlE= -github.com/elastic/elastic-agent-libs v0.7.3/go.mod h1:9hlSaDPm0XTrUWrZjwvckgov1pDHnsGyybzAjNe/1wA= +github.com/elastic/elastic-agent-client/v7 v7.8.0 h1:GHFzDJIWpdgI0qDk5EcqbQJGvwTsl2E2vQK3/xe+MYQ= +github.com/elastic/elastic-agent-client/v7 v7.8.0/go.mod h1:ihtjqJzYiIltlRhNruaSSc0ogxIhqPD5hOMKq16cI1s= +github.com/elastic/elastic-agent-libs v0.7.5 h1:4UMqB3BREvhwecYTs/L23oQp1hs/XUkcunPlmTZn5yg= +github.com/elastic/elastic-agent-libs v0.7.5/go.mod h1:pGMj5myawdqu+xE+WKvM5FQzKQ/MonikkWOzoFTJxaU= github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b666f3 h1:sb+25XJn/JcC9/VL8HX4r4QXSUq4uTNzGS2kxOE7u1U= github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b666f3/go.mod h1:rWarFM7qYxJKsi9WcV6ONcFjH/NA3niDNpTxO+8/GVI= github.com/elastic/elastic-agent-system-metrics v0.9.1 h1:r0ofKHgPpl+W09ie7tzGcCDC0d4NZbQUv37rSgHf4FM= github.com/elastic/elastic-agent-system-metrics v0.9.1/go.mod h1:9C1UEfj0P687HAzZepHszN6zXA+2tN2Lx3Osvq1zby8= -github.com/elastic/elastic-transport-go/v8 v8.3.0 h1:DJGxovyQLXGr62e9nDMPSxRyWION0Bh6d9eCFBriiHo= -github.com/elastic/elastic-transport-go/v8 v8.3.0/go.mod h1:87Tcz8IVNe6rVSLdBux1o/PEItLtyabHU3naC7IoqKI= +github.com/elastic/elastic-transport-go/v8 v8.4.0 h1:EKYiH8CHd33BmMna2Bos1rDNMM89+hdgcymI+KzJCGE= +github.com/elastic/elastic-transport-go/v8 v8.4.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= github.com/elastic/fsevents v0.0.0-20181029231046-e1d381a4d270 h1:cWPqxlPtir4RoQVCpGSRXmLqjEHpJKbR60rxh1nQZY4= github.com/elastic/fsevents v0.0.0-20181029231046-e1d381a4d270/go.mod h1:Msl1pdboCbArMF/nSCDUXgQuWTeoMmE/z8607X+k7ng= github.com/elastic/glog v1.0.1-0.20210831205241-7d8b5c89dfc4/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= 
github.com/elastic/go-concert v0.2.0 h1:GAQrhRVXprnNjtvTP9pWJ1d4ToEA4cU5ci7TwTa20xg= github.com/elastic/go-concert v0.2.0/go.mod h1:HWjpO3IAEJUxOeaJOWXWEp7imKd27foxz9V5vegC/38= -github.com/elastic/go-elasticsearch/v8 v8.11.1 h1:1VgTgUTbpqQZ4uE+cPjkOvy/8aw1ZvKcU0ZUE5Cn1mc= -github.com/elastic/go-elasticsearch/v8 v8.11.1/go.mod h1:GU1BJHO7WeamP7UhuElYwzzHtvf9SDmeVpSSy9+o6Qg= -github.com/elastic/go-libaudit/v2 v2.4.0 h1:PqaGnB+dncrdUXqzQMyJu/dGysAtk6m5V3GIBMY473I= -github.com/elastic/go-libaudit/v2 v2.4.0/go.mod h1:AjlnhinP+kKQuUJoXLVrqxBM8uyhQmkzoV6jjsCFP4Q= +github.com/elastic/go-elasticsearch/v8 v8.12.0 h1:krkiCf4peJa7bZwGegy01b5xWWaYpik78wvisTeRO1U= +github.com/elastic/go-elasticsearch/v8 v8.12.0/go.mod h1:wSzJYrrKPZQ8qPuqAqc6KMR4HrBfHnZORvyL+FMFqq0= +github.com/elastic/go-libaudit/v2 v2.5.0 h1:5OK919QRnGtcjVBz3n/cs5F42im1mPlVTA9TyIn2K54= +github.com/elastic/go-libaudit/v2 v2.5.0/go.mod h1:AjlnhinP+kKQuUJoXLVrqxBM8uyhQmkzoV6jjsCFP4Q= github.com/elastic/go-licenser v0.4.1 h1:1xDURsc8pL5zYT9R29425J3vkHdt4RT5TNEMeRN48x4= github.com/elastic/go-licenser v0.4.1/go.mod h1:V56wHMpmdURfibNBggaSBfqgPxyT1Tldns1i87iTEvU= github.com/elastic/go-lookslike v1.0.1 h1:qVieyn6i/kx4xntar1cEB0qrGHVGNCX5KC8czAaTW/0= @@ -693,8 +696,8 @@ github.com/elastic/go-seccomp-bpf v1.4.0 h1:6y3lYrEHrLH9QzUgOiK8WDqmPaMnnB785Wxi github.com/elastic/go-seccomp-bpf v1.4.0/go.mod h1:wIMxjTbKpWGQk4CV9WltlG6haB4brjSH/dvAohBPM1I= github.com/elastic/go-structform v0.0.10 h1:oy08o/Ih2hHTkNcRY/1HhaYvIp5z6t8si8gnCJPDo1w= github.com/elastic/go-structform v0.0.10/go.mod h1:CZWf9aIRYY5SuKSmOhtXScE5uQiLZNqAFnwKR4OrIM4= -github.com/elastic/go-sysinfo v1.11.2 h1:mcm4OSYVMyws6+n2HIVMGkln5HOpo5Ie1ZmbbNn0jg4= -github.com/elastic/go-sysinfo v1.11.2/go.mod h1:GKqR8bbMK/1ITnez9NIsIfXQr25aLhRJa7AfT8HpBFQ= +github.com/elastic/go-sysinfo v1.12.0 h1:ZKyB4N5XLnGFysNGNnJl8xvd+GBGCe2MemBykR+3yQI= +github.com/elastic/go-sysinfo v1.12.0/go.mod h1:GKqR8bbMK/1ITnez9NIsIfXQr25aLhRJa7AfT8HpBFQ= github.com/elastic/go-ucfg v0.8.6 
h1:stUeyh2goTgGX+/wb9gzKvTv0YB0231LTpKUgCKj4U0= github.com/elastic/go-ucfg v0.8.6/go.mod h1:4E8mPOLSUV9hQ7sgLEJ4bvt0KhMuDJa8joDT2QGAEKA= github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0= @@ -703,8 +706,8 @@ github.com/elastic/gopacket v1.1.20-0.20211202005954-d412fca7f83a h1:8WfL/X6fK11 github.com/elastic/gopacket v1.1.20-0.20211202005954-d412fca7f83a/go.mod h1:riddUzxTSBpJXk3qBHtYr4qOhFhT6k/1c0E3qkQjQpA= github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= -github.com/elastic/mito v1.7.0 h1:cb4/z7Pt1Sonw92ucUMPcfbzX8MC+b6Hvf4ZMBJWg74= -github.com/elastic/mito v1.7.0/go.mod h1:nh7WSVimSs4d0N9Zakw+ZNOZL0wKl+jmQLT49JLxRQs= +github.com/elastic/mito v1.8.0 h1:i3GOtcnNuEEH2XMqnQdPvNjIBA8m0VKuTTfvusfCfnU= +github.com/elastic/mito v1.8.0/go.mod h1:n7AvUVtYQQXb8fq87FI8z67TNzuhwBV3kHBkDT1qJYQ= github.com/elastic/ristretto v0.1.1-0.20220602190459-83b0895ca5b3 h1:ChPwRVv1RR4a0cxoGjKcyWjTEpxYfm5gydMIzo32cAw= github.com/elastic/ristretto v0.1.1-0.20220602190459-83b0895ca5b3/go.mod h1:RAy2GVV4sTWVlNMavv3xhLsk18rxhfhDnombTe6EF5c= github.com/elastic/sarama v1.19.1-0.20220310193331-ebc2b0d8eef3 h1:FzA0/n4iMt8ojGDGRoiFPSHFvvdVIvxOxyLtiFnrLBM= @@ -755,8 +758,8 @@ github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15/go.mod h1:tPg4cp github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= -github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= -github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= +github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA= 
+github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= github.com/gabriel-vasile/mimetype v1.4.1/go.mod h1:05Vi0w3Y9c/lNvJOdmIwvrrAhX3rYhfQQCaf9VJcv7M= github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= @@ -768,6 +771,8 @@ github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0 github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/go-chi/chi v4.1.0+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= +github.com/go-faker/faker/v4 v4.2.0 h1:dGebOupKwssrODV51E0zbMrv5e2gO9VWSLNC1WDCpWg= +github.com/go-faker/faker/v4 v4.2.0/go.mod h1:F/bBy8GH9NxOxMInug5Gx4WYeG6fHJZ8Ol/dhcpRub4= github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= @@ -794,8 +799,8 @@ github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTg github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod 
h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab h1:xveKWz2iaueeTaUgdetzel+U7exyigDYBryyVfV/rZk= @@ -1027,8 +1032,8 @@ github.com/gomodule/redigo v1.8.3/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUz github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/cel-go v0.17.7 h1:6ebJFzu1xO2n7TLtN+UBqShGBhlD85bhvglh5DpcfqQ= -github.com/google/cel-go v0.17.7/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= +github.com/google/cel-go v0.19.0 h1:vVgaZoHPBDd1lXCYGQOh5A06L4EtuIfmqQ/qnSXSKiU= +github.com/google/cel-go v0.19.0/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg= github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/flatbuffers v1.12.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= @@ -1341,8 +1346,9 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod 
h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -1351,17 +1357,16 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/lestrrat-go/blackmagic v1.0.1 h1:lS5Zts+5HIC/8og6cGHb0uCcNCa3OUt1ygh3Qz2Fe80= -github.com/lestrrat-go/blackmagic v1.0.1/go.mod h1:UrEqBzIR2U6CnzVyUtfM6oZNMt/7O7Vohk2J0OGSAtU= +github.com/lestrrat-go/blackmagic v1.0.2 h1:Cg2gVSc9h7sz9NOByczrbUvLopQmXrfFx//N+AkAr5k= +github.com/lestrrat-go/blackmagic v1.0.2/go.mod h1:UrEqBzIR2U6CnzVyUtfM6oZNMt/7O7Vohk2J0OGSAtU= github.com/lestrrat-go/httpcc v1.0.1 h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE= github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E= github.com/lestrrat-go/httprc v1.0.4 h1:bAZymwoZQb+Oq8MEbyipag7iSq6YIga8Wj6GOiJGdI8= github.com/lestrrat-go/httprc v1.0.4/go.mod h1:mwwz3JMTPBjHUkkDv/IGJ39aALInZLrhBp0X7KGUZlo= github.com/lestrrat-go/iter v1.0.2 h1:gMXo1q4c2pHmC3dn8LzRhJfP1ceCbgSiT9lUydIzltI= github.com/lestrrat-go/iter v1.0.2/go.mod h1:Momfcq3AnRlRjI5b5O8/G5/BvpzrhoFTZcn06fEOPt4= -github.com/lestrrat-go/jwx/v2 v2.0.11 h1:ViHMnaMeaO0qV16RZWBHM7GTrAnX2aFLVKofc7FuKLQ= -github.com/lestrrat-go/jwx/v2 v2.0.11/go.mod h1:ZtPtMFlrfDrH2Y0iwfa3dRFn8VzwBrB+cyrm3IBWdDg= -github.com/lestrrat-go/option v1.0.0/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= +github.com/lestrrat-go/jwx/v2 v2.0.19 h1:ekv1qEZE6BVct89QA+pRF6+4pCpfVrOnEJnTnT4RXoY= +github.com/lestrrat-go/jwx/v2 v2.0.19/go.mod 
h1:l3im3coce1lL2cDeAjqmaR+Awx+X8Ih+2k8BuHNJ4CU= github.com/lestrrat-go/option v1.0.1 h1:oAzP2fvZGQKWkvHa1/SAcFolBEca1oN+mQ7eooNBEYU= github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= @@ -1962,12 +1967,14 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= -go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= -go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= -go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= -go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= -go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= +go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= +go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= +go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= +go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= +go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= +go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= +go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= 
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -2041,7 +2048,6 @@ golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2184,7 +2190,6 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2344,7 +2349,6 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2352,7 +2356,6 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= @@ -2363,7 +2366,6 @@ golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuX golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2379,7 +2381,6 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text 
v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2655,8 +2656,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/heartbeat/security/security.go b/heartbeat/security/security.go index 20c0f5cc7d68..8e15102f7b8d 100644 --- a/heartbeat/security/security.go +++ b/heartbeat/security/security.go @@ -26,8 +26,6 @@ import ( "strconv" "syscall" - sysinfo "github.com/elastic/go-sysinfo" - "kernel.org/pub/linux/libs/security/libcap/cap" ) @@ -36,13 +34,7 @@ func init() { // In the context of a container, where users 
frequently run as root, we follow BEAT_SETUID_AS to setuid/gid // and add capabilities to make this actually run as a regular user. This also helps Node.js in synthetics, which // does not want to run as root. It's also just generally more secure. - sysInfo, err := sysinfo.Host() - isContainer := false - if err == nil && sysInfo.Info().Containerized != nil { - isContainer = *sysInfo.Info().Containerized - } - - if localUserName := os.Getenv("BEAT_SETUID_AS"); isContainer && localUserName != "" && syscall.Geteuid() == 0 { + if localUserName := os.Getenv("BEAT_SETUID_AS"); localUserName != "" && syscall.Geteuid() == 0 { err := setNodeProcAttr(localUserName) if err != nil { panic(err) diff --git a/libbeat/autodiscover/template/config.go b/libbeat/autodiscover/template/config.go index 3ba0db210de9..c050ff8acd86 100644 --- a/libbeat/autodiscover/template/config.go +++ b/libbeat/autodiscover/template/config.go @@ -154,7 +154,7 @@ func ApplyConfigTemplate(event bus.Event, configs []*conf.C, options ...ucfg.Opt var unpacked map[string]interface{} err = c.Unpack(&unpacked, opts...) if err != nil { - logp.Warn("autodiscover: Configuration template cannot be resolved: %v", err) + logp.Debug("autodiscover", "Configuration template cannot be resolved: %v", err) continue } // Repack again: diff --git a/libbeat/cmd/instance/beat.go b/libbeat/cmd/instance/beat.go index efe8bd48f79a..4b7470b1dbd5 100644 --- a/libbeat/cmd/instance/beat.go +++ b/libbeat/cmd/instance/beat.go @@ -197,7 +197,7 @@ func initRand() { } else { seed = n.Int64() } - rand.Seed(seed) + rand.Seed(seed) //nolint:staticcheck // need seed from cryptographically strong PRNG. } // Run initializes and runs a Beater implementation. 
name is the name of the @@ -824,7 +824,10 @@ func (b *Beat) configure(settings Settings) error { return fmt.Errorf("failed to get host information: %w", err) } - fqdn, err := h.FQDN() + fqdnLookupCtx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + + fqdn, err := h.FQDNWithContext(fqdnLookupCtx) if err != nil { // FQDN lookup is "best effort". We log the error, fallback to // the OS-reported hostname, and move on. @@ -835,10 +838,25 @@ func (b *Beat) configure(settings Settings) error { } // initialize config manager - b.Manager, err = management.NewManager(b.Config.Management, reload.RegisterV2) + m, err := management.NewManager(b.Config.Management, reload.RegisterV2) if err != nil { return err } + b.Manager = m + + if b.Manager.AgentInfo().Version != "" { + // During the manager initialization the client to connect to the agent is + // also initialized. That makes the beat to read information sent by the + // agent, which includes the AgentInfo with the agent's package version. + // Components running under agent should report the agent's package version + // as their own version. + // In order to do so b.Info.Version needs to be set to the version the agent + // sent. As this Beat instance is initialized much before the package + // version is received, it's overridden here. So far it's early enough for + // the whole beat to report the right version. 
+ b.Info.Version = b.Manager.AgentInfo().Version + version.SetPackageVersion(b.Info.Version) + } if err := b.Manager.CheckRawConfig(b.RawConfig); err != nil { return err @@ -1518,13 +1536,13 @@ func (bc *beatConfig) Validate() error { if bc.Pipeline.Queue.IsSet() && outputPC.Queue.IsSet() { return fmt.Errorf("top level queue and output level queue settings defined, only one is allowed") } - //elastic-agent doesn't support disk queue yet + // elastic-agent doesn't support disk queue yet if bc.Management.Enabled() && outputPC.Queue.Config().Enabled() && outputPC.Queue.Name() == diskqueue.QueueType { return fmt.Errorf("disk queue is not supported when management is enabled") } } - //elastic-agent doesn't support disk queue yet + // elastic-agent doesn't support disk queue yet if bc.Management.Enabled() && bc.Pipeline.Queue.Config().Enabled() && bc.Pipeline.Queue.Name() == diskqueue.QueueType { return fmt.Errorf("disk queue is not supported when management is enabled") } diff --git a/libbeat/common/capabilities/capabilities_linux.go b/libbeat/common/capabilities/capabilities_linux.go new file mode 100644 index 000000000000..715b86d9bc7e --- /dev/null +++ b/libbeat/common/capabilities/capabilities_linux.go @@ -0,0 +1,161 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build linux + +package capabilities + +import ( + "errors" + "math/bits" + "strconv" + "strings" + + "kernel.org/pub/linux/libs/security/libcap/cap" +) + +var ( + // errInvalidCapability expresses an invalid capability ID: x < 0 || x >= 64. + errInvalidCapability = errors.New("invalid capability") +) + +// The capability set flag/vector, re-exported from +// libcap(3). Inherit, Bound & Ambient not exported since we have no +// use for it yet. +type Flag = cap.Flag + +const ( + // aka CapEff + Effective = cap.Effective + // aka CapPrm + Permitted = cap.Permitted +) + +// Fetch the capabilities of pid for a given flag/vector and convert +// it to the representation used in ECS. cap.GetPID() fetches it with +// SYS_CAPGET. +// Returns errors.ErrUnsupported on "not linux". +func FromPid(flag Flag, pid int) ([]string, error) { + set, err := cap.GetPID(pid) + if err != nil { + return nil, err + } + empty, err := isEmpty(flag, set) + if err != nil { + return nil, err + } + if empty { + return []string{}, nil + } + + sl := make([]string, 0, cap.MaxBits()) + for i := 0; i < int(cap.MaxBits()); i++ { + c := cap.Value(i) + enabled, err := set.GetFlag(flag, c) + if err != nil { + return nil, err + } + if !enabled { + continue + } + s, err := toECS(i) + // impossible since MaxBits <= 64 + if err != nil { + return nil, err + } + sl = append(sl, s) + } + + return sl, err +} + +// Convert a uint64 to the capabilities representation used in ECS. +// Returns errors.ErrUnsupported on "not linux". +func FromUint64(w uint64) ([]string, error) { + sl := make([]string, 0, bits.OnesCount64(w)) + for i := 0; w != 0; i++ { + if w&1 != 0 { + s, err := toECS(i) + // impossible since MaxBits <= 64 + if err != nil { + return nil, err + } + sl = append(sl, s) + } + w >>= 1 + } + + return sl, nil +} + +// Convert a string to the capabilities representation used in +// ECS. 
Example input: "1ffffffffff", 16. +// Returns errors.ErrUnsupported on "not linux". +func FromString(s string, base int) ([]string, error) { + w, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return nil, err + } + + return FromUint64(w) +} + +// True if sets are equal for the given flag/vector, errors out in +// case any of the sets is malformed. +func isEqual(flag Flag, a *cap.Set, b *cap.Set) (bool, error) { + d, err := a.Cf(b) + if err != nil { + return false, err + } + + return !d.Has(flag), nil +} + +// Convert the capability ID to a string suitable to be used in +// ECS. +// If capabiliy ID X is unknown, but valid (0 <= X < 64), "CAP_X" +// will be returned instead. Fetches from an internal table built at +// startup. +var toECS = makeToECS() + +// Make toECS() which creates a map of every possible valid capability +// ID on startup. Returns errInvalidCapabilty for an invalid ID. +func makeToECS() func(int) (string, error) { + ecsNames := make(map[int]string) + + for i := 0; i < 64; i++ { + c := cap.Value(i) + if i < int(cap.MaxBits()) { + ecsNames[i] = strings.ToUpper(c.String()) + } else { + ecsNames[i] = strings.ToUpper("CAP_" + c.String()) + } + } + + return func(b int) (string, error) { + s, ok := ecsNames[b] + if !ok { + return "", errInvalidCapability + } + return s, nil + } +} + +// Like isAll(), but for the empty set, here for symmetry. +func isEmpty(flag Flag, set *cap.Set) (bool, error) { + return isEqual(flag, set, cap.NewSet()) +} diff --git a/libbeat/common/capabilities/capabilities_linux_test.go b/libbeat/common/capabilities/capabilities_linux_test.go new file mode 100644 index 000000000000..1481fc5679b2 --- /dev/null +++ b/libbeat/common/capabilities/capabilities_linux_test.go @@ -0,0 +1,87 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package capabilities + +import ( + "os" + "testing" + + "github.com/stretchr/testify/assert" + "kernel.org/pub/linux/libs/security/libcap/cap" +) + +func TestEmpty(t *testing.T) { + sl, err := FromString("0", 16) + assert.Nil(t, err) + assert.Equal(t, len(sl), 0) + + sl, err = FromUint64(0) + assert.Nil(t, err) + assert.Equal(t, len(sl), 0) + + // assumes non root has no capabilities + if os.Geteuid() != 0 { + empty := cap.NewSet() + self := cap.GetProc() + d, err := self.Cf(empty) + assert.Nil(t, err) + assert.False(t, d.Has(cap.Effective)) + assert.False(t, d.Has(cap.Permitted)) + assert.False(t, d.Has(cap.Inheritable)) + } +} + +func TestOverflow(t *testing.T) { + sl, err := FromUint64(^uint64(0)) + assert.Nil(t, err) + assert.Equal(t, len(sl), 64) + + for _, cap := range []string{ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_IPC_LOCK", + "CAP_MAC_OVERRIDE", + } { + assertHasCap(t, sl, cap) + } + if cap.MaxBits() <= 62 { + assertHasCap(t, sl, "CAP_62") + } + if cap.MaxBits() <= 63 { + assertHasCap(t, sl, "CAP_63") + } +} + +func assertHasCap(t *testing.T, sl []string, s string) { + var found int + + for _, s2 := range sl { + if s2 == s { + found++ + } + } + + assert.Equal(t, found, 1, s) +} diff --git a/libbeat/common/capabilities/capabilities_other.go 
b/libbeat/common/capabilities/capabilities_other.go new file mode 100644 index 000000000000..fbd7e8797728 --- /dev/null +++ b/libbeat/common/capabilities/capabilities_other.go @@ -0,0 +1,47 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build !linux + +package capabilities + +import "errors" + +// Dummy value on "not linux". +type Flag = uint + +const ( + // Meaningless on "not linux". + Effective = Flag(0) + // Meaningless on "not linux". + Permitted = Flag(1) +) + +// Returns errors.ErrUnsupported on "not linux". +func FromPid(flag Flag, pid int) ([]string, error) { + return nil, errors.ErrUnsupported +} + +// Returns errors.ErrUnsupported on "not linux". +func FromUint64(w uint64) ([]string, error) { + return nil, errors.ErrUnsupported +} + +// Returns errors.ErrUnsupported on "not linux". +func FromString(s string, base int) ([]string, error) { + return nil, errors.ErrUnsupported +} diff --git a/libbeat/common/capabilities_linux.go b/libbeat/common/capabilities_linux.go deleted file mode 100644 index b2992c251ef8..000000000000 --- a/libbeat/common/capabilities_linux.go +++ /dev/null @@ -1,66 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. 
See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -//go:build linux - -package common - -import ( - "errors" - "fmt" - - "github.com/elastic/go-sysinfo" - "github.com/elastic/go-sysinfo/types" -) - -// Capabilities contains the capability sets of a process -type Capabilities types.CapabilityInfo - -// Check performs a permission check for a given capabilities set -func (c Capabilities) Check(set []string) bool { - for _, capability := range set { - found := false - for _, effective := range c.Effective { - if capability == effective { - found = true - break - } - } - if !found { - return false - } - } - return true -} - -// GetCapabilities gets the capabilities of this process -func GetCapabilities() (Capabilities, error) { - p, err := sysinfo.Self() - if err != nil { - return Capabilities{}, fmt.Errorf("failed to read self process information: %w", err) - } - - if c, ok := p.(types.Capabilities); ok { - capabilities, err := c.Capabilities() - if err != nil { - return Capabilities{}, fmt.Errorf("failed to read process capabilities: %w", err) - } - return Capabilities(*capabilities), nil - } - - return Capabilities{}, errors.New("capabilities not available") -} diff --git a/libbeat/common/seccomp/policy_linux_386.go b/libbeat/common/seccomp/policy_linux_386.go index 724666987201..ac2a93a5c741 100644 --- 
a/libbeat/common/seccomp/policy_linux_386.go +++ b/libbeat/common/seccomp/policy_linux_386.go @@ -31,6 +31,7 @@ func init() { "_llseek", "access", "brk", + "capget", "chmod", "chown", "clock_gettime", diff --git a/libbeat/common/seccomp/policy_linux_amd64.go b/libbeat/common/seccomp/policy_linux_amd64.go index 0a05bdde9275..624f48c890a2 100644 --- a/libbeat/common/seccomp/policy_linux_amd64.go +++ b/libbeat/common/seccomp/policy_linux_amd64.go @@ -34,6 +34,7 @@ func init() { "arch_prctl", "bind", "brk", + "capget", "chmod", "chown", "clock_gettime", diff --git a/libbeat/docs/release.asciidoc b/libbeat/docs/release.asciidoc index 47a6f1eaf23f..08da0875d41e 100644 --- a/libbeat/docs/release.asciidoc +++ b/libbeat/docs/release.asciidoc @@ -8,6 +8,7 @@ This section summarizes the changes in each release. Also read <> for more detail about changes that affect upgrade. +* <> * <> * <> * <> diff --git a/libbeat/ebpf/seccomp_linux.go b/libbeat/ebpf/seccomp_linux.go new file mode 100644 index 000000000000..9059eb0f6433 --- /dev/null +++ b/libbeat/ebpf/seccomp_linux.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +//go:build linux + +package ebpf + +import ( + "runtime" + + "github.com/elastic/beats/v7/libbeat/common/seccomp" +) + +func init() { + switch runtime.GOARCH { + case "amd64": + syscalls := []string{ + "bpf", + "eventfd2", // needed by ringbuf + "perf_event_open", // needed by tracepoints + } + if err := seccomp.ModifyDefaultPolicy(seccomp.AddSyscall, syscalls...); err != nil { + panic(err) + } + } +} diff --git a/libbeat/ebpf/watcher_linux.go b/libbeat/ebpf/watcher_linux.go new file mode 100644 index 000000000000..e0da448d87a6 --- /dev/null +++ b/libbeat/ebpf/watcher_linux.go @@ -0,0 +1,183 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build linux + +package ebpf + +import ( + "context" + "fmt" + "sync" + + "github.com/elastic/ebpfevents" +) + +var ( + gWatcherOnce sync.Once + gWatcher Watcher +) + +type client struct { + name string + mask EventMask + records chan ebpfevents.Record +} + +// EventMask is a mask of ebpfevents.EventType which is used to control which event types clients will receive. +type EventMask uint64 + +// Watcher observes kernel events, using ebpf probes from the ebpfevents library, and sends the +// events to subscribing clients. 
+// +// A single global watcher can exist, and can deliver events to multiple clients. Clients subscribe +// to the watcher, and all ebpf events that match their mask will be sent to their channel. +type Watcher struct { + sync.Mutex + cancel context.CancelFunc + loader *ebpfevents.Loader + clients map[string]client + status status + err error +} + +type status int + +const ( + stopped status = iota + started +) + +// GetWatcher creates the watcher, if required, and returns a reference to the global Watcher. +func GetWatcher() (*Watcher, error) { + gWatcher.Lock() + defer gWatcher.Unlock() + + // Try to load the probe once on startup so consumers can error out. + gWatcherOnce.Do(func() { + if gWatcher.status == stopped { + l, err := ebpfevents.NewLoader() + if err != nil { + gWatcher.err = fmt.Errorf("init ebpf loader: %w", err) + return + } + _ = l.Close() + } + }) + + return &gWatcher, gWatcher.err +} + +// Subscribe to receive events from the watcher. +func (w *Watcher) Subscribe(clientName string, events EventMask) <-chan ebpfevents.Record { + w.Lock() + defer w.Unlock() + + if w.status == stopped { + w.startLocked() + } + + w.clients[clientName] = client{ + name: clientName, + mask: events, + records: make(chan ebpfevents.Record, w.loader.BufferLen()), + } + + return w.clients[clientName].records +} + +// Unsubscribe the client with the given name. 
+func (w *Watcher) Unsubscribe(clientName string) { + w.Lock() + defer w.Unlock() + + delete(w.clients, clientName) + + if w.nclients() == 0 { + w.stopLocked() + } +} + +func (w *Watcher) startLocked() { + if w.status == started { + return + } + + loader, err := ebpfevents.NewLoader() + if err != nil { + w.err = fmt.Errorf("start ebpf loader: %w", err) + return + } + + w.loader = loader + w.clients = make(map[string]client) + + records := make(chan ebpfevents.Record, loader.BufferLen()) + var ctx context.Context + ctx, w.cancel = context.WithCancel(context.Background()) + + go w.loader.EventLoop(ctx, records) + go func(ctx context.Context) { + for { + select { + case record := <-records: + if record.Error != nil { + for _, client := range w.clients { + client.records <- record + } + continue + } + for _, client := range w.clients { + if client.mask&EventMask(record.Event.Type) != 0 { + client.records <- record + } + } + continue + case <-ctx.Done(): + return + } + } + }(ctx) + + w.status = started +} + +func (w *Watcher) stopLocked() { + if w.status == stopped { + return + } + w.close() + w.status = stopped +} + +func (w *Watcher) nclients() int { + return len(w.clients) +} + +func (w *Watcher) close() { + if w.cancel != nil { + w.cancel() + } + + if w.loader != nil { + _ = w.loader.Close() + } + + for _, cl := range w.clients { + close(cl.records) + } +} diff --git a/libbeat/ebpf/watcher_test.go b/libbeat/ebpf/watcher_test.go new file mode 100644 index 000000000000..13d27ffd52c0 --- /dev/null +++ b/libbeat/ebpf/watcher_test.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build linux + +package ebpf + +import ( + "math" + "testing" + + "github.com/stretchr/testify/assert" +) + +const allEvents = EventMask(math.MaxUint64) + +func TestWatcherStartStop(t *testing.T) { + w, err := GetWatcher() + if err != nil { + t.Skipf("skipping ebpf watcher test: %v", err) + } + assert.Equal(t, gWatcher.status, stopped) + assert.Equal(t, 0, gWatcher.nclients()) + + _ = w.Subscribe("test-1", allEvents) + assert.Equal(t, gWatcher.status, started) + assert.Equal(t, 1, gWatcher.nclients()) + + _ = w.Subscribe("test-2", allEvents) + assert.Equal(t, 2, gWatcher.nclients()) + + w.Unsubscribe("test-2") + assert.Equal(t, 1, gWatcher.nclients()) + + w.Unsubscribe("dummy") + assert.Equal(t, 1, gWatcher.nclients()) + + assert.Equal(t, gWatcher.status, started) + w.Unsubscribe("test-1") + assert.Equal(t, 0, gWatcher.nclients()) + assert.Equal(t, gWatcher.status, stopped) + + _ = w.Subscribe("new", allEvents) + assert.Equal(t, 1, gWatcher.nclients()) + assert.Equal(t, gWatcher.status, started) + w.Unsubscribe("new") +} diff --git a/libbeat/management/management.go b/libbeat/management/management.go index 88faa48f5408..177642b33988 100644 --- a/libbeat/management/management.go +++ b/libbeat/management/management.go @@ -82,9 +82,12 @@ type Manager interface { // // Calls to 'CheckRawConfig()' or 'SetPayload()' will be ignored after calling stop. // - // Note: Stop will not call 'UnregisterAction()' automaticallty. + // Note: Stop will not call 'UnregisterAction()' automatically. 
Stop() + // AgentInfo returns the information of the agent to which the manager is connected. + AgentInfo() client.AgentInfo + // SetStopCallback accepts a function that need to be called when the manager want to shutdown the // beats. This is needed when you want your beats to be gracefully shutdown remotely by the Elastic Agent // when a policy doesn't need to run this beat. @@ -190,6 +193,7 @@ func (n *fallbackManager) Stop() { // but that does not mean the Beat is being managed externally, // hence it will always return false. func (n *fallbackManager) Enabled() bool { return false } +func (n *fallbackManager) AgentInfo() client.AgentInfo { return client.AgentInfo{} } func (n *fallbackManager) Start() error { return nil } func (n *fallbackManager) CheckRawConfig(cfg *config.C) error { return nil } func (n *fallbackManager) RegisterAction(action client.Action) {} diff --git a/libbeat/processors/add_cloud_metadata/docs/add_cloud_metadata.asciidoc b/libbeat/processors/add_cloud_metadata/docs/add_cloud_metadata.asciidoc index 9e61cac2e8cf..c6dbdd5600a9 100644 --- a/libbeat/processors/add_cloud_metadata/docs/add_cloud_metadata.asciidoc +++ b/libbeat/processors/add_cloud_metadata/docs/add_cloud_metadata.asciidoc @@ -21,6 +21,11 @@ The following cloud providers are supported: - Openstack Nova - Hetzner Cloud +NOTE: `huawei` is an alias for `openstack`. Huawei cloud runs on OpenStack platform, and when +viewed from a metadata API standpoint, it is impossible to differentiate it from OpenStack. If you know that your +deployments run on Huawei Cloud exclusively, and you wish to have `cloud.provider` value as `huawei`, you can achieve +this by overwriting the value using an `add_fields` processor. + The Alibaba Cloud and Tencent cloud providers are disabled by default, because they require to access a remote host. The `providers` setting allows users to select a list of default providers to query. 
@@ -53,10 +58,9 @@ List of names the `providers` setting supports: - "digitalocean" for Digital Ocean (enabled by default). - "aws", or "ec2" for Amazon Web Services (enabled by default). - "gcp" for Google Copmute Enging (enabled by default). -- "openstack", or "nova" for Openstack Nova (enabled by default). +- "openstack", "nova", or "huawei" for Openstack Nova (enabled by default). - "openstack-ssl", or "nova-ssl" for Openstack Nova when SSL metadata APIs are enabled (enabled by default). - "tencent", or "qcloud" for Tencent Cloud (disabled by default). -- "huawei" for Huawei Cloud (enabled by default). - "hetzner" for Hetzner Cloud (enabled by default). The third optional configuration setting is `overwrite`. When `overwrite` is @@ -128,20 +132,6 @@ _Tencent Cloud_ } ------------------------------------------------------------------------------- -_Huawei Cloud_ - -[source,json] -------------------------------------------------------------------------------- -{ - "cloud": { - "availability_zone": "cn-east-2b", - "instance.id": "37da9890-8289-4c58-ba34-a8271c4a8216", - "provider": "huawei", - "region": "cn-east-2" - } -} -------------------------------------------------------------------------------- - _Alibaba Cloud_ This metadata is only available when VPC is selected as the network type of the diff --git a/libbeat/processors/add_cloud_metadata/provider_huawei_cloud.go b/libbeat/processors/add_cloud_metadata/provider_huawei_cloud.go deleted file mode 100644 index 36683e74a134..000000000000 --- a/libbeat/processors/add_cloud_metadata/provider_huawei_cloud.go +++ /dev/null @@ -1,81 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package add_cloud_metadata - -import ( - "encoding/json" - - conf "github.com/elastic/elastic-agent-libs/config" - "github.com/elastic/elastic-agent-libs/mapstr" -) - -type hwMeta struct { - ImageName string `json:"image_name"` - VpcID string `json:"vpc_id"` -} - -type hwMetadata struct { - UUID string `json:"uuid"` - AvailabilityZone string `json:"availability_zone"` - RegionID string `json:"region_id"` - Meta *hwMeta `json:"meta"` - ProjectID string `json:"project_id"` - Name string `json:"name"` -} - -// Huawei Cloud Metadata Service -// Document https://support.huaweicloud.com/usermanual-ecs/ecs_03_0166.html -var huaweiMetadataFetcher = provider{ - Name: "huawei-cloud", - - Local: true, - - Create: func(_ string, c *conf.C) (metadataFetcher, error) { - metadataHost := "169.254.169.254" - huaweiCloudMetadataJSONURI := "/openstack/latest/meta_data.json" - - huaweiCloudSchema := func(m map[string]interface{}) mapstr.M { - m["service"] = mapstr.M{ - "name": "ECS", - } - return mapstr.M{"cloud": m} - } - - urls, err := getMetadataURLs(c, metadataHost, []string{ - huaweiCloudMetadataJSONURI, - }) - if err != nil { - return nil, err - } - responseHandlers := map[string]responseHandler{ - urls[0]: func(all []byte, result *result) error { - data := new(hwMetadata) - err := json.Unmarshal(all, data) - if err != nil { - return err - } - result.metadata.Put("instance.id", data.UUID) - result.metadata.Put("region", data.RegionID) - result.metadata.Put("availability_zone", data.AvailabilityZone) - return nil - }, - } - fetcher := 
&httpMetadataFetcher{"huawei", nil, responseHandlers, huaweiCloudSchema} - return fetcher, nil - }, -} diff --git a/libbeat/processors/add_cloud_metadata/provider_huawei_cloud_test.go b/libbeat/processors/add_cloud_metadata/provider_huawei_cloud_test.go deleted file mode 100644 index 0ae6fc332f09..000000000000 --- a/libbeat/processors/add_cloud_metadata/provider_huawei_cloud_test.go +++ /dev/null @@ -1,98 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package add_cloud_metadata - -import ( - "net/http" - "net/http/httptest" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/beats/v7/libbeat/beat" - conf "github.com/elastic/elastic-agent-libs/config" - "github.com/elastic/elastic-agent-libs/logp" - "github.com/elastic/elastic-agent-libs/mapstr" -) - -func initHuaweiCloudTestServer() *httptest.Server { - return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.RequestURI == "/openstack/latest/meta_data.json" { - w.Write([]byte(`{ - "random_seed": "CWIZtYK4y5pzMtShTtCKx16qB1DsA/2kL0US4u1fHxedODNr7gos4RgdE/z9eHucnltnlJfDY1remfGL60yzTsvEIWPdECOpPaJm1edIYQaUvQzdeQwKcOQAHjUP5wLQzGA3j3Pw10p7u+M7glHEwNRoEY1WsbVYwzyOOkBnqb+MJ1aOhiRnfNtHOxjLNBSDvjHaQZzoHL+1YNAxDYFezE83nE2m3ciVwZO7xWpdKDQ+W5hYBUsYAWODRMOYqIR/5ZLsfAfxE2DhK+NvuMyJ5yjO+ObQf0DN5nRUSrM5ajs84UVMr9ylJuT78ckh83CLSttsjzXJ+sr07ZFsB6/6NABzziFL7Xn8z/mEBVmFXBiBgg7KcWSoH756w42VSdUezwTy9lW0spRmdvNBKV/PzrYyy0FMiGXXZwMOCyBD05CBRJlsPorwxZLlfRVmNvsTuMYB8TG3UUbFhoR8Bd5en+EC3ncH3QIUDWn0oVg28BVjWe5rADVQLX1h83ti6GD08YUGaxoNPXnJLZfiaucSacby2mG31xysxd8Tg0qPRq7744a1HPVryuauWR9pF0+qDmtskhenxK0FR+TQ4w0fRxTigteBsXx1pQu0iz+B8rP68uokU2faCC2IMHY2Tf9RPCe6Eef0/DdQhBft88PuJLwq52o/0qZ/n9HFL6LdgCU=", - "uuid": "37da9890-8289-4c58-ba34-a8271c4a8216", - "availability_zone": "cn-east-2b", - "enterprise_project_id": "0", - "launch_index": 0, - "instance_type": "c3.large.2", - "meta": { - "os_bit": "64", - "image_name": "CentOS 7.4", - "vpc_id": "6dad7f50-db1d-4cce-b095-d27bc837d4bb" - }, - "region_id": "cn-east-2", - "project_id": "c09b8baf28b845a9b53ed37575cfd61f", - "name": "hwdev-test-1" - }`)) - return - } - - http.Error(w, "not found", http.StatusNotFound) - })) -} - -func TestRetrieveHuaweiCloudMetadata(t *testing.T) { - logp.TestingSetup() - - server := initHuaweiCloudTestServer() - defer server.Close() - - config, err := conf.NewConfigFrom(map[string]interface{}{ - "providers": []string{"huawei"}, - "host": 
server.Listener.Addr().String(), - }) - - if err != nil { - t.Fatal(err) - } - - p, err := New(config) - if err != nil { - t.Fatal(err) - } - - actual, err := p.Run(&beat.Event{Fields: mapstr.M{}}) - if err != nil { - t.Fatal(err) - } - - expected := mapstr.M{ - "cloud": mapstr.M{ - "provider": "huawei", - "instance": mapstr.M{ - "id": "37da9890-8289-4c58-ba34-a8271c4a8216", - }, - "region": "cn-east-2", - "availability_zone": "cn-east-2b", - "service": mapstr.M{ - "name": "ECS", - }, - }, - } - assert.Equal(t, expected, actual.Fields) -} diff --git a/libbeat/processors/add_cloud_metadata/providers.go b/libbeat/processors/add_cloud_metadata/providers.go index 55e68f756071..77c4c7042add 100644 --- a/libbeat/processors/add_cloud_metadata/providers.go +++ b/libbeat/processors/add_cloud_metadata/providers.go @@ -64,7 +64,7 @@ var cloudMetaProviders = map[string]provider{ "nova-ssl": openstackNovaSSLMetadataFetcher, "qcloud": qcloudMetadataFetcher, "tencent": qcloudMetadataFetcher, - "huawei": huaweiMetadataFetcher, + "huawei": openstackNovaMetadataFetcher, "hetzner": hetznerMetadataFetcher, } diff --git a/libbeat/processors/add_host_metadata/add_host_metadata.go b/libbeat/processors/add_host_metadata/add_host_metadata.go index db3cbbc5ee30..5fe28194b555 100644 --- a/libbeat/processors/add_host_metadata/add_host_metadata.go +++ b/libbeat/processors/add_host_metadata/add_host_metadata.go @@ -18,6 +18,7 @@ package add_host_metadata import ( + "context" "fmt" "sync" "time" @@ -25,6 +26,7 @@ import ( "github.com/gofrs/uuid" "github.com/elastic/elastic-agent-libs/monitoring" + "github.com/elastic/go-sysinfo" "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/features" @@ -35,7 +37,6 @@ import ( "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" "github.com/elastic/elastic-agent-system-metrics/metric/system/host" - "github.com/elastic/go-sysinfo" ) const processorName = "add_host_metadata" @@ -96,7 +97,7 @@ 
func New(cfg *config.C) (beat.Processor, error) { } // create a unique ID for this instance of the processor - cbIDStr := "" + var cbIDStr string cbID, err := uuid.NewV4() // if we fail, fall back to the processor name, hope for the best. if err != nil { @@ -178,7 +179,10 @@ func (p *addHostMetadata) loadData(checkCache bool, useFQDN bool) error { hostname := h.Info().Hostname if useFQDN { - fqdn, err := h.FQDN() + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + + fqdn, err := h.FQDNWithContext(ctx) if err != nil { // FQDN lookup is "best effort". If it fails, we monitor the failure, fallback to // the OS-reported hostname, and move on. diff --git a/libbeat/processors/fingerprint/config.go b/libbeat/processors/fingerprint/config.go index dc36b6bceffb..2f31691e7414 100644 --- a/libbeat/processors/fingerprint/config.go +++ b/libbeat/processors/fingerprint/config.go @@ -17,13 +17,15 @@ package fingerprint +import "encoding/json" + // Config for fingerprint processor. type Config struct { - Method hashMethod `config:"method"` // Hash function to use for fingerprinting - Fields []string `config:"fields" validate:"required"` // Source fields to compute fingerprint from - TargetField string `config:"target_field"` // Target field for the fingerprint - Encoding encodingMethod `config:"encoding"` // Encoding to use for target field value - IgnoreMissing bool `config:"ignore_missing"` // Ignore missing fields? + Method namedHashMethod `config:"method"` // Hash function to use for fingerprinting + Fields []string `config:"fields" validate:"required"` // Source fields to compute fingerprint from + TargetField string `config:"target_field"` // Target field for the fingerprint + Encoding namedEncodingMethod `config:"encoding"` // Encoding to use for target field value + IgnoreMissing bool `config:"ignore_missing"` // Ignore missing fields? 
} func defaultConfig() Config { @@ -34,3 +36,16 @@ func defaultConfig() Config { IgnoreMissing: false, } } + +func (c *Config) MarshalJSON() ([]byte, error) { + type Alias Config + return json.Marshal(&struct { + Method string + Encoding string + *Alias + }{ + Method: c.Method.Name, + Encoding: c.Encoding.Name, + Alias: (*Alias)(c), + }) +} diff --git a/libbeat/processors/fingerprint/encode.go b/libbeat/processors/fingerprint/encode.go index 843c7bd5d293..dd04068df732 100644 --- a/libbeat/processors/fingerprint/encode.go +++ b/libbeat/processors/fingerprint/encode.go @@ -24,16 +24,26 @@ import ( "strings" ) +type namedEncodingMethod struct { + Name string + Encode encodingMethod +} type encodingMethod func([]byte) string -var encodings = map[string]encodingMethod{ - "hex": hex.EncodeToString, - "base32": base32.StdEncoding.EncodeToString, - "base64": base64.StdEncoding.EncodeToString, +var encodings = map[string]namedEncodingMethod{} + +func init() { + for _, e := range []namedEncodingMethod{ + {Name: "hex", Encode: hex.EncodeToString}, + {Name: "base32", Encode: base32.StdEncoding.EncodeToString}, + {Name: "base64", Encode: base64.StdEncoding.EncodeToString}, + } { + encodings[e.Name] = e + } } // Unpack creates the encodingMethod from the given string -func (e *encodingMethod) Unpack(str string) error { +func (e *namedEncodingMethod) Unpack(str string) error { str = strings.ToLower(str) m, found := encodings[str] diff --git a/libbeat/processors/fingerprint/fingerprint.go b/libbeat/processors/fingerprint/fingerprint.go index 3f22082bad42..fdbcf158b27c 100644 --- a/libbeat/processors/fingerprint/fingerprint.go +++ b/libbeat/processors/fingerprint/fingerprint.go @@ -60,7 +60,7 @@ func New(cfg *config.C) (beat.Processor, error) { p := &fingerprint{ config: config, - hash: config.Method, + hash: config.Method.Hash, fields: fields, } @@ -75,7 +75,7 @@ func (p *fingerprint) Run(event *beat.Event) (*beat.Event, error) { return nil, makeErrComputeFingerprint(err) } - 
encodedHash := p.config.Encoding(hashFn.Sum(nil)) + encodedHash := p.config.Encoding.Encode(hashFn.Sum(nil)) if _, err := event.PutValue(p.config.TargetField, encodedHash); err != nil { return nil, makeErrComputeFingerprint(err) @@ -85,8 +85,7 @@ func (p *fingerprint) Run(event *beat.Event) (*beat.Event, error) { } func (p *fingerprint) String() string { - //nolint:staticcheck // https://github.com/elastic/beats/issues/35174 - json, _ := json.Marshal(p.config) + json, _ := json.Marshal(&p.config) return procName + "=" + string(json) } diff --git a/libbeat/processors/fingerprint/fingerprint_test.go b/libbeat/processors/fingerprint/fingerprint_test.go index ead0bc2c0055..5f6bdb70b5ed 100644 --- a/libbeat/processors/fingerprint/fingerprint_test.go +++ b/libbeat/processors/fingerprint/fingerprint_test.go @@ -18,6 +18,7 @@ package fingerprint import ( + "fmt" "math/rand" "strconv" "testing" @@ -77,6 +78,7 @@ func TestWithConfig(t *testing.T) { Fields: test.input.Clone(), } newEvent, err := p.Run(testEvent) + assert.NoError(t, err) v, err := newEvent.GetValue("fingerprint") assert.NoError(t, err) assert.Equal(t, test.want, v) @@ -459,6 +461,18 @@ func TestIgnoreMissing(t *testing.T) { } } +func TestProcessorStringer(t *testing.T) { + testConfig, err := config.NewConfigFrom(mapstr.M{ + "fields": []string{"field1"}, + "encoding": "hex", + "method": "md5", + }) + require.NoError(t, err) + p, err := New(testConfig) + require.NoError(t, err) + require.Equal(t, `fingerprint={"Method":"md5","Encoding":"hex","Fields":["field1"],"TargetField":"fingerprint","IgnoreMissing":false}`, fmt.Sprint(p)) +} + func BenchmarkHashMethods(b *testing.B) { events := nRandomEvents(100000) @@ -472,8 +486,8 @@ func BenchmarkHashMethods(b *testing.B) { b.Run(method, func(b *testing.B) { b.ResetTimer() - for _, e := range events { - _, err := p.Run(&e) + for i := range events { + _, err := p.Run(&events[i]) if err != nil { b.Fatal(err) } @@ -491,7 +505,7 @@ func nRandomEvents(num int) []beat.Event { 
charsetLen := len(charset) b := make([]byte, 200) - var events []beat.Event + events := make([]beat.Event, num) for i := 0; i < num; i++ { for j := range b { b[j] = charset[prng.Intn(charsetLen)] diff --git a/libbeat/processors/fingerprint/hash.go b/libbeat/processors/fingerprint/hash.go index 1c4af0d0161a..1c8cf146a147 100644 --- a/libbeat/processors/fingerprint/hash.go +++ b/libbeat/processors/fingerprint/hash.go @@ -28,19 +28,29 @@ import ( "github.com/cespare/xxhash/v2" ) +type namedHashMethod struct { + Name string + Hash hashMethod +} type hashMethod func() hash.Hash -var hashes = map[string]hashMethod{ - "md5": md5.New, - "sha1": sha1.New, - "sha256": sha256.New, - "sha384": sha512.New384, - "sha512": sha512.New, - "xxhash": newXxHash, +var hashes = map[string]namedHashMethod{} + +func init() { + for _, h := range []namedHashMethod{ + {Name: "md5", Hash: md5.New}, + {Name: "sha1", Hash: sha1.New}, + {Name: "sha256", Hash: sha256.New}, + {Name: "sha384", Hash: sha512.New384}, + {Name: "sha512", Hash: sha512.New}, + {Name: "xxhash", Hash: newXxHash}, + } { + hashes[h.Name] = h + } } // Unpack creates the hashMethod from the given string -func (f *hashMethod) Unpack(str string) error { +func (f *namedHashMethod) Unpack(str string) error { str = strings.ToLower(str) m, found := hashes[str] diff --git a/libbeat/scripts/Makefile b/libbeat/scripts/Makefile index 100ccd3f0137..4360aa0c1927 100755 --- a/libbeat/scripts/Makefile +++ b/libbeat/scripts/Makefile @@ -46,7 +46,7 @@ export PATH := ./bin:$(PATH) GOFILES = $(shell find . -type f -name '*.go' 2>/dev/null) GOFILES_NOVENDOR = $(shell find . -type f -name '*.go' -not -path "*/vendor/*" 2>/dev/null) GOFILES_ALL = $(GOFILES) $(shell find $(ES_BEATS) -type f -name '*.go' 2>/dev/null) -GOPACKAGES_STRESSTESTS=$(shell find . -name '*.go' 2>/dev/null | xargs grep -l '\+build.*stresstest' | xargs -n1 dirname | uniq) +GOPACKAGES_STRESSTESTS=$(shell find . 
-type d \( -name "stress" \) 2>/dev/null) SHELL=bash ES_HOST?=elasticsearch ES_PORT?=9200 @@ -87,7 +87,7 @@ SYSTEM_TESTS?=false ## @testing if true, "make test" and "make testsuite" run un STRESS_TESTS?=false ## @testing if true, "make test" and "make testsuite" run also run the stress tests STRESS_TEST_OPTIONS?=-timeout=20m -race -v GOX_OS?=linux darwin windows freebsd netbsd openbsd ## @Building List of all OS to be supported by "make crosscompile". -GOX_OSARCH?=!darwin/arm !darwin/arm64 ## @building Space separated list of GOOS/GOARCH pairs to build by "make crosscompile". +GOX_OSARCH?=!darwin/arm !darwin/386 !linux/386 !windows/386 !freebsd/386 !netbsd/386 !openbsd/386 !linux/ppc64 ## @building Space-separated list of GOOS/GOARCH pairs to exclude (unsupported by GO and generated by GOX) in the "make crosscompile" build. GOX_FLAGS?= ## @building Additional flags to append to the gox command used by "make crosscompile". # XXX: Should be switched back to `snapshot` once the Elasticsearch # snapshots are working. 
https://github.com/elastic/beats/pull/6416 diff --git a/libbeat/tests/integration/cmd_keystore_test.go b/libbeat/tests/integration/cmd_keystore_test.go index eb4b697cafa2..efb9b91a1c92 100644 --- a/libbeat/tests/integration/cmd_keystore_test.go +++ b/libbeat/tests/integration/cmd_keystore_test.go @@ -100,19 +100,23 @@ func TestKeystoreRemoveMultipleExistingKeys(t *testing.T) { mockbeat.Stop() mockbeat.Start("keystore", "add", "key1", "--stdin") - fmt.Fprintf(os.Stdin, "pass1") + + fmt.Fprintf(mockbeat.stdin, "pass1") + require.NoError(t, mockbeat.stdin.Close(), "could not close mockbeat stdin") procState, err := mockbeat.Process.Wait() require.NoError(t, err) require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") mockbeat.Start("keystore", "add", "key2", "--stdin") - fmt.Fprintf(os.Stdin, "pass2") + fmt.Fprintf(mockbeat.stdin, "pass2") + require.NoError(t, mockbeat.stdin.Close(), "could not close mockbeat stdin") procState, err = mockbeat.Process.Wait() require.NoError(t, err) require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") mockbeat.Start("keystore", "add", "key3", "--stdin") - fmt.Fprintf(os.Stdin, "pass3") + fmt.Fprintf(mockbeat.stdin, "pass3") + require.NoError(t, mockbeat.stdin.Close(), "could not close mockbeat stdin") procState, err = mockbeat.Process.Wait() require.NoError(t, err) require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") @@ -138,19 +142,22 @@ func TestKeystoreList(t *testing.T) { mockbeat.Stop() mockbeat.Start("keystore", "add", "key1", "--stdin") - fmt.Fprintf(os.Stdin, "pass1") + fmt.Fprintf(mockbeat.stdin, "pass1") + require.NoError(t, mockbeat.stdin.Close(), "could not close mockbeat stdin") procState, err := mockbeat.Process.Wait() require.NoError(t, err) require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") mockbeat.Start("keystore", "add", "key2", "--stdin") - fmt.Fprintf(os.Stdin, "pass2") + fmt.Fprintf(mockbeat.stdin, "pass2") + require.NoError(t, mockbeat.stdin.Close(), "could not close 
mockbeat stdin") procState, err = mockbeat.Process.Wait() require.NoError(t, err) require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") mockbeat.Start("keystore", "add", "key3", "--stdin") - fmt.Fprintf(os.Stdin, "pass3") + fmt.Fprintf(mockbeat.stdin, "pass3") + require.NoError(t, mockbeat.stdin.Close(), "could not close mockbeat stdin") procState, err = mockbeat.Process.Wait() require.NoError(t, err) require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") @@ -186,7 +193,8 @@ func TestKeystoreAddSecretFromStdin(t *testing.T) { require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") mockbeat.Start("keystore", "add", "key1", "--stdin") - fmt.Fprintf(os.Stdin, "pass1") + fmt.Fprintf(mockbeat.stdin, "pass1") + require.NoError(t, mockbeat.stdin.Close(), "could not close mockbeat stdin") procState, err = mockbeat.Process.Wait() require.NoError(t, err) require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") @@ -202,13 +210,15 @@ func TestKeystoreUpdateForce(t *testing.T) { require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") mockbeat.Start("keystore", "add", "key1", "--stdin") - fmt.Fprintf(os.Stdin, "pass1") + fmt.Fprintf(mockbeat.stdin, "pass1") + require.NoError(t, mockbeat.stdin.Close(), "could not close mockbeat stdin") procState, err = mockbeat.Process.Wait() require.NoError(t, err) require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") mockbeat.Start("keystore", "add", "key1", "--force", "--stdin") - fmt.Fprintf(os.Stdin, "pass2") + fmt.Fprintf(mockbeat.stdin, "pass2") + require.NoError(t, mockbeat.stdin.Close(), "could not close mockbeat stdin") procState, err = mockbeat.Process.Wait() require.NoError(t, err) require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") diff --git a/libbeat/tests/integration/framework.go b/libbeat/tests/integration/framework.go index 046c578d7cd7..9657fbaeaff4 100644 --- a/libbeat/tests/integration/framework.go +++ b/libbeat/tests/integration/framework.go @@ -30,6 
+30,7 @@ import ( "net/http" "net/url" "os" + "os/exec" "path/filepath" "regexp" "strings" @@ -55,6 +56,7 @@ type BeatProc struct { logFileOffset int64 t *testing.T tempDir string + stdin io.WriteCloser stdout *os.File stderr *os.File Process *os.Process @@ -90,7 +92,7 @@ type Total struct { Value int `json:"value"` } -// NewBeat createa a new Beat process from the system tests binary. +// NewBeat creates a new Beat process from the system tests binary. // It sets some required options like the home path, logging, etc. // `tempDir` will be used as home and logs directory for the Beat // `args` will be passed as CLI arguments to the Beat @@ -98,10 +100,12 @@ func NewBeat(t *testing.T, beatName, binary string, args ...string) *BeatProc { require.FileExistsf(t, binary, "beat binary must exists") tempDir := createTempDir(t) configFile := filepath.Join(tempDir, beatName+".yml") + stdoutFile, err := os.Create(filepath.Join(tempDir, "stdout")) require.NoError(t, err, "error creating stdout file") stderrFile, err := os.Create(filepath.Join(tempDir, "stderr")) require.NoError(t, err, "error creating stderr file") + p := BeatProc{ Binary: binary, baseArgs: append([]string{ @@ -213,15 +217,27 @@ func (b *BeatProc) Start(args ...string) { func (b *BeatProc) startBeat() { b.cmdMutex.Lock() defer b.cmdMutex.Unlock() + _, _ = b.stdout.Seek(0, 0) _ = b.stdout.Truncate(0) _, _ = b.stderr.Seek(0, 0) _ = b.stderr.Truncate(0) - var procAttr os.ProcAttr - procAttr.Files = []*os.File{os.Stdin, b.stdout, b.stderr} - process, err := os.StartProcess(b.fullPath, b.Args, &procAttr) + + cmd := exec.Cmd{ + Path: b.fullPath, + Args: b.Args, + Stdout: b.stdout, + Stderr: b.stderr, + } + + var err error + b.stdin, err = cmd.StdinPipe() + require.NoError(b.t, err, "could not get cmd StdinPipe") + + err = cmd.Start() require.NoError(b.t, err, "error starting beat process") - b.Process = process + + b.Process = cmd.Process } // waitBeatToExit blocks until the Beat exits, it returns @@ -515,6 +531,10 
@@ func (b *BeatProc) LoadMeta() (Meta, error) { return m, nil } +func (b *BeatProc) Stdin() io.WriteCloser { + return b.stdin +} + func GetESURL(t *testing.T, scheme string) url.URL { t.Helper() diff --git a/libbeat/tests/integration/mockserver.go b/libbeat/tests/integration/mockserver.go index 0a396cb78399..763467819fa2 100644 --- a/libbeat/tests/integration/mockserver.go +++ b/libbeat/tests/integration/mockserver.go @@ -38,18 +38,18 @@ type unitKey struct { } // NewMockServer creates a GRPC server to mock the Elastic-Agent. -// On the first check in call it will send the first element of `unit` +// On the first check-in call it will send the first element of `unit` // as the expected unit, on successive calls, if the Beat has reached // that state, it will move on to sending the next state. // It will also validate the features. // // if `observedCallback` is not nil, it will be called on every -// check in receiving the `proto.CheckinObserved` sent by the +// check-in receiving the `proto.CheckinObserved` sent by the // Beat and index from `units` that was last sent to the Beat. // // If `delay` is not zero, when the Beat state matches the last // sent units, the server will wait for `delay` before sending the -// the next state. This will block the check in call from the Beat. +// next state. This will block the check-in call from the Beat. 
func NewMockServer( units [][]*proto.UnitExpected, featuresIdxs []uint64, @@ -58,7 +58,7 @@ func NewMockServer( delay time.Duration, ) *mock.StubServerV2 { i := 0 - agentInfo := &proto.CheckinAgentInfo{ + agentInfo := &proto.AgentInfo{ Id: "elastic-agent-id", Version: version.GetDefaultVersion(), Snapshot: true, diff --git a/libbeat/version/helper.go b/libbeat/version/helper.go index 5ed206d8a6c0..92b2ed2cb4cd 100644 --- a/libbeat/version/helper.go +++ b/libbeat/version/helper.go @@ -17,23 +17,36 @@ package version -import "time" +import ( + "sync/atomic" + "time" +) + +var ( + packageVersion atomic.Value + buildTime = "unknown" + commit = "unknown" + qualifier = "" +) -// GetDefaultVersion returns the current libbeat version. -// This method is in a separate file as the version.go file is auto generated +// GetDefaultVersion returns the current version. +// If running in stand-alone mode, it's the libbeat version. If running in +// managed mode, a.k.a under the agent, it's the package version set using +// SetPackageVersion. If SetPackageVersion haven't been called, it reports the +// libbeat version +// +// This method is in a separate file as the version.go file is auto-generated. func GetDefaultVersion() string { + if v, ok := packageVersion.Load().(string); ok && v != "" { + return v + } + if qualifier == "" { return defaultBeatVersion } return defaultBeatVersion + "-" + qualifier } -var ( - buildTime = "unknown" - commit = "unknown" - qualifier = "" -) - // BuildTime exposes the compile-time build time information. // It will represent the zero time instant if parsing fails. func BuildTime() time.Time { @@ -48,3 +61,10 @@ func BuildTime() time.Time { func Commit() string { return commit } + +// SetPackageVersion sets the package version, overriding the defaultBeatVersion. +func SetPackageVersion(version string) { + // Currently, the Elastic Agent does not perform any validation on the + // package version, therefore, no validation is done here either. 
+ packageVersion.Store(version) +} diff --git a/metricbeat/docs/modules/oracle.asciidoc b/metricbeat/docs/modules/oracle.asciidoc index f524967cce5d..3436caa9cc26 100644 --- a/metricbeat/docs/modules/oracle.asciidoc +++ b/metricbeat/docs/modules/oracle.asciidoc @@ -60,19 +60,24 @@ Then, Metricbeat can be launched. *Host Configuration* -The following two types of host configurations are supported: +The following types of host configuration are supported: -1. Old style host configuration for backwards compatibility: +1. An old-style Oracle connection string, for backwards compatibility: a. `hosts: ["user/pass@0.0.0.0:1521/ORCLPDB1.localdomain"]` b. `hosts: ["user/password@0.0.0.0:1521/ORCLPDB1.localdomain as sysdba"]` -2. DSN host configuration: +2. DSN configuration as a URL: + a. `hosts: ["oracle://user:pass@0.0.0.0:1521/ORCLPDB1.localdomain?sysdba=1"]` + +3. DSN configuration as a logfmt-encoded parameter list: a. `hosts: ['user="user" password="pass" connectString="0.0.0.0:1521/ORCLPDB1.localdomain"']` b. `hosts: ['user="user" password="password" connectString="host:port/service_name" sysdba=true']` -DSN host configuration is the recommended way to configure the Oracle Metricbeat Module as it supports the usage of special characters in the password. +DSN host configuration is the recommended configuration type as it supports the use of special characters in the password. + +In a URL any special characters should be URL encoded. -Note: If the password contains the backslash (`\`) character, it must be escaped with a backslash. For example, if the password is `my\_password`, it should be written as `my\\_password`. +In the logfmt-encoded DSN format, if the password contains a backslash character (`\`), it must be escaped with another backslash. For example, if the password is `my\_password`, it must be written as `my\\_password`. 
[float] == Metricsets diff --git a/metricbeat/docs/modules/sql.asciidoc b/metricbeat/docs/modules/sql.asciidoc index 9c27c0bc4ba5..d8e0e15b617d 100644 --- a/metricbeat/docs/modules/sql.asciidoc +++ b/metricbeat/docs/modules/sql.asciidoc @@ -871,19 +871,26 @@ Then, Metricbeat can be launched. ===== Host Configuration for Oracle -The following two types of host configurations are supported: +The following types of host configuration are supported: -1. DSN host configuration as URL: +1. An old-style Oracle connection string, for backwards compatibility: a. `hosts: ["user/pass@0.0.0.0:1521/ORCLPDB1.localdomain"]` b. `hosts: ["user/password@0.0.0.0:1521/ORCLPDB1.localdomain as sysdba"]` -2. DSN host configuration: +2. DSN configuration as a URL: + a. `hosts: ["oracle://user:pass@0.0.0.0:1521/ORCLPDB1.localdomain?sysdba=1"]` + +3. DSN configuration as a logfmt-encoded parameter list: a. `hosts: ['user="user" password="pass" connectString="0.0.0.0:1521/ORCLPDB1.localdomain"']` b. `hosts: ['user="user" password="password" connectString="host:port/service_name" sysdba=true']` -Note: If the password contains the backslash (`\`) character, it must be escaped with a backslash. For example, if the password is `my\_password`, it should be written as `my\\_password`. +DSN host configuration is the recommended configuration type as it supports the use of special characters in the password. + +In a URL any special characters should be URL encoded. -The username and password to connect to the database can be provided as values to `username` and `password` keys of `sql.yml`. +In the logfmt-encoded DSN format, if the password contains a backslash character (`\`), it must be escaped with another backslash. For example, if the password is `my\_password`, it must be written as `my\\_password`. + +The username and password to connect to the database can be provided as values to the `username` and `password` keys of `sql.yml`. 
[source,yml] ---- @@ -901,6 +908,7 @@ The username and password to connect to the database can be provided as values t response_format: variables ---- + :edit_url: [float] diff --git a/metricbeat/helper/socket/ptable_linux.go b/metricbeat/helper/socket/ptable_linux.go index 88fff488bc22..ffe585f70949 100644 --- a/metricbeat/helper/socket/ptable_linux.go +++ b/metricbeat/helper/socket/ptable_linux.go @@ -20,17 +20,22 @@ package socket import ( - "github.com/elastic/beats/v7/libbeat/common" + "kernel.org/pub/linux/libs/security/libcap/cap" ) -var requiredCapabilities = []string{"sys_ptrace", "dac_read_search"} - // isPrivileged checks if this process has privileges to read sockets // of all users func isPrivileged() (bool, error) { - capabilities, err := common.GetCapabilities() + set := cap.GetProc() + + ptrace, err := set.GetFlag(cap.Effective, cap.SYS_PTRACE) + if err != nil { + return false, err + } + dac_read_search, err := set.GetFlag(cap.Effective, cap.DAC_READ_SEARCH) if err != nil { return false, err } - return capabilities.Check(requiredCapabilities), nil + + return ptrace && dac_read_search, nil } diff --git a/metricbeat/module/mongodb/replstatus/info.go b/metricbeat/module/mongodb/replstatus/info.go index 037aeda09502..a444fa03b1ff 100644 --- a/metricbeat/module/mongodb/replstatus/info.go +++ b/metricbeat/module/mongodb/replstatus/info.go @@ -21,11 +21,10 @@ import ( "context" "errors" "fmt" + "time" "go.mongodb.org/mongo-driver/bson" - "go.mongodb.org/mongo-driver/bson/primitive" "go.mongodb.org/mongo-driver/mongo" - "go.mongodb.org/mongo-driver/mongo/options" ) type oplogInfo struct { @@ -71,14 +70,9 @@ func getReplicationInfo(client *mongo.Client) (*oplogInfo, error) { } // get first and last items in the oplog - firstTs, err := getOpTimestamp(collection, "$natural") + firstTs, lastTs, err := getOpTimestamp(collection) if err != nil { - return nil, fmt.Errorf("could not get first operation timestamp in op log: %w", err) - } - - lastTs, err := 
getOpTimestamp(collection, "-$natural") - if err != nil { - return nil, fmt.Errorf("could not get last operation timestamp in op log: %w", err) + return nil, fmt.Errorf("could not get operation timestamp in op log: %w", err) } diff := lastTs - firstTs @@ -92,28 +86,35 @@ func getReplicationInfo(client *mongo.Client) (*oplogInfo, error) { }, nil } -func getOpTimestamp(collection *mongo.Collection, sort string) (uint32, error) { - opt := options.Find().SetSort(bson.D{{Key: sort, Value: 1}}) - cursor, err := collection.Find(context.Background(), bson.D{}, opt) - if err != nil { - return 0, fmt.Errorf("could not get cursor on collection '%s': %w", collection.Name(), err) +func getOpTimestamp(collection *mongo.Collection) (uint32, uint32, error) { + + // Find both first and last timestamps using $min and $max + pipeline := bson.A{ + bson.M{"$group": bson.M{"_id": 1, "minTS": bson.M{"$min": "$ts"}, "maxTS": bson.M{"$max": "$ts"}}}, } - if !cursor.Next(context.Background()) { - return 0, errors.New("objects not found in local.oplog.rs") + cursor, err := collection.Aggregate(context.Background(), pipeline) + if err != nil { + return 0, 0, fmt.Errorf("could not get operation timestamps in op log: %w", err) } + defer cursor.Close(context.Background()) - var opTime map[string]interface{} - if err = cursor.Decode(&opTime); err != nil { - return 0, fmt.Errorf("error decoding response: %w", err) + var result struct { + MinTS time.Time `bson:"minTS"` + MaxTS time.Time `bson:"maxTS"` } - ts, ok := opTime["ts"].(primitive.Timestamp) - if !ok { - return 0, errors.New("an expected timestamp was not found") + if !cursor.Next(context.Background()) { + return 0, 0, errors.New("no documents found in op log") } + if err := cursor.Decode(&result); err != nil { + return 0, 0, fmt.Errorf("error decoding response for timestamps: %w", err) + } + + minTS := uint32(result.MinTS.Unix()) + maxTS := uint32(result.MaxTS.Unix()) - return ts.T, nil + return minTS, maxTS, nil } func contains(s 
[]string, x string) bool { diff --git a/metricbeat/tests/system/test_reload.py b/metricbeat/tests/system/test_reload.py index 29d82bbf82b2..99aa8e2c2f27 100644 --- a/metricbeat/tests/system/test_reload.py +++ b/metricbeat/tests/system/test_reload.py @@ -42,7 +42,8 @@ def test_reload(self): self.wait_until(lambda: self.output_lines() > 0) proc.check_kill_and_wait() - @unittest.skipUnless(re.match("(?i)win|linux|darwin|freebsd|openbsd", sys.platform), "os") + # windows is disabled, see https://github.com/elastic/beats/issues/37841 + @unittest.skipUnless(re.match("(?i)linux|darwin|freebsd|openbsd", sys.platform), "os") def test_start_stop(self): """ Test if module is properly started and stopped diff --git a/packetbeat/_meta/config/beat.reference.yml.tmpl b/packetbeat/_meta/config/beat.reference.yml.tmpl index 649ec0e8deea..033aa1e51063 100644 --- a/packetbeat/_meta/config/beat.reference.yml.tmpl +++ b/packetbeat/_meta/config/beat.reference.yml.tmpl @@ -78,6 +78,11 @@ packetbeat.interfaces.internal_networks: # can stay enabled even after beat is shut down. #packetbeat.interfaces.auto_promisc_mode: true +# By default Ingest pipelines are not updated if a pipeline with the same ID +# already exists. If this option is enabled Packetbeat overwrites pipelines +# every time a new Elasticsearch connection is established. 
+#packetbeat.overwrite_pipelines: false + {{- template "windows_npcap.yml.tmpl" .}} {{header "Flows"}} diff --git a/packetbeat/beater/packetbeat.go b/packetbeat/beater/packetbeat.go index 725f3eebc33d..d8c223f17892 100644 --- a/packetbeat/beater/packetbeat.go +++ b/packetbeat/beater/packetbeat.go @@ -25,13 +25,16 @@ import ( "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common/reload" + "github.com/elastic/beats/v7/libbeat/esleg/eslegclient" "github.com/elastic/beats/v7/libbeat/management" "github.com/elastic/beats/v7/libbeat/monitoring/inputmon" + "github.com/elastic/beats/v7/libbeat/outputs/elasticsearch" conf "github.com/elastic/elastic-agent-libs/config" "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/service" "github.com/elastic/beats/v7/packetbeat/config" + "github.com/elastic/beats/v7/packetbeat/module" "github.com/elastic/beats/v7/packetbeat/protos" // Add packetbeat default processors @@ -80,10 +83,11 @@ func initialConfig() config.Config { // Beater object. Contains all objects needed to run the beat type packetbeat struct { - config *conf.C - factory *processorFactory - done chan struct{} - stopOnce sync.Once + config *conf.C + factory *processorFactory + overwritePipelines bool + done chan struct{} + stopOnce sync.Once } // New returns a new Packetbeat beat.Beater. @@ -98,15 +102,35 @@ func New(b *beat.Beat, rawConfig *conf.C) (beat.Beater, error) { return nil, err } + var overwritePipelines bool + if !b.Manager.Enabled() { + // Pipeline overwrite is only enabled on standalone packetbeat + // since pipelines are managed by fleet otherwise. 
+ config, err := configurator(rawConfig) + if err != nil { + return nil, err + } + overwritePipelines = config.OverwritePipelines + b.OverwritePipelinesCallback = func(esConfig *conf.C) error { + esClient, err := eslegclient.NewConnectedClient(esConfig, "Packetbeat") + if err != nil { + return err + } + _, err = module.UploadPipelines(b.Info, esClient, overwritePipelines) + return err + } + } + return &packetbeat{ - config: rawConfig, - factory: factory, - done: make(chan struct{}), + config: rawConfig, + factory: factory, + overwritePipelines: overwritePipelines, + done: make(chan struct{}), }, nil } // Run starts the packetbeat network capture, decoding and event publication, sending -// events to b.Publisher. If b is mananaged, packetbeat is registered with the +// events to b.Publisher. If b is managed, packetbeat is registered with the // reload.Registry and handled by fleet. Otherwise it is run until cancelled or a // fatal error. func (pb *packetbeat) Run(b *beat.Beat) error { @@ -138,11 +162,28 @@ func (pb *packetbeat) Run(b *beat.Beat) error { } if !b.Manager.Enabled() { + if b.Config.Output.Name() == "elasticsearch" { + _, err := elasticsearch.RegisterConnectCallback(func(esClient *eslegclient.Connection) error { + _, err := module.UploadPipelines(b.Info, esClient, pb.overwritePipelines) + return err + }) + if err != nil { + return err + } + } else { + logp.L().Warn(pipelinesWarning) + } + return pb.runStatic(b, pb.factory) } return pb.runManaged(b, pb.factory) } +const pipelinesWarning = "Packetbeat is unable to load the ingest pipelines for the configured" + + " modules because the Elasticsearch output is not configured/enabled. If you have" + + " already loaded the ingest pipelines or are using Logstash pipelines, you" + + " can ignore this warning." + // runStatic constructs a packetbeat runner and starts it, returning on cancellation // or the first fatal error. 
func (pb *packetbeat) runStatic(b *beat.Beat, factory *processorFactory) error { diff --git a/packetbeat/config/config.go b/packetbeat/config/config.go index 13d00b89e44b..7d579af635bf 100644 --- a/packetbeat/config/config.go +++ b/packetbeat/config/config.go @@ -33,14 +33,15 @@ import ( var errFanoutGroupAFPacketOnly = errors.New("fanout_group is only valid with af_packet type") type Config struct { - Interface *InterfaceConfig `config:"interfaces"` - Interfaces []InterfaceConfig `config:"interfaces"` - Flows *Flows `config:"flows"` - Protocols map[string]*conf.C `config:"protocols"` - ProtocolsList []*conf.C `config:"protocols"` - Procs procs.ProcsConfig `config:"procs"` - IgnoreOutgoing bool `config:"ignore_outgoing"` - ShutdownTimeout time.Duration `config:"shutdown_timeout"` + Interface *InterfaceConfig `config:"interfaces"` + Interfaces []InterfaceConfig `config:"interfaces"` + Flows *Flows `config:"flows"` + Protocols map[string]*conf.C `config:"protocols"` + ProtocolsList []*conf.C `config:"protocols"` + Procs procs.ProcsConfig `config:"procs"` + IgnoreOutgoing bool `config:"ignore_outgoing"` + ShutdownTimeout time.Duration `config:"shutdown_timeout"` + OverwritePipelines bool `config:"overwrite_pipelines"` // Only used by standalone Packetbeat. 
} // FromStatic initializes a configuration given a config.C diff --git a/packetbeat/docs/howto/howto.asciidoc b/packetbeat/docs/howto/howto.asciidoc index cdadf3cb7b35..b7284ab3024b 100644 --- a/packetbeat/docs/howto/howto.asciidoc +++ b/packetbeat/docs/howto/howto.asciidoc @@ -23,6 +23,8 @@ include::{libbeat-dir}/howto/load-dashboards.asciidoc[] include::{libbeat-dir}/shared-geoip.asciidoc[] +include::load-ingest-pipelines.asciidoc[] + :standalone: include::{libbeat-dir}/shared-env-vars.asciidoc[] :standalone!: diff --git a/packetbeat/docs/howto/load-ingest-pipelines.asciidoc b/packetbeat/docs/howto/load-ingest-pipelines.asciidoc new file mode 100644 index 000000000000..acca824829c5 --- /dev/null +++ b/packetbeat/docs/howto/load-ingest-pipelines.asciidoc @@ -0,0 +1,28 @@ +[[load-ingest-pipelines]] +== Load ingest pipelines + +{beatname_uc} modules are implemented using {es} ingest node +pipelines. The events receive their transformations within +{es}. The ingest node pipelines must be loaded +into {es}. This can happen one of several ways. + +[id="{beatname_lc}-load-pipeline-auto"] +[float] +=== On connection to {es} + +{beatname_uc} will send ingest pipelines automatically to {es} if the +{es} output is enabled. + +Make sure the user specified in +{beatname_lc}.yml+ is +<>. + +If {beatname_uc} is sending events to {ls} or another output you need +to load the ingest pipelines with the `setup` command or manually. + +[id="{beatname_lc}-load-pipeline-manual"] +[float] +=== Manually install pipelines + +Pipelines can be loaded them into {es} with the `_ingest/pipeline` REST API +call. The user making the REST API call will need to have the `ingest_admin` +role assigned to them. 
diff --git a/packetbeat/docs/modules.asciidoc b/packetbeat/docs/modules.asciidoc new file mode 100644 index 000000000000..8e72454f9cff --- /dev/null +++ b/packetbeat/docs/modules.asciidoc @@ -0,0 +1,41 @@ +[id="{beatname_lc}-modules"] +[role="xpack"] += Modules + +[partintro] +-- +This section contains detailed information about the available network packet +log processing modules contained in {beatname_uc}. + +{beatname_uc} modules are implemented using Elasticsearch Ingest Node pipelines. +The events receive their transformations within Elasticsearch. All events are +sent through {beatname_uc}'s "routing" pipeline that routes events to specific +module pipelines based on their network protocol. + +{beatname_uc}'s default config file contains the option to send all events to +the routing pipeline. If you remove this option then the module processing +will not be applied. + +[source,yaml,subs="attributes"] +---- +output.elasticsearch.pipeline: packetbeat-%{[agent.version]}-routing +---- + +The general goal of each module is to transform events by renaming fields to +comply with the {ecs-ref}/index.html[Elastic Common Schema] (ECS). The modules +may also apply additional categorization, tagging, and parsing as necessary. +about how to configure the language in `packetbeat`, refer to <>. + +[id="{beatname_lc}-modules-setup"] +[float] +=== Setup of Ingest Node pipelines + +{beatname_uc}'s Ingest Node pipelines must be installed to Elasticsearch if you +want to apply the module processing to events. The simplest way to get started +is to use the Elasticsearch output and {beatname_uc} will automatically install +the pipelines when it first connects to Elasticsearch. + +Installation Methods + +1. <<{beatname_lc}-load-pipeline-auto>> +2. 
<<{beatname_lc}-load-pipeline-manual>> diff --git a/packetbeat/docs/packetbeat-options.asciidoc b/packetbeat/docs/packetbeat-options.asciidoc index c5cb4d95d6b8..c48b4a1b01d0 100644 --- a/packetbeat/docs/packetbeat-options.asciidoc +++ b/packetbeat/docs/packetbeat-options.asciidoc @@ -1650,3 +1650,12 @@ Example configuration: ------------------------------------------------------------------------------------- packetbeat.shutdown_timeout: 5s ------------------------------------------------------------------------------------- + +[float] +==== `overwrite_pipelines` + +By default Ingest pipelines are not updated if a pipeline with the same ID +already exists. If this option is enabled {beatname_uc} overwrites pipelines +every time a new Elasticsearch connection is established. + +The default value is `false`. diff --git a/packetbeat/magefile.go b/packetbeat/magefile.go index 50c8a19310ca..00e4f9dd47ba 100644 --- a/packetbeat/magefile.go +++ b/packetbeat/magefile.go @@ -29,19 +29,20 @@ import ( "github.com/elastic/beats/v7/dev-tools/mage/target/build" packetbeat "github.com/elastic/beats/v7/packetbeat/scripts/mage" - // mage:import + //mage:import "github.com/elastic/beats/v7/dev-tools/mage/target/common" - // mage:import + //mage:import "github.com/elastic/beats/v7/dev-tools/mage/target/unittest" - // mage:import + //mage:import _ "github.com/elastic/beats/v7/dev-tools/mage/target/integtest/notests" - // mage:import + //mage:import _ "github.com/elastic/beats/v7/dev-tools/mage/target/test" ) func init() { common.RegisterCheckDeps(Update) unittest.RegisterPythonTestDeps(packetbeat.FieldsYML, Dashboards) + packetbeat.SelectLogic = devtools.OSSProject devtools.BeatDescription = "Packetbeat analyzes network traffic and sends the data to Elasticsearch." 
} diff --git a/packetbeat/module/pipeline.go b/packetbeat/module/pipeline.go new file mode 100644 index 000000000000..9e6d23849386 --- /dev/null +++ b/packetbeat/module/pipeline.go @@ -0,0 +1,188 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package module + +import ( + "embed" + "encoding/json" + "errors" + "fmt" + "os" + "path" + "path/filepath" + "strings" + + "github.com/joeshaw/multierror" + "gopkg.in/yaml.v2" + + "github.com/elastic/beats/v7/filebeat/fileset" + "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/libbeat/esleg/eslegclient" + "github.com/elastic/elastic-agent-libs/logp" +) + +// PipelinesFS is used from the x-pack/packetbeat code to inject modules. The +// OSS version does not have modules. +var PipelinesFS *embed.FS + +var errNoFS = errors.New("no embedded file system") + +const logName = "pipeline" + +type pipeline struct { + id string + contents map[string]interface{} +} + +// UploadPipelines reads all pipelines embedded in the Packetbeat executable +// and adapts the pipeline for a given ES version, converts to JSON if +// necessary and creates or updates ingest pipeline in ES. The IDs of pipelines +// uploaded to ES are returned in loaded. 
+func UploadPipelines(info beat.Info, esClient *eslegclient.Connection, overwritePipelines bool) (loaded []string, err error) { + pipelines, err := readAll(info) + if err != nil { + return nil, err + } + return load(esClient, pipelines, overwritePipelines) +} + +// readAll reads pipelines from the the embedded filesystem and +// returns a slice of pipelines suitable for sending to Elasticsearch +// with load. +func readAll(info beat.Info) (pipelines []pipeline, err error) { + p, err := readDir(".", info) + if err == errNoFS { //nolint:errorlint // Bad linter! This is never wrapped. + return nil, nil + } + return p, err +} + +func readDir(dir string, info beat.Info) (pipelines []pipeline, err error) { + if PipelinesFS == nil { + return nil, errNoFS + } + dirEntries, err := PipelinesFS.ReadDir(dir) + if err != nil { + return nil, err + } + for _, de := range dirEntries { + if de.IsDir() { + subPipelines, err := readDir(path.Join(dir, de.Name()), info) + if err != nil { + return nil, err + } + pipelines = append(pipelines, subPipelines...) + continue + } + p, err := readFile(path.Join(dir, de.Name()), info) + if err == errNoFS { //nolint:errorlint // Bad linter! This is never wrapped. + continue + } + if err != nil { + return nil, err + } + pipelines = append(pipelines, p) + } + return pipelines, nil +} + +func readFile(filename string, info beat.Info) (p pipeline, err error) { + if PipelinesFS == nil { + return pipeline{}, errNoFS + } + contents, err := PipelinesFS.ReadFile(filename) + if err != nil { + return pipeline{}, err + } + updatedContent, err := applyTemplates(info.IndexPrefix, info.Version, filename, contents) + if err != nil { + return pipeline{}, err + } + ds, _, _ := strings.Cut(filename, string(os.PathSeparator)) + p = pipeline{ + id: fileset.FormatPipelineID(info.IndexPrefix, "", "", ds, info.Version), + contents: updatedContent, + } + return p, nil +} + +// load uses esClient to load pipelines to Elasticsearch cluster. 
+// The IDs of loaded pipelines will be returned in loaded. +// load will only overwrite existing pipelines if overwritePipelines is +// true. An error in loading one of the pipelines will cause the +// successfully loaded ones to be deleted. +func load(esClient *eslegclient.Connection, pipelines []pipeline, overwritePipelines bool) (loaded []string, err error) { + log := logp.NewLogger(logName) + + for _, pipeline := range pipelines { + err = fileset.LoadPipeline(esClient, pipeline.id, pipeline.contents, overwritePipelines, log) + if err != nil { + err = fmt.Errorf("error loading pipeline %s: %w", pipeline.id, err) + break + } + loaded = append(loaded, pipeline.id) + } + + if err != nil { + errs := multierror.Errors{err} + for _, id := range loaded { + err = fileset.DeletePipeline(esClient, id) + if err != nil { + errs = append(errs, err) + } + } + return nil, errs.Err() + } + return loaded, nil +} + +func applyTemplates(prefix string, version string, filename string, original []byte) (converted map[string]interface{}, err error) { + vars := map[string]interface{}{ + "builtin": map[string]interface{}{ + "prefix": prefix, + "module": "", + "fileset": "", + "beatVersion": version, + }, + } + + encodedString, err := fileset.ApplyTemplate(vars, string(original), true) + if err != nil { + return nil, fmt.Errorf("failed to apply template: %w", err) + } + + var content map[string]interface{} + switch extension := strings.ToLower(filepath.Ext(filename)); extension { + case ".json": + if err = json.Unmarshal([]byte(encodedString), &content); err != nil { + return nil, fmt.Errorf("error JSON decoding the pipeline file: %s: %w", filename, err) + } + case ".yaml", ".yml": + if err = yaml.Unmarshal([]byte(encodedString), &content); err != nil { + return nil, fmt.Errorf("error YAML decoding the pipeline file: %s: %w", filename, err) + } + newContent, err := fileset.FixYAMLMaps(content) + if err != nil { + return nil, fmt.Errorf("failed to sanitize the YAML pipeline file: %s: 
%w", filename, err) + } + content = newContent.(map[string]interface{}) + default: + return nil, fmt.Errorf("unsupported extension '%s' for pipeline file: %s", extension, filename) + } + return content, nil +} diff --git a/packetbeat/packetbeat.reference.yml b/packetbeat/packetbeat.reference.yml index 1e013fb081f5..c9dac77048ad 100644 --- a/packetbeat/packetbeat.reference.yml +++ b/packetbeat/packetbeat.reference.yml @@ -78,6 +78,11 @@ packetbeat.interfaces.internal_networks: # can stay enabled even after beat is shut down. #packetbeat.interfaces.auto_promisc_mode: true +# By default Ingest pipelines are not updated if a pipeline with the same ID +# already exists. If this option is enabled Packetbeat overwrites pipelines +# every time a new Elasticsearch connection is established. +#packetbeat.overwrite_pipelines: false + # =================================== Flows ==================================== packetbeat.flows: diff --git a/packetbeat/scripts/mage/config.go b/packetbeat/scripts/mage/config.go index 5213f4f1f87c..f41b50ffff75 100644 --- a/packetbeat/scripts/mage/config.go +++ b/packetbeat/scripts/mage/config.go @@ -30,11 +30,18 @@ func device(goos string) string { return "default_route" } +// SelectLogic configures the types of project logic to use (OSS vs X-Pack). +// It is set in the packetbeat and x-pack/packetbeat magefiles. +var SelectLogic devtools.ProjectType + // ConfigFileParams returns the default ConfigFileParams for generating // packetbeat*.yml files. 
func ConfigFileParams() devtools.ConfigFileParams { p := devtools.DefaultConfigFileParams() p.Templates = append(p.Templates, devtools.OSSBeatDir("_meta/config/*.tmpl")) + if SelectLogic == devtools.XPackProject { + p.Templates = append(p.Templates, devtools.XPackBeatDir("_meta/config/*.tmpl")) + } p.ExtraVars = map[string]interface{}{ "device": device, } diff --git a/testing/certutil/certutil.go b/testing/certutil/certutil.go new file mode 100644 index 000000000000..422bf4969d43 --- /dev/null +++ b/testing/certutil/certutil.go @@ -0,0 +1,186 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package certutil + +import ( + "bytes" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "math/big" + "time" +) + +// TODO: move it to a more generic place. Probably elastic-agent-client. +// Moving it to the agent-client would allow to have a mock.StubServerV2 with +// TLS out of the box. With that, we could also remove the +// `management.insecure_grpc_url_for_testing` flag from the beats. +// This can also be expanded to save the certificates and keys to disk, making +// an tool for us to generate certificates whenever we need. 
+ +// NewRootCA generates a new x509 Certificate and returns: +// - the private key +// - the certificate +// - the certificate in PEM format as a byte slice. +// +// If any error occurs during the generation process, a non-nil error is returned. +func NewRootCA() (*ecdsa.PrivateKey, *x509.Certificate, []byte, error) { + rootKey, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader) + if err != nil { + return nil, nil, nil, fmt.Errorf("could not create private key: %w", err) + } + + notBefore := time.Now() + notAfter := notBefore.Add(3 * time.Hour) + + rootTemplate := x509.Certificate{ + DNSNames: []string{"localhost"}, + SerialNumber: big.NewInt(1653), + Subject: pkix.Name{ + Organization: []string{"Gallifrey"}, + CommonName: "localhost", + }, + NotBefore: notBefore, + NotAfter: notAfter, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + IsCA: true, + } + + rootCertRawBytes, err := x509.CreateCertificate( + rand.Reader, &rootTemplate, &rootTemplate, &rootKey.PublicKey, rootKey) + if err != nil { + return nil, nil, nil, fmt.Errorf("could not create CA: %w", err) + } + + rootPrivKeyDER, err := x509.MarshalECPrivateKey(rootKey) + if err != nil { + return nil, nil, nil, fmt.Errorf("could not marshal private key: %w", err) + } + + // PEM private key + var rootPrivBytesOut []byte + rootPrivateKeyBuff := bytes.NewBuffer(rootPrivBytesOut) + err = pem.Encode(rootPrivateKeyBuff, &pem.Block{ + Type: "EC PRIVATE KEY", Bytes: rootPrivKeyDER}) + if err != nil { + return nil, nil, nil, fmt.Errorf("could not pem.Encode private key: %w", err) + } + + // PEM certificate + var rootCertBytesOut []byte + rootCertPemBuff := bytes.NewBuffer(rootCertBytesOut) + err = pem.Encode(rootCertPemBuff, &pem.Block{ + Type: "CERTIFICATE", Bytes: rootCertRawBytes}) + if err != nil { + return nil, nil, nil, fmt.Errorf("could not pem.Encode certificate: %w", 
err) + } + + // tls.Certificate + rootTLSCert, err := tls.X509KeyPair( + rootCertPemBuff.Bytes(), rootPrivateKeyBuff.Bytes()) + if err != nil { + return nil, nil, nil, fmt.Errorf("could not create key pair: %w", err) + } + + rootCACert, err := x509.ParseCertificate(rootTLSCert.Certificate[0]) + if err != nil { + return nil, nil, nil, fmt.Errorf("could not parse certificate: %w", err) + } + + return rootKey, rootCACert, rootCertPemBuff.Bytes(), nil +} + +// GenerateChildCert generates a x509 Certificate as a child of caCert and +// returns the following: +// - the certificate in PEM format as a byte slice +// - the private key in PEM format as a byte slice +// - the certificate and private key as a tls.Certificate +// +// If any error occurs during the generation process, a non-nil error is returned. +func GenerateChildCert(name string, caPrivKey *ecdsa.PrivateKey, caCert *x509.Certificate) ( + []byte, []byte, *tls.Certificate, error) { + + notBefore := time.Now() + notAfter := notBefore.Add(3 * time.Hour) + + certTemplate := &x509.Certificate{ + DNSNames: []string{name}, + SerialNumber: big.NewInt(1658), + Subject: pkix.Name{ + Organization: []string{"Gallifrey"}, + CommonName: name, + }, + NotBefore: notBefore, + NotAfter: notAfter, + KeyUsage: x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, + } + + privateKey, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader) + if err != nil { + return nil, nil, nil, fmt.Errorf("could not create private key: %w", err) + } + + certRawBytes, err := x509.CreateCertificate( + rand.Reader, certTemplate, caCert, &privateKey.PublicKey, caPrivKey) + if err != nil { + return nil, nil, nil, fmt.Errorf("could not create CA: %w", err) + } + + privateKeyDER, err := x509.MarshalECPrivateKey(privateKey) + if err != nil { + return nil, nil, nil, fmt.Errorf("could not marshal private key: %w", err) + } + + // PEM private key + var privBytesOut []byte + 
privateKeyBuff := bytes.NewBuffer(privBytesOut) + err = pem.Encode(privateKeyBuff, &pem.Block{ + Type: "EC PRIVATE KEY", Bytes: privateKeyDER}) + if err != nil { + return nil, nil, nil, fmt.Errorf("could not pem.Encode private key: %w", err) + } + privateKeyPemBytes := privateKeyBuff.Bytes() + + // PEM certificate + var certBytesOut []byte + certBuff := bytes.NewBuffer(certBytesOut) + err = pem.Encode(certBuff, &pem.Block{ + Type: "CERTIFICATE", Bytes: certRawBytes}) + if err != nil { + return nil, nil, nil, fmt.Errorf("could not pem.Encode certificate: %w", err) + } + certPemBytes := certBuff.Bytes() + + // TLS Certificate + tlsCert, err := tls.X509KeyPair(certPemBytes, privateKeyPemBytes) + if err != nil { + return nil, nil, nil, fmt.Errorf("could not create key pair: %w", err) + } + + return privateKeyPemBytes, certPemBytes, &tlsCert, nil +} diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 0901d9638f64..f6eeaaa7382a 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-l534sdis-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-c6fcd738-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -31,7 +31,7 @@ services: - "./docker/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles" logstash: - image: docker.elastic.co/logstash/logstash:8.13.0-l534sdis-SNAPSHOT + image: docker.elastic.co/logstash/logstash:8.13.0-c6fcd738-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -44,7 +44,7 @@ services: - 5055:5055 kibana: - image: docker.elastic.co/kibana/kibana:8.13.0-l534sdis-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.13.0-c6fcd738-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - 
"ELASTICSEARCH_PASSWORD=testing" diff --git a/winlogbeat/docs/howto/load-ingest-pipelines.asciidoc b/winlogbeat/docs/howto/load-ingest-pipelines.asciidoc index fa795f0b6b2a..0d7f842249e1 100644 --- a/winlogbeat/docs/howto/load-ingest-pipelines.asciidoc +++ b/winlogbeat/docs/howto/load-ingest-pipelines.asciidoc @@ -24,7 +24,7 @@ to load the ingest pipelines with the `setup` command or manually. === setup command On a machine that has {beatname_uc} installed and has {es} configured -as the outup, run the `setup` command with the `--pipelines` option +as the output, run the `setup` command with the `--pipelines` option specified. For example, the following command loads the ingest pipelines: diff --git a/x-pack/auditbeat/module/system/_meta/fields.yml b/x-pack/auditbeat/module/system/_meta/fields.yml index 61908a6ce292..43101839c0a1 100644 --- a/x-pack/auditbeat/module/system/_meta/fields.yml +++ b/x-pack/auditbeat/module/system/_meta/fields.yml @@ -30,6 +30,25 @@ - name: process type: group fields: + - name: thread.capabilities.effective + level: extended + type: keyword + ignore_above: 1024 + description: This is the set of capabilities used by the kernel to perform permission + checks for the thread. + example: '["CAP_BPF", "CAP_SYS_ADMIN"]' + pattern: ^(CAP_[A-Z_]+|\d+)$ + default_field: false + - name: thread.capabilities.permitted + level: extended + type: keyword + ignore_above: 1024 + description: This is a limiting superset for the effective capabilities that + the thread may assume. + example: '["CAP_BPF", "CAP_SYS_ADMIN"]' + pattern: ^(CAP_[A-Z_]+|\d+)$ + default_field: false + - name: hash type: group description: > diff --git a/x-pack/auditbeat/module/system/fields.go b/x-pack/auditbeat/module/system/fields.go index 7711dffe2c09..4b0a95d23b0a 100644 --- a/x-pack/auditbeat/module/system/fields.go +++ b/x-pack/auditbeat/module/system/fields.go @@ -19,5 +19,5 @@ func init() { // AssetSystem returns asset data. 
// This is the base64 encoded zlib format compressed contents of module/system. func AssetSystem() string { - return "eJy0WV1v2zoSffevGPSlCeAqiNsEhR8WSJtiE2y7DdYp0DebEscSNxSpS1JJ1F9/QerDkk3ZlqMroEDNkOec+eBoSH2AJyzmoAttMJ0AGGY4zuHdwg28mwBQ1JFimWFSzOFfEwCAxwQ1AlEIJkFYM+RUQ4wCFTFIISzceIkJqaQ5x2ACoJAj0TiHEA2ZQLVwPpkAfABBUpwDPqMwjsMUGc4hVjLP3O96sv1/PVsqFjPhhuoFT1i8SEWrMY92+/x060CunU7HGcBjwjRERECIQGDNOEJGTAJnGMQBrC6eibrgMrb/gsvV+bRBk8rBWEk1ZGV6JNNMChQGTEIM6DzLOEPqplBiSI0t0HAmnlbnQdsXuUZ1tCtQGGaKJaPDvXF/C7lgf+XIC2DUAq0LJmKn0moAKYBAIrUJ4N6A9ZJMs9xGmmggsLi7+TC7uoaE6GTjlNIRdhXc305LIPsfImj5w+oOOjYYVCkThA834bFaWdNago4vMyUj1Ppod7Zs2Z7eK+KO6AR1k1WvGOWGhBxtaqG1Q7stQ3gsFTNJ6qi0c4hd8Ex4jm5Kg+g8iK+AIpIUKVAWozbVTGfftv6NBSEnTzgLl7Or6w2ex6Nb5nz5fvOfb7OwCajHnEkP08fPn05h+vj501Cmq8vZKUxXl7NjmXRCZrNB5izubmazoy3RCRnorsXdzQBPWfzlcAvcmmEcw9Kr5Dg+txzHCZ5aDvXVwJRyHMPy6epydkJEri5nF8Ni4ngGR8XxHB+X19fkepApv39f7zWiMcC9OQOSU+bvAzzFt1sAW0VcatMM+gp5D179rCzACiIpDGGi7nB4+VJjYi1VSuy6oLVqu8epn22Nrdd8ZliKHeJSKZci7gyXhHOguXK8nT8ykeVmWU8RREiNkRRUd2bJ3LSnEX1LCu+MTGHEtHPKZefve/xln1/OGmCiLSHwmB1KaXoMp8TgEM4vUhqwWD6eKnqo2B+kHrJQSo5EDOFboAG2rtLAdkANh0+AFfZHCgzsT4+A7W1zhID/tlrNGr7dcU3B9ZVfFo97Bcn1WqMJNEbHZN8BTY8bHRbVZsCe6FuV4/njrkLzMTFf0E/kgPtbHwVRUcIMRiZXIxrUga1OCq+fr5fXn859IlLii+IJ3D9uvgKhVKHW6I0dyzxEW4MHOO4f9lNI7aHYrtwHWFZSt2p3q1wDCWVu3GaRmT2y2sNO9d7p1tudmt0uKxR3Enif1w/65OeiAZ3a8kJEUUVdG4UmSs4Dr5KME2NtG1VJDVopiFAYqaeQh7kw+RRemKDyRfcoGt0v7mhdKvlBIjvyu4d6TVLGi1HJS8iKXiFNiJkCxZARMYW1Qgw1PeSRZ1R6+4X9Vl0Vpp/wCZVAPh7fo2ezvNcVzX4plnVUw+1qONOI8O3rAqQO7EDL8c3GINETifFNHWCFsbeQEAFMaEM4RwpSgcJUPiOt+d/WHW7f6xxy4F737bvpqdUevOKpGo2dyDRXPRUSlJWMiMYTvjzprRknmvjQIvfx+HbiG6n2WFXFe0y2CrKvDxmTqt2A+Pg4i1CMa10F6W07yj02ypmhpqswew8Pmv056mR2FJkF85LkaUpUcQJgudCHmSs+Zlh+/e/7bn1t7qfbFEOKqwU42KLZSbq8gt7t0Y6vp/9UdwLwq3uZveMlto34drbuMWTDFY/L9W8by14yytTYhr3XkMgULTRGRnZTu33JhXzE3gbgQclYkRSMBJULIAa4jFlPP2MTctnK1VE9Xt0wuQ8k7Rsm+CngOxP56xRMwrR9Q9vNEWMkdZntPRmxc2aqFcrw/xiZYQJXDu5AM1SUpHrz/YhpyIgytnE4C7GQ1QePvIx4ppitYuWqrf7Zv5Nh/24+FIWjIgFN/u9ubdi75T
b0TBiMcXuXDKTv234Z0dpjXN9R+XBsa8D94W2iVs2GMyFN1UBWI8xo5OvBkfScE2CsSN7syLawATxIrVnI2x/fYKUTQuXLsvFHD+ZZx2jXGduNKcoPwA7DfUU+n258u6RMk5AjXU17UFdCbpgtR7nZKRExKplr14+LQgp036q5jIGJc9dm9yFGqshMG/QlQdENmYuN1X6BJrpwwxQ0Yqp7QI2ss8Qef1A4DnfmKRF3ot/qGok2yyixBvVvnZ12rnyOCvaj+7pedGpMbegL0U4AVAKCyd8BAAD//yDbzZE=" + return "eJy8Wm1v27oV/u5fcRAMaILrKotvExT+MMBt7pZg7W0wp0C3uzubEo8lLhSpkVQSFfvxA6kXSzZlW4k7A0VjmXyec57z4kPJb+EBiynoQhtMRwCGGY5TOJm7CycjAIo6UiwzTIop/GkEAHCfoEYgCsEkCCuGnGqIUaAiBimEhbteYkIqac4xGAEo5Eg0TiFEQ0ZQbZyORgBvQZAUp4CPKIzjMEWGU4iVzDP3vl5s/65XS8ViJtylesMDFk9S0eqax3b7+uL2gVw5Ox1nAPcJ0xARASECgRXjCBkxCZxiEAewPH8k6pzL2P4LLpZn4wZNKgdjTaohK9cjmWZSoDBgEmJA51nGGVK3hBJDamyBhjPxsDwL2lrkGtXBUqAwzBQLRoercXsNuWD/yZEXwKgFWhVMxM5KawNIAQQSqU0AtwasSjLNchtpooHA/Gb2dnJ5BQnRyVqUUgi7C26vxyWQ/YMIWr6xdgcdHwyqlAnCh7twX+2saS1BR8tMyQi1PlhOkygkNIhIRkLGmWGoA1ytMDLsEStajo/Ip4DPBgXFXcKzWEiFCxLKR5zCxR8n73zuuARkukwgNNaXNr91qqmtB1QCORgJGaqVVKn9P2VaMykaVaIEowcNqypBK5+qj/GZpJkt9Te/nXyc3S0+3P35ZAzuz/nf54vZ9efbX09+f1OtzogxqMQU/nVqV/w2e/uPxe8//fef9KezPzS+rEjOzcLJOYUV4Rr3auqsNqZR70dpSoCzlBmb1jrPUFl9a12auHbltiXbSLnWD1JSANE6r7P3/yVlR8tWrW2mc2+R3BCdoG663jNGuSEhR9v6bEYV2rV0wmOpmElSR6VdwdoNj4Tn6JZ0VEnwGVBEkiIFymLUploZjKp17fpaexBy8oCTcDG5vFrjeeK84c6HT7O//jIJm4bjcWfUw/Tz+3cvYfr5/buhTJcXk5cwXV5MDmXSCZlMBrkzv5lNJgd7ohMyUK75zWyAUhZ/MdwDt2cYx7D0KjkOzy3H8QKlFkO1GphSjmNYPl1eTF4QkcuLyfmwmDiewVFxPIfH5fk5uRrkyrdvVzudaBxwk11Acsr8c6qn+XYbYKuJS73+hvE18h68+rW0AEuIpDCEiXoC5+XQxYQdC4jdF7R2bc7g9WvTxtYYmhmWYoe4tJRLEXcul4RToLlyvJ0Pmchys6iXCCKkxkgKqjurZG7ay4i+JoV3RaYwYtqJctH5fIde9vXVeQNMtE0IPG6HUpoexykxOITzg5QGLJaPp4oeKvYdqYcslJIjEUP45miArao0sLNPw+EzwBr2XQoM7FuPAZtlc4ABv7aOQjV8+0QwBnfu+TC/32mQXK00mkBjdEj27bHpfm2HRbUZsCP61srj6XFTofmYmC/oL+SA22sfBVFRwgxGJldHdKgDW51kn99fLa7enfmMSIkvii/g/jz7CIRShVqjN3Ys8xBtXNzDcXu3m0JqD8Vm597DspS61btb7RpIKHPjikVmaFupPbWU3zvdfrvVs9ttheJWAu9Sfa8mX+YN6Ni2FyKKKuraKDRRchZ4Lck4Mda3o1pSg1YWRCiM1GPIw1yYfAxPTFD5pHssOrou7tZPaclnEtkr33qoVyRlvDgqeQlZ0SukCTFjoBgyIsawUoihpvsUeUSlN7+wX2tXheknLO9fHI/v3lMsb3R9m2SnKZb1qI
7b3XCqEeGXj3OQOrAXWsI3hUGiBxLjqybACmNnIyECmNCGcI4UpAKFqXxEWvO/bjrcvO+4T8Cd8u26E1lbu/cWZDVobEWmuRVZIUHZyYholPDlSW/PeKGLdy1yH4+vEl9JtcOrKt7HZKsg++aQY1K1BxAfH2cRiuN6V0F6x46yxo5yZqjpKszew4Nm3w86mR1EZsG8JHmaElW8ALDc6MPMFT9mWL7+7dN2f22en7QphjRXC7B3RLOLdPmIZHtGO7yf/qjpBOBr92HLlkpsE/H1bN1jyJorPi7XX2wse8koU8d27I2GRKZooTEyspva7ZtcyI842wDcKRkrkoKRoHIBxACXMeuZZ2xCLlq5elTFqztM7gFe+w4TfBHwiYn8eQym9RgrxkjqMtt7MmLrzFRbKMN/Y2SGGbh0cHuGoaIk1evnm0xDRpR74HYaYiGrBx55GfFMMdvFyl0b87O/kmF3Ne+LwkGRgCb/t0sbdpbcmp4JgzFuVslA+r7yy4jWHuf6jsr7Y1sD7g5vE7VqNZwKaaoBsrrCjEa+GhxJzzkBjhXJ2ZbZFjaAO6k1C3n74RssdUKofFo0evRgnnacdpOxLUxRPl92GO5XDmfjtbYLyjQJOdLluAd1KeSa2XKUxU6JiFHJXLt5XBRSoPstBZcxMHHmxuw+xEgVmWmDPiUouiFzsbG2n6OJzt1lChox1T2gRtZZYo8/KByHO/OUiFvRb02NRJtFlFiH+ktna5wrXwcF+979+qPo9Jja0SeinQFQGRCM/hcAAP//VUidAg==" } diff --git a/x-pack/auditbeat/module/system/process/process.go b/x-pack/auditbeat/module/system/process/process.go index d2dfae065980..08a72fe562e5 100644 --- a/x-pack/auditbeat/module/system/process/process.go +++ b/x-pack/auditbeat/module/system/process/process.go @@ -18,6 +18,7 @@ import ( "github.com/elastic/beats/v7/auditbeat/datastore" "github.com/elastic/beats/v7/auditbeat/helper/hasher" + "github.com/elastic/beats/v7/libbeat/common/capabilities" "github.com/elastic/beats/v7/libbeat/common/cfgwarn" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/x-pack/auditbeat/cache" @@ -101,12 +102,14 @@ type MetricSet struct { // Process represents information about a process. type Process struct { - Info types.ProcessInfo - UserInfo *types.UserInfo - User *user.User - Group *user.Group - Hashes map[hasher.HashType]hasher.Digest - Error error + Info types.ProcessInfo + UserInfo *types.UserInfo + User *user.User + Group *user.Group + CapEffective []string + CapPermitted []string + Hashes map[hasher.HashType]hasher.Digest + Error error } // Hash creates a hash for Process. 
@@ -376,6 +379,13 @@ func (ms *MetricSet) processEvent(process *Process, eventType string, action eve event.RootFields.Put("user.group.name", process.Group.Name) } + if len(process.CapEffective) > 0 { + event.RootFields.Put("process.thread.capabilities.effective", process.CapEffective) + } + if len(process.CapPermitted) > 0 { + event.RootFields.Put("process.thread.capabilities.permitted", process.CapPermitted) + } + if process.Hashes != nil { for hashType, digest := range process.Hashes { fieldName := "process.hash." + string(hashType) @@ -489,8 +499,20 @@ func (ms *MetricSet) getProcesses() ([]*Process, error) { } // Exclude Linux kernel processes, they are not very interesting. - if runtime.GOOS == "linux" && userInfo.UID == "0" && process.Info.Exe == "" { - continue + if runtime.GOOS == "linux" { + if userInfo.UID == "0" && process.Info.Exe == "" { + continue + } + + // Fetch Effective and Permitted capabilities + process.CapEffective, err = capabilities.FromPid(capabilities.Effective, pInfo.PID) + if err != nil && process.Error == nil { + process.Error = err + } + process.CapPermitted, err = capabilities.FromPid(capabilities.Permitted, pInfo.PID) + if err != nil && process.Error == nil { + process.Error = err + } } processes = append(processes, process) diff --git a/x-pack/auditbeat/module/system/socket/events.go b/x-pack/auditbeat/module/system/socket/events.go index ad652b9aac57..beb0a988a7c9 100644 --- a/x-pack/auditbeat/module/system/socket/events.go +++ b/x-pack/auditbeat/module/system/socket/events.go @@ -18,7 +18,7 @@ import ( "golang.org/x/sys/unix" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" + "github.com/elastic/beats/v7/auditbeat/tracing" ) const ( diff --git a/x-pack/auditbeat/module/system/socket/guess/creds.go b/x-pack/auditbeat/module/system/socket/guess/creds.go index 7df1b0c1c2f5..8c808dcdbe53 100644 --- a/x-pack/auditbeat/module/system/socket/guess/creds.go +++ b/x-pack/auditbeat/module/system/socket/guess/creds.go @@ -14,8 +14,8 
@@ import ( "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/guess/cskxmit6.go b/x-pack/auditbeat/module/system/socket/guess/cskxmit6.go index d77dc7a2bbe0..258d9f21a4f9 100644 --- a/x-pack/auditbeat/module/system/socket/guess/cskxmit6.go +++ b/x-pack/auditbeat/module/system/socket/guess/cskxmit6.go @@ -13,8 +13,8 @@ import ( "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/guess/deref.go b/x-pack/auditbeat/module/system/socket/guess/deref.go index 7996a8cd8b36..e2c3c0082c54 100644 --- a/x-pack/auditbeat/module/system/socket/guess/deref.go +++ b/x-pack/auditbeat/module/system/socket/guess/deref.go @@ -13,8 +13,8 @@ import ( "strconv" "syscall" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/guess/guess.go b/x-pack/auditbeat/module/system/socket/guess/guess.go index 05c2aa4668a1..718afa0ad7b5 100644 --- a/x-pack/auditbeat/module/system/socket/guess/guess.go +++ b/x-pack/auditbeat/module/system/socket/guess/guess.go @@ -12,8 +12,8 @@ import ( "fmt" "time" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git 
a/x-pack/auditbeat/module/system/socket/guess/inetsock.go b/x-pack/auditbeat/module/system/socket/guess/inetsock.go index 707db38b7e74..f9d1db85639f 100644 --- a/x-pack/auditbeat/module/system/socket/guess/inetsock.go +++ b/x-pack/auditbeat/module/system/socket/guess/inetsock.go @@ -14,8 +14,8 @@ import ( "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/guess/inetsock6.go b/x-pack/auditbeat/module/system/socket/guess/inetsock6.go index 4a937a554854..438c09d65c3f 100644 --- a/x-pack/auditbeat/module/system/socket/guess/inetsock6.go +++ b/x-pack/auditbeat/module/system/socket/guess/inetsock6.go @@ -14,8 +14,8 @@ import ( "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/guess/inetsockaf.go b/x-pack/auditbeat/module/system/socket/guess/inetsockaf.go index 60fbfed71055..69676b41a2d9 100644 --- a/x-pack/auditbeat/module/system/socket/guess/inetsockaf.go +++ b/x-pack/auditbeat/module/system/socket/guess/inetsockaf.go @@ -12,8 +12,8 @@ import ( "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/guess/iplocalout.go b/x-pack/auditbeat/module/system/socket/guess/iplocalout.go index 6a997af23ae2..26a95405e8ec 100644 --- a/x-pack/auditbeat/module/system/socket/guess/iplocalout.go +++ 
b/x-pack/auditbeat/module/system/socket/guess/iplocalout.go @@ -13,8 +13,8 @@ import ( "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/guess/skbuff.go b/x-pack/auditbeat/module/system/socket/guess/skbuff.go index 85589f8a4fe3..ba53089aed38 100644 --- a/x-pack/auditbeat/module/system/socket/guess/skbuff.go +++ b/x-pack/auditbeat/module/system/socket/guess/skbuff.go @@ -17,8 +17,8 @@ import ( "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/guess/sockaddrin.go b/x-pack/auditbeat/module/system/socket/guess/sockaddrin.go index 2a76d564ba5b..bfaebf544af4 100644 --- a/x-pack/auditbeat/module/system/socket/guess/sockaddrin.go +++ b/x-pack/auditbeat/module/system/socket/guess/sockaddrin.go @@ -14,8 +14,8 @@ import ( "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/guess/sockaddrin6.go b/x-pack/auditbeat/module/system/socket/guess/sockaddrin6.go index 5564015530b7..f9f7c1874215 100644 --- a/x-pack/auditbeat/module/system/socket/guess/sockaddrin6.go +++ b/x-pack/auditbeat/module/system/socket/guess/sockaddrin6.go @@ -13,8 +13,8 @@ import ( "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - 
"github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/guess/socketsk.go b/x-pack/auditbeat/module/system/socket/guess/socketsk.go index 5ebc0ab7de65..3c12cd294630 100644 --- a/x-pack/auditbeat/module/system/socket/guess/socketsk.go +++ b/x-pack/auditbeat/module/system/socket/guess/socketsk.go @@ -12,8 +12,8 @@ import ( "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/guess/syscallargs.go b/x-pack/auditbeat/module/system/socket/guess/syscallargs.go index 3930e7134b9c..902940985b93 100644 --- a/x-pack/auditbeat/module/system/socket/guess/syscallargs.go +++ b/x-pack/auditbeat/module/system/socket/guess/syscallargs.go @@ -12,8 +12,8 @@ import ( "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/guess/tcpsendmsgargs.go b/x-pack/auditbeat/module/system/socket/guess/tcpsendmsgargs.go index faa3910ba5f6..058736eec56f 100644 --- a/x-pack/auditbeat/module/system/socket/guess/tcpsendmsgargs.go +++ b/x-pack/auditbeat/module/system/socket/guess/tcpsendmsgargs.go @@ -10,8 +10,8 @@ package guess import ( "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/guess/tcpsendmsgsk.go 
b/x-pack/auditbeat/module/system/socket/guess/tcpsendmsgsk.go index 450a336df6e1..73f810e74146 100644 --- a/x-pack/auditbeat/module/system/socket/guess/tcpsendmsgsk.go +++ b/x-pack/auditbeat/module/system/socket/guess/tcpsendmsgsk.go @@ -12,8 +12,8 @@ import ( "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/guess/udpsendmsg.go b/x-pack/auditbeat/module/system/socket/guess/udpsendmsg.go index 5ab70f92a480..09241e6641ea 100644 --- a/x-pack/auditbeat/module/system/socket/guess/udpsendmsg.go +++ b/x-pack/auditbeat/module/system/socket/guess/udpsendmsg.go @@ -10,8 +10,8 @@ package guess import ( "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/helper/probes.go b/x-pack/auditbeat/module/system/socket/helper/probes.go index 24ad0eda3d9a..3ebb3e2cfcbc 100644 --- a/x-pack/auditbeat/module/system/socket/helper/probes.go +++ b/x-pack/auditbeat/module/system/socket/helper/probes.go @@ -12,7 +12,7 @@ import ( "strings" "text/template" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/helper/types.go b/x-pack/auditbeat/module/system/socket/helper/types.go index 1365aeaf9e0f..d466e847e751 100644 --- a/x-pack/auditbeat/module/system/socket/helper/types.go +++ b/x-pack/auditbeat/module/system/socket/helper/types.go @@ -7,7 +7,7 @@ package helper import ( - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" + 
"github.com/elastic/beats/v7/auditbeat/tracing" ) // Logger exposes logging functions. diff --git a/x-pack/auditbeat/module/system/socket/kprobes.go b/x-pack/auditbeat/module/system/socket/kprobes.go index 3660f6a5a1dc..a87813459910 100644 --- a/x-pack/auditbeat/module/system/socket/kprobes.go +++ b/x-pack/auditbeat/module/system/socket/kprobes.go @@ -14,8 +14,8 @@ import ( "github.com/joeshaw/multierror" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/kprobes_test.go b/x-pack/auditbeat/module/system/socket/kprobes_test.go index fdaeac8f8bc6..8ddca79e957a 100644 --- a/x-pack/auditbeat/module/system/socket/kprobes_test.go +++ b/x-pack/auditbeat/module/system/socket/kprobes_test.go @@ -11,9 +11,9 @@ import ( "strings" "testing" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/guess" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" ) func probeName(p tracing.Probe) string { diff --git a/x-pack/auditbeat/module/system/socket/socket_linux.go b/x-pack/auditbeat/module/system/socket/socket_linux.go index c7b7a9794538..b334b8488921 100644 --- a/x-pack/auditbeat/module/system/socket/socket_linux.go +++ b/x-pack/auditbeat/module/system/socket/socket_linux.go @@ -23,13 +23,13 @@ import ( "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/cfgwarn" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/guess" 
"github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" "github.com/elastic/go-perf" diff --git a/x-pack/auditbeat/module/system/socket/state.go b/x-pack/auditbeat/module/system/socket/state.go index a302bba0caa5..19bb729a8442 100644 --- a/x-pack/auditbeat/module/system/socket/state.go +++ b/x-pack/auditbeat/module/system/socket/state.go @@ -20,12 +20,12 @@ import ( "github.com/joeshaw/multierror" "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/flowhash" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/dns" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" "github.com/elastic/go-libaudit/v2/aucoalesce" ) diff --git a/x-pack/auditbeat/module/system/socket/state_test.go b/x-pack/auditbeat/module/system/socket/state_test.go index 611581c5d30c..fd3e125cc408 100644 --- a/x-pack/auditbeat/module/system/socket/state_test.go +++ b/x-pack/auditbeat/module/system/socket/state_test.go @@ -18,10 +18,10 @@ import ( "github.com/stretchr/testify/assert" "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/dns" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" ) type logWrapper testing.T diff --git a/x-pack/auditbeat/module/system/socket/template.go b/x-pack/auditbeat/module/system/socket/template.go index 84f890e5be19..c1a97a163b9d 100644 --- a/x-pack/auditbeat/module/system/socket/template.go +++ 
b/x-pack/auditbeat/module/system/socket/template.go @@ -10,8 +10,8 @@ import ( "strings" "unsafe" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/libbeat/common" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/tracing/doc.go b/x-pack/auditbeat/tracing/doc.go deleted file mode 100644 index 0d716eaf7c97..000000000000 --- a/x-pack/auditbeat/tracing/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -// Package tracing provides a set of tools built on top of -// golang.org/x/sys/unix/linux/perf that simplify working with KProbes and -// UProbes, using tracing perf channels to receive events from the kernel and -// decoding of this raw events into more useful types. -package tracing diff --git a/x-pack/auditbeat/tracing/endian.go b/x-pack/auditbeat/tracing/endian.go deleted file mode 100644 index acb18aa9afa2..000000000000 --- a/x-pack/auditbeat/tracing/endian.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -//go:build linux - -package tracing - -import ( - "encoding/binary" - "unsafe" -) - -// MachineEndian is either binary.BigEndian or binary.LittleEndian, depending -// on the current architecture. 
-var MachineEndian = getCPUEndianness() - -func getCPUEndianness() binary.ByteOrder { - myInt32 := new(uint32) - copy((*[4]byte)(unsafe.Pointer(myInt32))[:], []byte{0x12, 0x34, 0x56, 0x78}) - switch *myInt32 { - case 0x12345678: - return binary.BigEndian - case 0x78563412: - return binary.LittleEndian - default: - panic("cannot determine endianness") - } -} diff --git a/x-pack/auditbeat/tracing/int_aligned.go b/x-pack/auditbeat/tracing/int_aligned.go deleted file mode 100644 index 6c8c4c539725..000000000000 --- a/x-pack/auditbeat/tracing/int_aligned.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -//go:build linux && !386 && !amd64 && !amd64p32 - -// Alignment-safe integer reading and writing functions. - -package tracing - -import ( - "errors" - "unsafe" -) - -var errBadSize = errors.New("bad size for integer") - -func copyInt(dst unsafe.Pointer, src unsafe.Pointer, len uint8) error { - copy((*(*[maxIntSizeBytes]byte)(dst))[:len], (*(*[maxIntSizeBytes]byte)(src))[:len]) - return nil -} - -func readInt(ptr unsafe.Pointer, len uint8, signed bool) (value interface{}, err error) { - asSlice := (*(*[maxIntSizeBytes]byte)(ptr))[:] - switch len { - case 1: - if signed { - value = int8(asSlice[0]) - } else { - value = uint8(asSlice[0]) - } - case 2: - if signed { - value = int16(MachineEndian.Uint16(asSlice)) - } else { - value = MachineEndian.Uint16(asSlice) - } - - case 4: - if signed { - value = int32(MachineEndian.Uint32(asSlice)) - } else { - value = MachineEndian.Uint32(asSlice) - } - - case 8: - if signed { - value = int64(MachineEndian.Uint64(asSlice)) - } else { - value = MachineEndian.Uint64(asSlice) - } - - default: - return nil, errBadSize - } - return -} diff --git a/x-pack/filebeat/docs/inputs/input-cel.asciidoc 
b/x-pack/filebeat/docs/inputs/input-cel.asciidoc index a2512580169d..b6eaa9ad744e 100644 --- a/x-pack/filebeat/docs/inputs/input-cel.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-cel.asciidoc @@ -1,7 +1,7 @@ [role="xpack"] :type: cel -:mito_version: v1.7.0 +:mito_version: v1.8.0 :mito_docs: https://pkg.go.dev/github.com/elastic/mito@{mito_version} [id="{beatname_lc}-input-{type}"] @@ -580,6 +580,13 @@ The RSA JWK Private Key JSON for your Okta Service App which is used for interac NOTE: Only one of the credentials settings can be set at once. For more information please refer to https://developer.okta.com/docs/guides/implement-oauth-for-okta-serviceapp/main/ +[float] +==== `auth.oauth2.okta.jwk_pem` + +The RSA JWK private key PEM block for your Okta Service App which is used for interacting with Okta Org Auth Server to mint tokens with okta.* scopes. + +NOTE: Only one of the credentials settings can be set at once. For more information please refer to https://developer.okta.com/docs/guides/implement-oauth-for-okta-serviceapp/main/ + [[resource-parameters]] [float] ==== `resource.url` diff --git a/x-pack/filebeat/docs/inputs/input-httpjson.asciidoc b/x-pack/filebeat/docs/inputs/input-httpjson.asciidoc index 410edf9f9485..cc3594780e4c 100644 --- a/x-pack/filebeat/docs/inputs/input-httpjson.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-httpjson.asciidoc @@ -11,6 +11,8 @@ Use the `httpjson` input to read messages from an HTTP API with JSON payloads. +If you are starting development of a new custom HTTP API input, we recommend that you use the <> which provides greater flexibility and an improved developer experience. + This input supports: * Auth @@ -399,8 +401,12 @@ NOTE: Only one of the credentials settings can be set at once. For more informat The RSA JWK Private Key JSON for your Okta Service App which is used for interacting with Okta Org Auth Server to mint tokens with okta.* scopes. -NOTE: Only one of the credentials settings can be set at once. 
For more information please refer to https://developer.okta.com/docs/guides/implement-oauth-for-okta-serviceapp/main/ +[float] +==== `auth.oauth2.okta.jwk_pem` +The RSA JWK private key PEM block for your Okta Service App which is used for interacting with Okta Org Auth Server to mint tokens with okta.* scopes. + +NOTE: Only one of the credentials settings can be set at once. For more information please refer to https://developer.okta.com/docs/guides/implement-oauth-for-okta-serviceapp/main/ [float] ==== `auth.oauth2.google.delegated_account` diff --git a/x-pack/filebeat/input/awscloudwatch/_meta/terraform/variables.tf b/x-pack/filebeat/input/awscloudwatch/_meta/terraform/variables.tf index 2c4fb00786bc..78b0a4741477 100644 --- a/x-pack/filebeat/input/awscloudwatch/_meta/terraform/variables.tf +++ b/x-pack/filebeat/input/awscloudwatch/_meta/terraform/variables.tf @@ -3,3 +3,26 @@ variable "aws_region" { type = string default = "us-east-1" } + +variable "BRANCH" { + description = "Branch name or pull request for tagging purposes" + default = "unknown-branch" +} + +variable "BUILD_ID" { + description = "Build ID in the CI for tagging purposes" + default = "unknown-build" +} + +variable "CREATED_DATE" { + description = "Creation date in epoch time for tagging purposes" + default = "unknown-date" +} + +variable "ENVIRONMENT" { + default = "unknown-environment" +} + +variable "REPO" { + default = "unknown-repo-name" +} diff --git a/x-pack/filebeat/input/cel/config.go b/x-pack/filebeat/input/cel/config.go index 94b41190fa6c..3ce271afaa3c 100644 --- a/x-pack/filebeat/input/cel/config.go +++ b/x-pack/filebeat/input/cel/config.go @@ -89,7 +89,7 @@ func (c config) Validate() error { if len(c.Regexps) != 0 { patterns = map[string]*regexp.Regexp{".": nil} } - _, err = newProgram(context.Background(), c.Program, root, client, nil, nil, patterns, c.XSDs, logp.L().Named("input.cel"), nil) + _, _, err = newProgram(context.Background(), c.Program, root, client, nil, nil, patterns, 
c.XSDs, logp.L().Named("input.cel"), nil) if err != nil { return fmt.Errorf("failed to check program: %w", err) } diff --git a/x-pack/filebeat/input/cel/config_auth.go b/x-pack/filebeat/input/cel/config_auth.go index e550a9635d51..d6b35d633e69 100644 --- a/x-pack/filebeat/input/cel/config_auth.go +++ b/x-pack/filebeat/input/cel/config_auth.go @@ -6,6 +6,7 @@ package cel import ( "context" + "crypto/x509" "encoding/json" "errors" "fmt" @@ -141,6 +142,7 @@ type oAuth2Config struct { // okta specific RSA JWK private key OktaJWKFile string `config:"okta.jwk_file"` OktaJWKJSON common.JSONBlob `config:"okta.jwk_json"` + OktaJWKPEM string `config:"okta.jwk_pem"` } // isEnabled returns true if the `enable` field is set to true in the yaml. @@ -321,8 +323,26 @@ func (o *oAuth2Config) validateGoogleProvider() error { } func (o *oAuth2Config) validateOktaProvider() error { - if o.TokenURL == "" || o.ClientID == "" || len(o.Scopes) == 0 || (o.OktaJWKJSON == nil && o.OktaJWKFile == "") { - return errors.New("okta validation error: token_url, client_id, scopes and at least one of okta.jwk_json or okta.jwk_file must be provided") + if o.TokenURL == "" || o.ClientID == "" || len(o.Scopes) == 0 { + return errors.New("okta validation error: token_url, client_id, scopes must be provided") + } + var n int + if o.OktaJWKJSON != nil { + n++ + } + if o.OktaJWKFile != "" { + n++ + } + if o.OktaJWKPEM != "" { + n++ + } + if n != 1 { + return errors.New("okta validation error: one of okta.jwk_json, okta.jwk_file or okta.jwk_pem must be provided") + } + // jwk_pem + if o.OktaJWKPEM != "" { + _, err := x509.ParsePKCS1PrivateKey([]byte(o.OktaJWKPEM)) + return err } // jwk_file if o.OktaJWKFile != "" { diff --git a/x-pack/filebeat/input/cel/config_okta_auth.go b/x-pack/filebeat/input/cel/config_okta_auth.go index cf9003dee8a1..74366afd3d5f 100644 --- a/x-pack/filebeat/input/cel/config_okta_auth.go +++ b/x-pack/filebeat/input/cel/config_okta_auth.go @@ -5,10 +5,13 @@ package cel import ( + 
"bytes" "context" "crypto/rsa" + "crypto/x509" "encoding/base64" "encoding/json" + "encoding/pem" "fmt" "math/big" "net/http" @@ -43,9 +46,20 @@ func (o *oAuth2Config) fetchOktaOauthClient(ctx context.Context, _ *http.Client) }, } - oktaJWT, err := generateOktaJWT(o.OktaJWKJSON, conf) - if err != nil { - return nil, fmt.Errorf("oauth2 client: error generating Okta JWT: %w", err) + var ( + oktaJWT string + err error + ) + if len(o.OktaJWKPEM) != 0 { + oktaJWT, err = generateOktaJWTPEM(o.OktaJWKPEM, conf) + if err != nil { + return nil, fmt.Errorf("oauth2 client: error generating Okta JWT PEM: %w", err) + } + } else { + oktaJWT, err = generateOktaJWT(o.OktaJWKJSON, conf) + if err != nil { + return nil, fmt.Errorf("oauth2 client: error generating Okta JWT: %w", err) + } } token, err := exchangeForBearerToken(ctx, oktaJWT, conf) @@ -59,14 +73,16 @@ func (o *oAuth2Config) fetchOktaOauthClient(ctx context.Context, _ *http.Client) oktaJWK: o.OktaJWKJSON, token: token, } - // reuse the tokenSource to refresh the token (automatically calls the custom Token() method when token is no longer valid). + // reuse the tokenSource to refresh the token (automatically calls + // the custom Token() method when token is no longer valid). client := oauth2.NewClient(ctx, oauth2.ReuseTokenSource(token, tokenSource)) return client, nil } -// Token implements the oauth2.TokenSource interface and helps to implement custom token refresh logic. -// Parent context is passed via the customTokenSource struct since we cannot modify the function signature here. +// Token implements the oauth2.TokenSource interface and helps to implement +// custom token refresh logic. The parent context is passed via the +// customTokenSource struct since we cannot modify the function signature here. 
func (ts *oktaTokenSource) Token() (*oauth2.Token, error) { ts.mu.Lock() defer ts.mu.Unlock() @@ -85,70 +101,79 @@ func (ts *oktaTokenSource) Token() (*oauth2.Token, error) { } func generateOktaJWT(oktaJWK []byte, cnf *oauth2.Config) (string, error) { - // unmarshal the JWK into a map - var jwkData map[string]string + // Unmarshal the JWK into big ints. + var jwkData struct { + N base64int `json:"n"` + E base64int `json:"e"` + D base64int `json:"d"` + P base64int `json:"p"` + Q base64int `json:"q"` + Dp base64int `json:"dp"` + Dq base64int `json:"dq"` + Qinv base64int `json:"qi"` + } err := json.Unmarshal(oktaJWK, &jwkData) if err != nil { return "", fmt.Errorf("error decoding JWK: %w", err) } - // create an RSA private key from JWK components - decodeBase64 := func(key string) (*big.Int, error) { - data, err := base64.RawURLEncoding.DecodeString(jwkData[key]) - if err != nil { - return nil, fmt.Errorf("error decoding RSA JWK component %s: %w", key, err) - } - return new(big.Int).SetBytes(data), nil + // Create an RSA private key from JWK components. + key := &rsa.PrivateKey{ + PublicKey: rsa.PublicKey{ + N: &jwkData.N.Int, + E: int(jwkData.E.Int64()), + }, + D: &jwkData.D.Int, + Primes: []*big.Int{&jwkData.P.Int, &jwkData.Q.Int}, + Precomputed: rsa.PrecomputedValues{ + Dp: &jwkData.Dp.Int, + Dq: &jwkData.Dq.Int, + Qinv: &jwkData.Qinv.Int, + }, } - n, err := decodeBase64("n") - if err != nil { - return "", err - } - e, err := decodeBase64("e") - if err != nil { - return "", err - } - d, err := decodeBase64("d") - if err != nil { - return "", err - } - p, err := decodeBase64("p") - if err != nil { - return "", err + return signJWT(cnf, key) + +} + +// base64int is a JSON decoding shim for base64-encoded big.Int. 
+type base64int struct { + big.Int +} + +func (i *base64int) UnmarshalJSON(b []byte) error { + src, ok := bytes.CutPrefix(b, []byte{'"'}) + if !ok { + return fmt.Errorf("invalid JSON type: %s", b) } - q, err := decodeBase64("q") - if err != nil { - return "", err + src, ok = bytes.CutSuffix(src, []byte{'"'}) + if !ok { + return fmt.Errorf("invalid JSON type: %s", b) } - dp, err := decodeBase64("dp") + dst := make([]byte, base64.RawURLEncoding.DecodedLen(len(src))) + _, err := base64.RawURLEncoding.Decode(dst, src) if err != nil { - return "", err + return err } - dq, err := decodeBase64("dq") - if err != nil { - return "", err + i.SetBytes(dst) + return nil +} + +func generateOktaJWTPEM(pemdata string, cnf *oauth2.Config) (string, error) { + blk, rest := pem.Decode([]byte(pemdata)) + if rest := bytes.TrimSpace(rest); len(rest) != 0 { + return "", fmt.Errorf("PEM text has trailing data: %s", rest) } - qi, err := decodeBase64("qi") + key, err := x509.ParsePKCS8PrivateKey(blk.Bytes) if err != nil { return "", err } + return signJWT(cnf, key) +} - privateKeyRSA := &rsa.PrivateKey{ - PublicKey: rsa.PublicKey{ - N: n, - E: int(e.Int64()), - }, - D: d, - Primes: []*big.Int{p, q}, - Precomputed: rsa.PrecomputedValues{ - Dp: dp, - Dq: dq, - Qinv: qi, - }, - } - - // create a JWT token using required claims and sign it with the private key +// signJWT creates a JWT token using required claims and sign it with the +// private key. +func signJWT(cnf *oauth2.Config, key any) (string, error) { now := time.Now() tok, err := jwt.NewBuilder().Audience([]string{cnf.Endpoint.TokenURL}). Issuer(cnf.ClientID). 
@@ -159,11 +184,10 @@ func generateOktaJWT(oktaJWK []byte, cnf *oauth2.Config) (string, error) { if err != nil { return "", err } - signedToken, err := jwt.Sign(tok, jwt.WithKey(jwa.RS256, privateKeyRSA)) + signedToken, err := jwt.Sign(tok, jwt.WithKey(jwa.RS256, key)) if err != nil { return "", fmt.Errorf("failed to sign token: %w", err) } - return string(signedToken), nil } diff --git a/x-pack/filebeat/input/cel/config_okta_auth_test.go b/x-pack/filebeat/input/cel/config_okta_auth_test.go new file mode 100644 index 000000000000..fc02a2ec9e79 --- /dev/null +++ b/x-pack/filebeat/input/cel/config_okta_auth_test.go @@ -0,0 +1,88 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package cel + +import ( + "testing" + + "github.com/lestrrat-go/jwx/v2/jwt" + "golang.org/x/oauth2" +) + +func TestGenerateOktaJWT(t *testing.T) { + // jwt is a JWT obtained from the Okta integration. 
+ const jwtText = `{ "d": "Cmhokw2MnZfX6da36nnsnQ7IPX9vE6se8_D1NgyL9j9rarYpexhlp45hswcAIFNgWA03NV848Gc0e84AW6wMbyD2E8LPI0Bd8lhdmzRE6L4or2Rxqqjk2Pr2aqGnqs4A0uTijAA7MfPF1zFFdR3EOVx499fEeTiMcLjO83IJCoNiOySDoQgt3KofX5bCbaDy2eiB83rzf0fEcWrWfTY65_Hc2c5lek-1uuF7NpELVzX80p5H-b9MOfLn0BdOGe-mJ2j5bXi-UCQ45Wxj2jdkoA_Qwb4MEtXZjp5LjcM75SrlGfVd99acML2wGZgYLGweJ0sAPDlKzGvj4ve-JT8nNw", "p": "8-UBb4psN0wRPktkh3S48L3ng4T5zR08t7nwXDYNajROrS2j7oq60dtlGY4IwgwcC0c9GDQP7NiN2IpU2uahYkGQ7lDyM_h7UfQWL5fMrsYiKgn2pUgSy5TTT8smkSLbJAD35nAH6PknsQ2PuvOlb4laiC0MXw1Rw4vT9HAEB9M", "q": "0DJkPEN0bECG_6lorlNJgIfoNahVevGKK-Yti1YZ5K-nQCuffPCwPG0oZZo_55y5LODe9W7psxnAt7wxkpAY4lK2hpHTWJSkPjqXWFYIP8trn4RZDShnJXli0i1XqPOqkiVzBZGx5nLtj2bUtmXfIU7-kneHGvLQ5EXcyQW1ISM", "dp": "Ye1PWEPSE5ndSo_m-2RoZXE6pdocmrjkijiEQ-IIHN6HwI0Ux1C4lk5rF4mqBo_qKrUd2Lv-sPB6c7mHPKVhoxwEX0vtE-TvTwacadufeYVgblS1zcNUmJ1XAzDkeV3vc1NYNhRBeM-hmjuBvGTbxh72VLsRvpCQhd186yaW17U", "dq": "jvSK7vZCUrJb_-CLCGgX6DFpuK5FQ43mmg4K58nPLb-Oz_kkId4CpPsu6dToXFi4raAad9wYi-n68i4-u6xF6eFxgyVOQVyPCkug7_7i2ysKUxXFL8u2R3z55edMca4eSQt91y0bQmlXxUeOd0-rzms3UcrQ8igYVyXBXCaXIJE", "qi": "iIY1Y4bzMYIFG7XH7gNP7C-mWi6QH4l9aGRTzPB_gPaFThvc0XKW0S0l82bfp_PPPWg4D4QpDCp7rZ6KhEA8BlNi86Vt3V6F3Hz5XiDa4ikgQNsAXiXLqf83R-y1-cwHjW70PP3U89hmalCRRFfVXcLHV77AVHqbrp9rAIo-X-I", "kty": "RSA", "e": "AQAB", "kid": "koeFQjkyiav_3Qwr3aRinCqCD2LaEHOjFnje7XlkbdI", "n": "xloTY8bAuI5AEo8JursCd7w0LmELCae7JOFaVo9njGrG8tRNqgIdjPyoGY_ABwKkmjcCMLGMA29llFDbry8rB4LTWai-h_jX4_uUUnl52mLX-lO6merL5HEPZF438Ql9Hrxs5yGzT8n865-E_3uwYSBrhTjvlZJeXYUeVHfKo8pJSSsw3RZEjBW4Tt0eFmCZnFErtTyk3oUPaYVP-8YLLAenhUDV4Lm1dC4dxqUj0Oh6XrWgIb-eYHGolMY9g9xbgyd4ir39RodA_1DOjzHWpNfCM-J5ZOtfpuKCAe5__u7L8FT0m56XOxcDoVVsz1J1VNrACWAGbhDWNjyHfL5E2Q" }` + cnf := &oauth2.Config{ + ClientID: "0oaajljpeokFZLyKU5d7", + Scopes: []string{"okta.logs.read"}, + } + got, err := generateOktaJWT([]byte(jwtText), cnf) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + tok, err := jwt.Parse([]byte(got), jwt.WithVerify(false)) + if err != 
nil { + t.Fatalf("unexpected error: %v", err) + } + if tok.Issuer() != cnf.ClientID { + t.Errorf("unexpected issuer: got:%s want:%s", tok.Issuer(), cnf.ClientID) + } + if tok.Subject() != cnf.ClientID { + t.Errorf("unexpected subject: got:%s want:%s", tok.Subject(), cnf.ClientID) + } +} + +func TestGenerateOktaJWTPEM(t *testing.T) { + // jwtText is generated by https://mkjwk.org/ using the instructions at + // https://developer.okta.com/docs/guides/dpop/nonoktaresourceserver/main/#create-the-json-web-token + const jwtText = ` +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCOuef3HMRhohVT +5kSoAJgV+atpDjkwTwkOq+ImnbBlv75GaApG90w8VpjXjhqN/1KJmwfyrKiquiMq +OPu+o/672Dys5rUAaWSbT7wRF1GjLDDZrM0GHRdV4DGxM/LKI8I5yE1Mx3EzV+D5 +ZLmcRc5U4oEoMwtGpr0zRZ7uUr6a28UQwcUsVIPItc1/9rERlo1WTv8dcaj4ECC3 +2Sc0y/F+9XqwJvLd4Uv6ckzP0Sv4tbDA+7jpD9MneAIUiZ4LVj2cwbBd+YRY6jXx +MkevcCSmSX60clBY1cIFkw1DYHqtdHEwAQcQHLGMoi72xRP2qrdzIPsaTKVYoHVo +WA9vADdHAgMBAAECggEAIlx7jjCsztyYyeQsL05FTzUWoWo9NnYwtgmHnshkCXsK +MiUmJEOxZO1sSqj5l6oakupyFWigCspZYPbrFNCiqVK7+NxqQzkccY/WtT6p9uDS +ufUyPwCN96zMCd952lSVlBe3FH8Hr9a+YQxw60CbFjCZ67WuR0opTsi6JKJjJSDb +TQQZ4qJR97D05I1TgfmO+VO7G/0/dDaNHnnlYz0AnOgZPSyvrU2G5cYye4842EMB +ng81xjHD+xp55JNui/xYkhmYspYhrB2KlEjkKb08OInUjBeaLEAgA1r9yOHsfV/3 +DQzDPRO9iuqx5BfJhdIqUB1aifrye+sbxt9uMBtUgQKBgQDVdfO3GYT+ZycOQG9P +QtdMn6uiSddchVCGFpk331u6M6yafCKjI/MlJDl29B+8R5sVsttwo8/qnV/xd3cn +pY14HpKAsE4l6/Ciagzoj+0NqfPEDhEzbo8CyArcd7pSxt3XxECAfZe2+xivEPHe +gFO60vSFjFtvlLRMDMOmqX3kYQKBgQCrK1DISyQTnD6/axsgh2/ESOmT7n+JRMx/ +YzA7Lxu3zGzUC8/sRDa1C41t054nf5ZXJueYLDSc4kEAPddzISuCLxFiTD2FQ75P +lHWMgsEzQObDm4GPE9cdKOjoAvtAJwbvZcjDa029CDx7aCaDzbNvdmplZ7EUrznR +55U8Wsm8pwKBgBytxTmzZwfbCgdDJvFKNKzpwuCB9TpL+v6Y6Kr2Clfg+26iAPFU +MiWqUUInGGBuamqm5g6jI5sM28gQWeTsvC4IRXyes1Eq+uCHSQax15J/Y+3SSgNT +9kjUYYkvWMwoRcPobRYWSZze7XkP2L8hFJ7EGvAaZGqAWxzgliS9HtnhAoGAONZ/ +UqMw7Zoac/Ga5mhSwrj7ZvXxP6Gqzjofj+eKqrOlB5yMhIX6LJATfH6iq7cAMxxm 
+Fu/G4Ll4oB3o5wACtI3wldV/MDtYfJBtoCTjBqPsfNOsZ9hMvBATlsc2qwzKjsAb +tFhzTevoOYpSD75EcSS/G8Ec2iN9bagatBnpl00CgYBVqAOFZelNfP7dj//lpk8y +EUAw7ABOq0S9wkpFWTXIVPoBQUipm3iAUqGNPmvr/9ShdZC9xeu5AwKram4caMWJ +ExRhcDP1hFM6CdmSkIYEgBKvN9N0O4Lx1ba34gk74Hm65KXxokjJHOC0plO7c7ok +LNV/bIgMHOMoxiGrwyjAhg== +-----END PRIVATE KEY----- +` + cnf := &oauth2.Config{ + ClientID: "0oaajljpeokFZLyKU5d7", + Scopes: []string{"okta.logs.read"}, + } + got, err := generateOktaJWTPEM(jwtText, cnf) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + tok, err := jwt.Parse([]byte(got), jwt.WithVerify(false)) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if tok.Issuer() != cnf.ClientID { + t.Errorf("unexpected issuer: got:%s want:%s", tok.Issuer(), cnf.ClientID) + } + if tok.Subject() != cnf.ClientID { + t.Errorf("unexpected subject: got:%s want:%s", tok.Subject(), cnf.ClientID) + } +} diff --git a/x-pack/filebeat/input/cel/config_test.go b/x-pack/filebeat/input/cel/config_test.go index 0cd404705e2d..e4c98b78dc5e 100644 --- a/x-pack/filebeat/input/cel/config_test.go +++ b/x-pack/filebeat/input/cel/config_test.go @@ -489,8 +489,8 @@ var oAuth2ValidationTests = []struct { }, }, { - name: "okta requires token_url, client_id, scopes and at least one of okta.jwk_json or okta.jwk_file to be provided", - wantErr: errors.New("okta validation error: token_url, client_id, scopes and at least one of okta.jwk_json or okta.jwk_file must be provided accessing 'auth.oauth2'"), + name: "unique_okta_jwk_token", + wantErr: errors.New("okta validation error: one of okta.jwk_json, okta.jwk_file or okta.jwk_pem must be provided accessing 'auth.oauth2'"), input: map[string]interface{}{ "auth.oauth2": map[string]interface{}{ "provider": "okta", @@ -501,7 +501,7 @@ var oAuth2ValidationTests = []struct { }, }, { - name: "okta oauth2 validation fails if jwk_json is not a valid JSON", + name: "invalid_okta_jwk_json", wantErr: errors.New("the field can't be converted to valid JSON accessing 
'auth.oauth2.okta.jwk_json'"), input: map[string]interface{}{ "auth.oauth2": map[string]interface{}{ @@ -514,7 +514,7 @@ var oAuth2ValidationTests = []struct { }, }, { - name: "okta successful oauth2 validation", + name: "okta_successful_oauth2_validation", input: map[string]interface{}{ "auth.oauth2": map[string]interface{}{ "provider": "okta", diff --git a/x-pack/filebeat/input/cel/input.go b/x-pack/filebeat/input/cel/input.go index e90ee60535a3..12dd4c4dcecf 100644 --- a/x-pack/filebeat/input/cel/input.go +++ b/x-pack/filebeat/input/cel/input.go @@ -151,7 +151,7 @@ func (i input) run(env v2.Context, src *source, cursor map[string]interface{}, p Password: cfg.Auth.Basic.Password, } } - prg, err := newProgram(ctx, cfg.Program, root, client, limiter, auth, patterns, cfg.XSDs, log, trace) + prg, ast, err := newProgram(ctx, cfg.Program, root, client, limiter, auth, patterns, cfg.XSDs, log, trace) if err != nil { return err } @@ -233,7 +233,7 @@ func (i input) run(env v2.Context, src *source, cursor map[string]interface{}, p log.Debugw("request state", logp.Namespace("cel"), "state", redactor{state: state, cfg: cfg.Redact}) metrics.executions.Add(1) start := i.now().In(time.UTC) - state, err = evalWith(ctx, prg, state, start) + state, err = evalWith(ctx, prg, ast, state, start) log.Debugw("response state", logp.Namespace("cel"), "state", redactor{state: state, cfg: cfg.Redact}) if err != nil { switch { @@ -723,7 +723,9 @@ func newClient(ctx context.Context, cfg config, log *logp.Logger) (*http.Client, ) traceLogger := zap.New(core) - trace = httplog.NewLoggingRoundTripper(c.Transport, traceLogger) + const margin = 1e3 // 1OkB ought to be enough room for all the remainder of the trace details. 
+ maxSize := cfg.Resource.Tracer.MaxSize * 1e6 + trace = httplog.NewLoggingRoundTripper(c.Transport, traceLogger, max(0, maxSize-margin)) c.Transport = trace } @@ -898,10 +900,10 @@ var ( } ) -func newProgram(ctx context.Context, src, root string, client *http.Client, limiter *rate.Limiter, auth *lib.BasicAuth, patterns map[string]*regexp.Regexp, xsd map[string]string, log *logp.Logger, trace *httplog.LoggingRoundTripper) (cel.Program, error) { +func newProgram(ctx context.Context, src, root string, client *http.Client, limiter *rate.Limiter, auth *lib.BasicAuth, patterns map[string]*regexp.Regexp, xsd map[string]string, log *logp.Logger, trace *httplog.LoggingRoundTripper) (cel.Program, *cel.Ast, error) { xml, err := lib.XML(nil, xsd) if err != nil { - return nil, fmt.Errorf("failed to build xml type hints: %w", err) + return nil, nil, fmt.Errorf("failed to build xml type hints: %w", err) } opts := []cel.EnvOption{ cel.Declarations(decls.NewVar(root, decls.Dyn)), @@ -930,19 +932,19 @@ func newProgram(ctx context.Context, src, root string, client *http.Client, limi } env, err := cel.NewEnv(opts...) 
if err != nil { - return nil, fmt.Errorf("failed to create env: %w", err) + return nil, nil, fmt.Errorf("failed to create env: %w", err) } ast, iss := env.Compile(src) if iss.Err() != nil { - return nil, fmt.Errorf("failed compilation: %w", iss.Err()) + return nil, nil, fmt.Errorf("failed compilation: %w", iss.Err()) } prg, err := env.Program(ast) if err != nil { - return nil, fmt.Errorf("failed program instantiation: %w", err) + return nil, nil, fmt.Errorf("failed program instantiation: %w", err) } - return prg, nil + return prg, ast, nil } func debug(log *logp.Logger, trace *httplog.LoggingRoundTripper) func(string, any) { @@ -960,7 +962,7 @@ func debug(log *logp.Logger, trace *httplog.LoggingRoundTripper) func(string, an } } -func evalWith(ctx context.Context, prg cel.Program, state map[string]interface{}, now time.Time) (map[string]interface{}, error) { +func evalWith(ctx context.Context, prg cel.Program, ast *cel.Ast, state map[string]interface{}, now time.Time) (map[string]interface{}, error) { out, _, err := prg.ContextEval(ctx, map[string]interface{}{ // Replace global program "now" with current time. This is necessary // as the lib.Time now global is static at program instantiation time @@ -974,6 +976,9 @@ func evalWith(ctx context.Context, prg cel.Program, state map[string]interface{} "now": now, root: state, }) + if err != nil { + err = lib.DecoratedError{AST: ast, Err: err} + } if e := ctx.Err(); e != nil { err = e } diff --git a/x-pack/filebeat/input/cel/input_test.go b/x-pack/filebeat/input/cel/input_test.go index c3d31f6ef627..1ee7704f8263 100644 --- a/x-pack/filebeat/input/cel/input_test.go +++ b/x-pack/filebeat/input/cel/input_test.go @@ -1333,7 +1333,10 @@ var inputTests = []struct { want: []map[string]interface{}{ { "error": map[string]interface{}{ - "message": "failed eval: no such overload", // This is the best we get for some errors from CEL. + // This is the best we get for some errors from CEL. 
+ "message": `failed eval: ERROR: :3:56: no such overload + | bytes(get(state.url+'/'+r.id).Body).decode_json()).as(events, { + | .......................................................^`, }, }, }, diff --git a/x-pack/filebeat/input/http_endpoint/handler.go b/x-pack/filebeat/input/http_endpoint/handler.go index 75e34c0928e1..0e2620b5b658 100644 --- a/x-pack/filebeat/input/http_endpoint/handler.go +++ b/x-pack/filebeat/input/http_endpoint/handler.go @@ -177,7 +177,9 @@ func (h *handler) logRequest(r *http.Request, status int, respBody []byte) { zap.ByteString("http.response.body.content", respBody), ) } - httplog.LogRequest(h.reqLogger, r, extra...) + // Limit request logging body size to 10kiB. + const maxBodyLen = 10 * (1 << 10) + httplog.LogRequest(h.reqLogger, r, maxBodyLen, extra...) if scheme != "" { r.URL.Scheme = scheme } diff --git a/x-pack/filebeat/input/httpjson/config_auth.go b/x-pack/filebeat/input/httpjson/config_auth.go index 948948037770..d05592dfa500 100644 --- a/x-pack/filebeat/input/httpjson/config_auth.go +++ b/x-pack/filebeat/input/httpjson/config_auth.go @@ -6,6 +6,7 @@ package httpjson import ( "context" + "crypto/x509" "encoding/json" "errors" "fmt" @@ -104,6 +105,7 @@ type oAuth2Config struct { // okta specific RSA JWK private key OktaJWKFile string `config:"okta.jwk_file"` OktaJWKJSON common.JSONBlob `config:"okta.jwk_json"` + OktaJWKPEM string `config:"okta.jwk_pem"` } // IsEnabled returns true if the `enable` field is set to true in the yaml. 
@@ -289,8 +291,26 @@ func (o *oAuth2Config) validateGoogleProvider() error { } func (o *oAuth2Config) validateOktaProvider() error { - if o.TokenURL == "" || o.ClientID == "" || len(o.Scopes) == 0 || (o.OktaJWKJSON == nil && o.OktaJWKFile == "") { - return errors.New("okta validation error: token_url, client_id, scopes and at least one of okta.jwk_json or okta.jwk_file must be provided") + if o.TokenURL == "" || o.ClientID == "" || len(o.Scopes) == 0 { + return errors.New("okta validation error: token_url, client_id, scopes must be provided") + } + var n int + if o.OktaJWKJSON != nil { + n++ + } + if o.OktaJWKFile != "" { + n++ + } + if o.OktaJWKPEM != "" { + n++ + } + if n != 1 { + return errors.New("okta validation error: one of okta.jwk_json, okta.jwk_file or okta.jwk_pem must be provided") + } + // jwk_pem + if o.OktaJWKPEM != "" { + _, err := x509.ParsePKCS1PrivateKey([]byte(o.OktaJWKPEM)) + return err } // jwk_file if o.OktaJWKFile != "" { diff --git a/x-pack/filebeat/input/httpjson/config_okta_auth.go b/x-pack/filebeat/input/httpjson/config_okta_auth.go index 8bf2995d746a..c2b4289d9c91 100644 --- a/x-pack/filebeat/input/httpjson/config_okta_auth.go +++ b/x-pack/filebeat/input/httpjson/config_okta_auth.go @@ -5,10 +5,13 @@ package httpjson import ( + "bytes" "context" "crypto/rsa" + "crypto/x509" "encoding/base64" "encoding/json" + "encoding/pem" "fmt" "math/big" "net/http" @@ -43,9 +46,20 @@ func (o *oAuth2Config) fetchOktaOauthClient(ctx context.Context, _ *http.Client) }, } - oktaJWT, err := generateOktaJWT(o.OktaJWKJSON, conf) - if err != nil { - return nil, fmt.Errorf("oauth2 client: error generating Okta JWT: %w", err) + var ( + oktaJWT string + err error + ) + if len(o.OktaJWKPEM) != 0 { + oktaJWT, err = generateOktaJWTPEM(o.OktaJWKPEM, conf) + if err != nil { + return nil, fmt.Errorf("oauth2 client: error generating Okta JWT PEM: %w", err) + } + } else { + oktaJWT, err = generateOktaJWT(o.OktaJWKJSON, conf) + if err != nil { + return nil, 
fmt.Errorf("oauth2 client: error generating Okta JWT: %w", err) + } } token, err := exchangeForBearerToken(ctx, oktaJWT, conf) @@ -85,70 +99,78 @@ func (ts *oktaTokenSource) Token() (*oauth2.Token, error) { } func generateOktaJWT(oktaJWK []byte, cnf *oauth2.Config) (string, error) { - // unmarshal the JWK into a map - var jwkData map[string]string + // Unmarshal the JWK into big ints. + var jwkData struct { + N base64int `json:"n"` + E base64int `json:"e"` + D base64int `json:"d"` + P base64int `json:"p"` + Q base64int `json:"q"` + Dp base64int `json:"dp"` + Dq base64int `json:"dq"` + Qinv base64int `json:"qi"` + } err := json.Unmarshal(oktaJWK, &jwkData) if err != nil { return "", fmt.Errorf("error decoding JWK: %w", err) } - // create an RSA private key from JWK components - decodeBase64 := func(key string) (*big.Int, error) { - data, err := base64.RawURLEncoding.DecodeString(jwkData[key]) - if err != nil { - return nil, fmt.Errorf("error decoding RSA JWK component %s: %w", key, err) - } - return new(big.Int).SetBytes(data), nil + // Create an RSA private key from JWK components. + key := &rsa.PrivateKey{ + PublicKey: rsa.PublicKey{ + N: &jwkData.N.Int, + E: int(jwkData.E.Int64()), + }, + D: &jwkData.D.Int, + Primes: []*big.Int{&jwkData.P.Int, &jwkData.Q.Int}, + Precomputed: rsa.PrecomputedValues{ + Dp: &jwkData.Dp.Int, + Dq: &jwkData.Dq.Int, + Qinv: &jwkData.Qinv.Int, + }, } - n, err := decodeBase64("n") - if err != nil { - return "", err - } - e, err := decodeBase64("e") - if err != nil { - return "", err - } - d, err := decodeBase64("d") - if err != nil { - return "", err - } - p, err := decodeBase64("p") - if err != nil { - return "", err + return signJWT(cnf, key) + +} + +// base64int is a JSON decoding shim for base64-encoded big.Int. 
+type base64int struct { + big.Int +} + +func (i *base64int) UnmarshalJSON(b []byte) error { + src, ok := bytes.CutPrefix(b, []byte{'"'}) + if !ok { + return fmt.Errorf("invalid JSON type: %s", b) } - q, err := decodeBase64("q") - if err != nil { - return "", err + src, ok = bytes.CutSuffix(src, []byte{'"'}) + if !ok { + return fmt.Errorf("invalid JSON type: %s", b) } - dp, err := decodeBase64("dp") + dst := make([]byte, base64.RawURLEncoding.DecodedLen(len(src))) + _, err := base64.RawURLEncoding.Decode(dst, src) if err != nil { - return "", err + return err } - dq, err := decodeBase64("dq") - if err != nil { - return "", err + i.SetBytes(dst) + return nil +} + +func generateOktaJWTPEM(pemdata string, cnf *oauth2.Config) (string, error) { + blk, rest := pem.Decode([]byte(pemdata)) + if rest := bytes.TrimSpace(rest); len(rest) != 0 { + return "", fmt.Errorf("PEM text has trailing data: %s", rest) } - qi, err := decodeBase64("qi") + key, err := x509.ParsePKCS8PrivateKey(blk.Bytes) if err != nil { return "", err } + return signJWT(cnf, key) +} - privateKeyRSA := &rsa.PrivateKey{ - PublicKey: rsa.PublicKey{ - N: n, - E: int(e.Int64()), - }, - D: d, - Primes: []*big.Int{p, q}, - Precomputed: rsa.PrecomputedValues{ - Dp: dp, - Dq: dq, - Qinv: qi, - }, - } - - // create a JWT token using required claims and sign it with the private key +// signJWT creates a JWT token using required claims and sign it with the private key. +func signJWT(cnf *oauth2.Config, key any) (string, error) { now := time.Now() tok, err := jwt.NewBuilder().Audience([]string{cnf.Endpoint.TokenURL}). Issuer(cnf.ClientID). 
@@ -159,11 +181,10 @@ func generateOktaJWT(oktaJWK []byte, cnf *oauth2.Config) (string, error) { if err != nil { return "", err } - signedToken, err := jwt.Sign(tok, jwt.WithKey(jwa.RS256, privateKeyRSA)) + signedToken, err := jwt.Sign(tok, jwt.WithKey(jwa.RS256, key)) if err != nil { return "", fmt.Errorf("failed to sign token: %w", err) } - return string(signedToken), nil } diff --git a/x-pack/filebeat/input/httpjson/config_okta_auth_test.go b/x-pack/filebeat/input/httpjson/config_okta_auth_test.go new file mode 100644 index 000000000000..2f686af04373 --- /dev/null +++ b/x-pack/filebeat/input/httpjson/config_okta_auth_test.go @@ -0,0 +1,88 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package httpjson + +import ( + "testing" + + "github.com/lestrrat-go/jwx/v2/jwt" + "golang.org/x/oauth2" +) + +func TestGenerateOktaJWT(t *testing.T) { + // jwt is a JWT obtained from the Okta integration. 
+ const jwtText = `{ "d": "Cmhokw2MnZfX6da36nnsnQ7IPX9vE6se8_D1NgyL9j9rarYpexhlp45hswcAIFNgWA03NV848Gc0e84AW6wMbyD2E8LPI0Bd8lhdmzRE6L4or2Rxqqjk2Pr2aqGnqs4A0uTijAA7MfPF1zFFdR3EOVx499fEeTiMcLjO83IJCoNiOySDoQgt3KofX5bCbaDy2eiB83rzf0fEcWrWfTY65_Hc2c5lek-1uuF7NpELVzX80p5H-b9MOfLn0BdOGe-mJ2j5bXi-UCQ45Wxj2jdkoA_Qwb4MEtXZjp5LjcM75SrlGfVd99acML2wGZgYLGweJ0sAPDlKzGvj4ve-JT8nNw", "p": "8-UBb4psN0wRPktkh3S48L3ng4T5zR08t7nwXDYNajROrS2j7oq60dtlGY4IwgwcC0c9GDQP7NiN2IpU2uahYkGQ7lDyM_h7UfQWL5fMrsYiKgn2pUgSy5TTT8smkSLbJAD35nAH6PknsQ2PuvOlb4laiC0MXw1Rw4vT9HAEB9M", "q": "0DJkPEN0bECG_6lorlNJgIfoNahVevGKK-Yti1YZ5K-nQCuffPCwPG0oZZo_55y5LODe9W7psxnAt7wxkpAY4lK2hpHTWJSkPjqXWFYIP8trn4RZDShnJXli0i1XqPOqkiVzBZGx5nLtj2bUtmXfIU7-kneHGvLQ5EXcyQW1ISM", "dp": "Ye1PWEPSE5ndSo_m-2RoZXE6pdocmrjkijiEQ-IIHN6HwI0Ux1C4lk5rF4mqBo_qKrUd2Lv-sPB6c7mHPKVhoxwEX0vtE-TvTwacadufeYVgblS1zcNUmJ1XAzDkeV3vc1NYNhRBeM-hmjuBvGTbxh72VLsRvpCQhd186yaW17U", "dq": "jvSK7vZCUrJb_-CLCGgX6DFpuK5FQ43mmg4K58nPLb-Oz_kkId4CpPsu6dToXFi4raAad9wYi-n68i4-u6xF6eFxgyVOQVyPCkug7_7i2ysKUxXFL8u2R3z55edMca4eSQt91y0bQmlXxUeOd0-rzms3UcrQ8igYVyXBXCaXIJE", "qi": "iIY1Y4bzMYIFG7XH7gNP7C-mWi6QH4l9aGRTzPB_gPaFThvc0XKW0S0l82bfp_PPPWg4D4QpDCp7rZ6KhEA8BlNi86Vt3V6F3Hz5XiDa4ikgQNsAXiXLqf83R-y1-cwHjW70PP3U89hmalCRRFfVXcLHV77AVHqbrp9rAIo-X-I", "kty": "RSA", "e": "AQAB", "kid": "koeFQjkyiav_3Qwr3aRinCqCD2LaEHOjFnje7XlkbdI", "n": "xloTY8bAuI5AEo8JursCd7w0LmELCae7JOFaVo9njGrG8tRNqgIdjPyoGY_ABwKkmjcCMLGMA29llFDbry8rB4LTWai-h_jX4_uUUnl52mLX-lO6merL5HEPZF438Ql9Hrxs5yGzT8n865-E_3uwYSBrhTjvlZJeXYUeVHfKo8pJSSsw3RZEjBW4Tt0eFmCZnFErtTyk3oUPaYVP-8YLLAenhUDV4Lm1dC4dxqUj0Oh6XrWgIb-eYHGolMY9g9xbgyd4ir39RodA_1DOjzHWpNfCM-J5ZOtfpuKCAe5__u7L8FT0m56XOxcDoVVsz1J1VNrACWAGbhDWNjyHfL5E2Q" }` + cnf := &oauth2.Config{ + ClientID: "0oaajljpeokFZLyKU5d7", + Scopes: []string{"okta.logs.read"}, + } + got, err := generateOktaJWT([]byte(jwtText), cnf) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + tok, err := jwt.Parse([]byte(got), jwt.WithVerify(false)) + if err != 
nil { + t.Fatalf("unexpected error: %v", err) + } + if tok.Issuer() != cnf.ClientID { + t.Errorf("unexpected issuer: got:%s want:%s", tok.Issuer(), cnf.ClientID) + } + if tok.Subject() != cnf.ClientID { + t.Errorf("unexpected subject: got:%s want:%s", tok.Subject(), cnf.ClientID) + } +} + +func TestGenerateOktaJWTPEM(t *testing.T) { + // jwtText is generated by https://mkjwk.org/ using the instructions at + // https://developer.okta.com/docs/guides/dpop/nonoktaresourceserver/main/#create-the-json-web-token + const jwtText = ` +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCOuef3HMRhohVT +5kSoAJgV+atpDjkwTwkOq+ImnbBlv75GaApG90w8VpjXjhqN/1KJmwfyrKiquiMq +OPu+o/672Dys5rUAaWSbT7wRF1GjLDDZrM0GHRdV4DGxM/LKI8I5yE1Mx3EzV+D5 +ZLmcRc5U4oEoMwtGpr0zRZ7uUr6a28UQwcUsVIPItc1/9rERlo1WTv8dcaj4ECC3 +2Sc0y/F+9XqwJvLd4Uv6ckzP0Sv4tbDA+7jpD9MneAIUiZ4LVj2cwbBd+YRY6jXx +MkevcCSmSX60clBY1cIFkw1DYHqtdHEwAQcQHLGMoi72xRP2qrdzIPsaTKVYoHVo +WA9vADdHAgMBAAECggEAIlx7jjCsztyYyeQsL05FTzUWoWo9NnYwtgmHnshkCXsK +MiUmJEOxZO1sSqj5l6oakupyFWigCspZYPbrFNCiqVK7+NxqQzkccY/WtT6p9uDS +ufUyPwCN96zMCd952lSVlBe3FH8Hr9a+YQxw60CbFjCZ67WuR0opTsi6JKJjJSDb +TQQZ4qJR97D05I1TgfmO+VO7G/0/dDaNHnnlYz0AnOgZPSyvrU2G5cYye4842EMB +ng81xjHD+xp55JNui/xYkhmYspYhrB2KlEjkKb08OInUjBeaLEAgA1r9yOHsfV/3 +DQzDPRO9iuqx5BfJhdIqUB1aifrye+sbxt9uMBtUgQKBgQDVdfO3GYT+ZycOQG9P +QtdMn6uiSddchVCGFpk331u6M6yafCKjI/MlJDl29B+8R5sVsttwo8/qnV/xd3cn +pY14HpKAsE4l6/Ciagzoj+0NqfPEDhEzbo8CyArcd7pSxt3XxECAfZe2+xivEPHe +gFO60vSFjFtvlLRMDMOmqX3kYQKBgQCrK1DISyQTnD6/axsgh2/ESOmT7n+JRMx/ +YzA7Lxu3zGzUC8/sRDa1C41t054nf5ZXJueYLDSc4kEAPddzISuCLxFiTD2FQ75P +lHWMgsEzQObDm4GPE9cdKOjoAvtAJwbvZcjDa029CDx7aCaDzbNvdmplZ7EUrznR +55U8Wsm8pwKBgBytxTmzZwfbCgdDJvFKNKzpwuCB9TpL+v6Y6Kr2Clfg+26iAPFU +MiWqUUInGGBuamqm5g6jI5sM28gQWeTsvC4IRXyes1Eq+uCHSQax15J/Y+3SSgNT +9kjUYYkvWMwoRcPobRYWSZze7XkP2L8hFJ7EGvAaZGqAWxzgliS9HtnhAoGAONZ/ +UqMw7Zoac/Ga5mhSwrj7ZvXxP6Gqzjofj+eKqrOlB5yMhIX6LJATfH6iq7cAMxxm 
+Fu/G4Ll4oB3o5wACtI3wldV/MDtYfJBtoCTjBqPsfNOsZ9hMvBATlsc2qwzKjsAb +tFhzTevoOYpSD75EcSS/G8Ec2iN9bagatBnpl00CgYBVqAOFZelNfP7dj//lpk8y +EUAw7ABOq0S9wkpFWTXIVPoBQUipm3iAUqGNPmvr/9ShdZC9xeu5AwKram4caMWJ +ExRhcDP1hFM6CdmSkIYEgBKvN9N0O4Lx1ba34gk74Hm65KXxokjJHOC0plO7c7ok +LNV/bIgMHOMoxiGrwyjAhg== +-----END PRIVATE KEY----- +` + cnf := &oauth2.Config{ + ClientID: "0oaajljpeokFZLyKU5d7", + Scopes: []string{"okta.logs.read"}, + } + got, err := generateOktaJWTPEM(jwtText, cnf) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + tok, err := jwt.Parse([]byte(got), jwt.WithVerify(false)) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if tok.Issuer() != cnf.ClientID { + t.Errorf("unexpected issuer: got:%s want:%s", tok.Issuer(), cnf.ClientID) + } + if tok.Subject() != cnf.ClientID { + t.Errorf("unexpected subject: got:%s want:%s", tok.Subject(), cnf.ClientID) + } +} diff --git a/x-pack/filebeat/input/httpjson/config_test.go b/x-pack/filebeat/input/httpjson/config_test.go index 74e72ded3323..d88c6ac4a625 100644 --- a/x-pack/filebeat/input/httpjson/config_test.go +++ b/x-pack/filebeat/input/httpjson/config_test.go @@ -464,7 +464,7 @@ func TestConfigOauth2Validation(t *testing.T) { }, { name: "okta requires token_url, client_id, scopes and at least one of okta.jwk_json or okta.jwk_file to be provided", - expectedErr: "okta validation error: token_url, client_id, scopes and at least one of okta.jwk_json or okta.jwk_file must be provided accessing 'auth.oauth2'", + expectedErr: "okta validation error: one of okta.jwk_json, okta.jwk_file or okta.jwk_pem must be provided accessing 'auth.oauth2'", input: map[string]interface{}{ "auth.oauth2": map[string]interface{}{ "provider": "okta", diff --git a/x-pack/filebeat/input/httpjson/input.go b/x-pack/filebeat/input/httpjson/input.go index 928c056d2d39..50a4f7f20a61 100644 --- a/x-pack/filebeat/input/httpjson/input.go +++ b/x-pack/filebeat/input/httpjson/input.go @@ -122,6 +122,16 @@ func run(ctx v2.Context, cfg 
config, pub inputcursor.Publisher, crsr *inputcurso if cfg.Request.Tracer != nil { id := sanitizeFileName(ctx.ID) cfg.Request.Tracer.Filename = strings.ReplaceAll(cfg.Request.Tracer.Filename, "*", id) + + // Propagate tracer behaviour to all chain children. + for i, c := range cfg.Chain { + if c.Step != nil { // Request is validated as required. + cfg.Chain[i].Step.Request.Tracer = cfg.Request.Tracer + } + if c.While != nil { // Request is validated as required. + cfg.Chain[i].While.Request.Tracer = cfg.Request.Tracer + } + } } metrics := newInputMetrics(reg) @@ -243,7 +253,12 @@ func newNetHTTPClient(ctx context.Context, cfg *requestConfig, log *logp.Logger, ) traceLogger := zap.New(core) - netHTTPClient.Transport = httplog.NewLoggingRoundTripper(netHTTPClient.Transport, traceLogger) + const margin = 1e3 // 1OkB ought to be enough room for all the remainder of the trace details. + maxSize := cfg.Tracer.MaxSize*1e6 - margin + if maxSize < 0 { + maxSize = 0 + } + netHTTPClient.Transport = httplog.NewLoggingRoundTripper(netHTTPClient.Transport, traceLogger, maxSize) } if reg != nil { diff --git a/x-pack/filebeat/input/internal/httplog/roundtripper.go b/x-pack/filebeat/input/internal/httplog/roundtripper.go index 4f0eb9eb670a..eac54d7378f5 100644 --- a/x-pack/filebeat/input/internal/httplog/roundtripper.go +++ b/x-pack/filebeat/input/internal/httplog/roundtripper.go @@ -32,9 +32,10 @@ type contextKey string // NewLoggingRoundTripper returns a LoggingRoundTripper that logs requests and // responses to the provided logger. 
-func NewLoggingRoundTripper(next http.RoundTripper, logger *zap.Logger) *LoggingRoundTripper { +func NewLoggingRoundTripper(next http.RoundTripper, logger *zap.Logger, maxBodyLen int) *LoggingRoundTripper { return &LoggingRoundTripper{ transport: next, + maxBodyLen: maxBodyLen, logger: logger, txBaseID: newID(), txIDCounter: atomic.NewUint64(0), @@ -44,6 +45,7 @@ func NewLoggingRoundTripper(next http.RoundTripper, logger *zap.Logger) *Logging // LoggingRoundTripper is an http.RoundTripper that logs requests and responses. type LoggingRoundTripper struct { transport http.RoundTripper + maxBodyLen int // The maximum length of a body. Longer bodies will be truncated. logger *zap.Logger // Destination logger. txBaseID string // Random value to make transaction IDs unique. txIDCounter *atomic.Uint64 // Transaction ID counter that is incremented for each request. @@ -63,6 +65,7 @@ type LoggingRoundTripper struct { // http.request // user_agent.original // http.request.body.content +// http.request.body.truncated // http.request.body.bytes // http.request.mime_type // event.original (the request without body from httputil.DumpRequestOut) @@ -71,6 +74,7 @@ type LoggingRoundTripper struct { // // http.response.status_code // http.response.body.content +// http.response.body.truncated // http.response.body.bytes // http.response.mime_type // event.original (the response without body from httputil.DumpResponse) @@ -86,7 +90,7 @@ func (rt *LoggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, err } } - req, respParts, errorsMessages := logRequest(log, req) + req, respParts, errorsMessages := logRequest(log, req, rt.maxBodyLen) resp, err := rt.transport.RoundTrip(req) if err != nil { @@ -107,7 +111,8 @@ func (rt *LoggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, err errorsMessages = append(errorsMessages, fmt.Sprintf("failed to read response body: %s", err)) } else { respParts = append(respParts, - zap.ByteString("http.response.body.content", 
body), + zap.ByteString("http.response.body.content", body[:min(len(body), rt.maxBodyLen)]), + zap.Bool("http.response.body.truncated", rt.maxBodyLen < len(body)), zap.Int("http.response.body.bytes", len(body)), zap.String("http.response.mime_type", resp.Header.Get("Content-Type")), ) @@ -143,17 +148,18 @@ func (rt *LoggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, err // http.request // user_agent.original // http.request.body.content +// http.request.body.truncated // http.request.body.bytes // http.request.mime_type // event.original (the request without body from httputil.DumpRequestOut) // // Additional fields in extra will also be logged. -func LogRequest(log *zap.Logger, req *http.Request, extra ...zapcore.Field) *http.Request { - req, _, _ = logRequest(log, req, extra...) +func LogRequest(log *zap.Logger, req *http.Request, maxBodyLen int, extra ...zapcore.Field) *http.Request { + req, _, _ = logRequest(log, req, maxBodyLen, extra...) return req } -func logRequest(log *zap.Logger, req *http.Request, extra ...zapcore.Field) (_ *http.Request, parts []zapcore.Field, errorsMessages []string) { +func logRequest(log *zap.Logger, req *http.Request, maxBodyLen int, extra ...zapcore.Field) (_ *http.Request, parts []zapcore.Field, errorsMessages []string) { reqParts := append([]zapcore.Field{ zap.String("url.original", req.URL.String()), zap.String("url.scheme", req.URL.Scheme), @@ -174,7 +180,8 @@ func logRequest(log *zap.Logger, req *http.Request, extra ...zapcore.Field) (_ * errorsMessages = append(errorsMessages, fmt.Sprintf("failed to read request body: %s", err)) } else { reqParts = append(reqParts, - zap.ByteString("http.request.body.content", body), + zap.ByteString("http.request.body.content", body[:min(len(body), maxBodyLen)]), + zap.Bool("http.request.body.truncated", maxBodyLen < len(body)), zap.Int("http.request.body.bytes", len(body)), zap.String("http.request.mime_type", req.Header.Get("Content-Type")), ) diff --git 
a/x-pack/filebeat/input/lumberjack/server_test.go b/x-pack/filebeat/input/lumberjack/server_test.go index 1c2aa2de3e08..c7db6abf0b54 100644 --- a/x-pack/filebeat/input/lumberjack/server_test.go +++ b/x-pack/filebeat/input/lumberjack/server_test.go @@ -52,7 +52,8 @@ func TestServer(t *testing.T) { c := makeTestConfig() c.TLS = serverConf // Disable mTLS requirements in the server. - c.TLS.ClientAuth = 0 // tls.NoClientCert + var clientAuth = tlscommon.TLSClientAuthNone + c.TLS.ClientAuth = &clientAuth c.TLS.VerificationMode = tlscommon.VerifyNone testSendReceive(t, c, 10, clientConf) @@ -127,7 +128,7 @@ func sendData(ctx context.Context, t testing.TB, bindAddress string, numberOfEve }() t.Log("Lumberjack client connected.") - var events []interface{} + events := make([]interface{}, 0, numberOfEvents) for i := 0; i < numberOfEvents; i++ { events = append(events, map[string]interface{}{ "message": "hello world!", @@ -220,11 +221,12 @@ func tlsSetup(t *testing.T) (clientConfig *tls.Config, serverConfig *tlscommon.S MinVersion: tls.VersionTLS12, } + var clientAuth = tlscommon.TLSClientAuthRequired + serverConfig = &tlscommon.ServerConfig{ // NOTE: VerifyCertificate is ineffective unless ClientAuth is set to RequireAndVerifyClientCert. VerificationMode: tlscommon.VerifyCertificate, - // Unfortunately ServerConfig uses an unexported type in an exported field. 
- ClientAuth: 4, // tls.RequireAndVerifyClientCert + ClientAuth: &clientAuth, // tls.RequireAndVerifyClientCert CAs: []string{ string(certData.ca.CertPEM(t)), }, diff --git a/x-pack/filebeat/module/microsoft/m365_defender/config/defender.yml b/x-pack/filebeat/module/microsoft/m365_defender/config/defender.yml index 6716568ba141..3d8747586153 100644 --- a/x-pack/filebeat/module/microsoft/m365_defender/config/defender.yml +++ b/x-pack/filebeat/module/microsoft/m365_defender/config/defender.yml @@ -19,9 +19,8 @@ request.transforms: value: "MdatpPartner-Elastic-Filebeat/1.0.0" - set: target: "url.params.$filter" - value: 'lastUpdateTime gt [[formatDate .cursor.lastUpdateTime "2006-01-02T15:04:05.9999999Z"]]' + value: 'lastUpdateTime gt [[.cursor.lastUpdateTime]]' default: 'lastUpdateTime gt [[formatDate (now (parseDuration "-55m")) "2006-01-02T15:04:05.9999999Z"]]' - response.split: target: body.value ignore_empty_value: true @@ -31,10 +30,10 @@ response.split: split: target: body.alerts.entities keep_parent: true - cursor: lastUpdateTime: - value: "[[.last_response.body.lastUpdateTime]]" + value: "[[.last_event.lastUpdateTime]]" + ignore_empty_value: true {{ else if eq .input "file" }} diff --git a/x-pack/filebeat/tests/integration/managerV2_test.go b/x-pack/filebeat/tests/integration/managerV2_test.go index 3332d549fa20..b541b8d54093 100644 --- a/x-pack/filebeat/tests/integration/managerV2_test.go +++ b/x-pack/filebeat/tests/integration/managerV2_test.go @@ -7,21 +7,51 @@ package integration import ( + "bufio" + "crypto/tls" + "crypto/x509" + "encoding/json" "fmt" + "io" + "math" "os" "path/filepath" + "strings" "sync/atomic" "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + protobuf "google.golang.org/protobuf/proto" "github.com/elastic/beats/v7/libbeat/tests/integration" + "github.com/elastic/beats/v7/libbeat/version" + 
"github.com/elastic/beats/v7/testing/certutil" "github.com/elastic/beats/v7/x-pack/libbeat/management" "github.com/elastic/elastic-agent-client/v7/pkg/client/mock" "github.com/elastic/elastic-agent-client/v7/pkg/proto" ) +// Event is the common part of a beats event, the beats and Elastic Agent +// metadata. +type Event struct { + Metadata struct { + Version string `json:"version"` + } `json:"@metadata"` + ElasticAgent struct { + Snapshot bool `json:"snapshot"` + Version string `json:"version"` + Id string `json:"id"` + } `json:"elastic_agent"` + Agent struct { + Version string `json:"version"` + Id string `json:"id"` + } `json:"agent"` +} + // TestInputReloadUnderElasticAgent will start a Filebeat and cause the input // reload issue described on https://github.com/elastic/beats/issues/33653. // In short, a new input for a file needs to be started while there are still @@ -500,6 +530,208 @@ func TestRecoverFromInvalidOutputConfiguration(t *testing.T) { } } +func TestAgentPackageVersionOnStartUpInfo(t *testing.T) { + wantVersion := "8.13.0+build20131123" + + filebeat := integration.NewBeat( + t, + "filebeat", + "../../filebeat.test", + ) + + logFilePath := filepath.Join(filebeat.TempDir(), "logs-to-ingest.log") + generateLogFile(t, logFilePath) + + eventsDir := filepath.Join(filebeat.TempDir(), "ingested-events") + logLevel := proto.UnitLogLevel_INFO + units := []*proto.UnitExpected{ + { + Id: "output-file-unit", + Type: proto.UnitType_OUTPUT, + ConfigStateIdx: 1, + State: proto.State_HEALTHY, + LogLevel: logLevel, + Config: &proto.UnitExpectedConfig{ + Id: "default", + Type: "file", + Name: "events-to-file", + Source: integration.RequireNewStruct(t, + map[string]interface{}{ + "name": "events-to-file", + "type": "file", + "path": eventsDir, + }), + }, + }, + { + Id: "input-unit-1", + Type: proto.UnitType_INPUT, + ConfigStateIdx: 1, + State: proto.State_HEALTHY, + LogLevel: logLevel, + Config: &proto.UnitExpectedConfig{ + Id: "filestream-monitoring-agent", + Type: 
"filestream", + Name: "filestream-monitoring-agent", + Streams: []*proto.Stream{ + { + Id: "log-input-1", + Source: integration.RequireNewStruct(t, map[string]interface{}{ + "enabled": true, + "type": "log", + "paths": []interface{}{logFilePath}, + }), + }, + }, + }, + }, + } + wantAgentInfo := proto.AgentInfo{ + Id: "agent-id", + Version: wantVersion, + Snapshot: true, + } + + observedCh := make(chan *proto.CheckinObserved, 5) + server := &mock.StubServerV2{ + CheckinV2Impl: func(observed *proto.CheckinObserved) *proto.CheckinExpected { + observedCh <- observed + return &proto.CheckinExpected{ + AgentInfo: &wantAgentInfo, + Units: units, + } + }, + ActionImpl: func(response *proto.ActionResponse) error { return nil }, + } + + rootKey, rootCACert, rootCertPem, err := certutil.NewRootCA() + require.NoError(t, err, "could not generate root CA") + + rootCertPool := x509.NewCertPool() + ok := rootCertPool.AppendCertsFromPEM(rootCertPem) + require.Truef(t, ok, "could not append certs from PEM to cert pool") + + beatPrivKeyPem, beatCertPem, beatTLSCert, err := + certutil.GenerateChildCert("localhost", rootKey, rootCACert) + require.NoError(t, err, "could not generate child TLS certificate") + + getCert := func(info *tls.ClientHelloInfo) (*tls.Certificate, error) { + // it's one of the child certificates. 
As there is only one, return it + return beatTLSCert, nil + } + + creds := credentials.NewTLS(&tls.Config{ + ClientAuth: tls.RequireAndVerifyClientCert, + ClientCAs: rootCertPool, + GetCertificate: getCert, + MinVersion: tls.VersionTLS12, + }) + err = server.Start(grpc.Creds(creds)) + require.NoError(t, err, "failed starting GRPC server") + t.Cleanup(server.Stop) + + filebeat.Start("-E", "management.enabled=true") + + startUpInfo := &proto.StartUpInfo{ + Addr: fmt.Sprintf("localhost:%d", server.Port), + ServerName: "localhost", + Token: "token", + CaCert: rootCertPem, + PeerCert: beatCertPem, + PeerKey: beatPrivKeyPem, + Services: []proto.ConnInfoServices{proto.ConnInfoServices_CheckinV2}, + AgentInfo: &wantAgentInfo, + } + writeStartUpInfo(t, filebeat.Stdin(), startUpInfo) + // for some reason the pipe needs to be closed for filebeat to read it. + require.NoError(t, filebeat.Stdin().Close(), "failed closing stdin pipe") + + // get 1st observed + observed := <-observedCh + // drain observedCh so server won't block + go func() { + for { + <-observedCh + } + }() + + msg := strings.Builder{} + require.Eventuallyf(t, func() bool { + msg.Reset() + + _, err = os.Stat(eventsDir) + if err != nil { + fmt.Fprintf(&msg, "could not verify output directory exists: %v", + err) + return false + } + + entries, err := os.ReadDir(eventsDir) + if err != nil { + fmt.Fprintf(&msg, "failed checking output directory for files: %v", + err) + return false + } + + if len(entries) == 0 { + fmt.Fprintf(&msg, "no file found on %s", eventsDir) + return false + } + + for _, e := range entries { + if e.IsDir() { + continue + } + + i, err := e.Info() + if err != nil { + fmt.Fprintf(&msg, "could not read info of %q", e.Name()) + return false + } + if i.Size() == 0 { + fmt.Fprintf(&msg, "file %q was created, but it's still empty", + e.Name()) + return false + } + + // read one line to make sure it isn't a 1/2 written JSON + eventsFile := filepath.Join(eventsDir, e.Name()) + f, err := 
os.Open(eventsFile) + if err != nil { + fmt.Fprintf(&msg, "could not open file %q", eventsFile) + return false + } + + scanner := bufio.NewScanner(f) + if scanner.Scan() { + var ev Event + err := json.Unmarshal(scanner.Bytes(), &ev) + if err != nil { + fmt.Fprintf(&msg, "failed to read event from file: %v", err) + return false + } + return true + } + } + + return true + }, 30*time.Second, time.Second, "no event was produced: %s", &msg) + + assert.Equal(t, version.Commit(), observed.VersionInfo.BuildHash) + + evs := getEventsFromFileOutput[Event](t, eventsDir, 100) + for _, got := range evs { + assert.Equal(t, wantVersion, got.Metadata.Version) + + assert.Equal(t, wantAgentInfo.Id, got.ElasticAgent.Id) + assert.Equal(t, wantAgentInfo.Version, got.ElasticAgent.Version) + assert.Equal(t, wantAgentInfo.Snapshot, got.ElasticAgent.Snapshot) + + assert.Equal(t, wantAgentInfo.Id, got.Agent.Id) + assert.Equal(t, wantVersion, got.Agent.Version) + } +} + // generateLogFile generates a log file by appending the current // time to it every second. func generateLogFile(t *testing.T, fullPath string) { @@ -543,3 +775,52 @@ func generateLogFile(t *testing.T, fullPath string) { } }() } + +// getEventsFromFileOutput reads all events from all the files on dir. If n > 0, +// then it reads up to n events. It considers all files are ndjson, and it skips +// any directory within dir. 
+func getEventsFromFileOutput[E any](t *testing.T, dir string, n int) []E { + t.Helper() + + if n < 1 { + n = math.MaxInt + } + + var events []E + entries, err := os.ReadDir(dir) + require.NoError(t, err, "could not read events directory") + for _, e := range entries { + if e.IsDir() { + continue + } + f, err := os.Open(filepath.Join(dir, e.Name())) + require.NoErrorf(t, err, "could not open file %q", e.Name()) + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + var ev E + err := json.Unmarshal(scanner.Bytes(), &ev) + require.NoError(t, err, "failed to read event") + events = append(events, ev) + + if len(events) >= n { + return events + } + } + } + + return events +} + +func writeStartUpInfo(t *testing.T, w io.Writer, info *proto.StartUpInfo) { + t.Helper() + if len(info.Services) == 0 { + info.Services = []proto.ConnInfoServices{proto.ConnInfoServices_CheckinV2} + } + + infoBytes, err := protobuf.Marshal(info) + require.NoError(t, err, "failed to marshal connection information") + + _, err = w.Write(infoBytes) + require.NoError(t, err, "failed to write connection information") +} diff --git a/x-pack/libbeat/Jenkinsfile.yml b/x-pack/libbeat/Jenkinsfile.yml index 9d4ecfa7bd08..9947fd0096c6 100644 --- a/x-pack/libbeat/Jenkinsfile.yml +++ b/x-pack/libbeat/Jenkinsfile.yml @@ -27,6 +27,43 @@ stages: branches: true ## for all the branches tags: true ## for all the tags stage: extended + ## For now Windows CI tests for Libbeat are only enabled for ETW + ## It only contains Go tests + windows-2022: + mage: "mage -w reader/etw build goUnitTest" + platforms: ## override default labels in this specific stage. + - "windows-2022" + stage: mandatory + windows-2019: + mage: "mage -w reader/etw build goUnitTest" + platforms: ## override default labels in this specific stage. + - "windows-2019" + stage: extended_win + windows-2016: + mage: "mage -w reader/etw build goUnitTest" + platforms: ## override default labels in this specific stage. 
+ - "windows-2016" + stage: mandatory + windows-2012: + mage: "mage -w reader/etw build goUnitTest" + platforms: ## override default labels in this specific stage. + - "windows-2012-r2" + stage: extended_win + windows-11: + mage: "mage -w reader/etw build goUnitTest" + platforms: ## override default labels in this specific stage. + - "windows-11" + stage: extended_win + windows-10: + mage: "mage -w reader/etw build goUnitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + stage: extended_win + windows-8: + mage: "mage -w reader/etw build goUnitTest" + platforms: ## override default labels in this specific stage. + - "windows-8" + stage: extended_win unitTest: mage: "mage build unitTest" stage: mandatory diff --git a/x-pack/libbeat/common/aws/credentials.go b/x-pack/libbeat/common/aws/credentials.go index 84e88d10422b..f6efde3e2b20 100644 --- a/x-pack/libbeat/common/aws/credentials.go +++ b/x-pack/libbeat/common/aws/credentials.go @@ -10,6 +10,7 @@ import ( "fmt" "net/http" "net/url" + "time" "github.com/aws/aws-sdk-go-v2/service/sts" @@ -44,6 +45,13 @@ type ConfigAWS struct { FIPSEnabled bool `config:"fips_enabled"` TLS *tlscommon.Config `config:"ssl" yaml:"ssl,omitempty" json:"ssl,omitempty"` DefaultRegion string `config:"default_region"` + + // The duration of the role session. Defaults to 15m when not set. + AssumeRoleDuration time.Duration `config:"assume_role.duration"` + + // AssumeRoleExpiryWindow will allow the credentials to trigger refreshing prior to the credentials + // actually expiring. If expiry_window is less than or equal to zero, the setting is ignored. 
+ AssumeRoleExpiryWindow time.Duration `config:"assume_role.expiry_window"` } // InitializeAWSConfig function creates the awssdk.Config object from the provided config @@ -154,8 +162,15 @@ func addAssumeRoleProviderToAwsConfig(config ConfigAWS, awsConfig *awssdk.Config if config.ExternalID != "" { aro.ExternalID = awssdk.String(config.ExternalID) } + if config.AssumeRoleDuration > 0 { + aro.Duration = config.AssumeRoleDuration + } + }) + awsConfig.Credentials = awssdk.NewCredentialsCache(stsCredProvider, func(options *awssdk.CredentialsCacheOptions) { + if config.AssumeRoleExpiryWindow > 0 { + options.ExpiryWindow = config.AssumeRoleExpiryWindow + } }) - awsConfig.Credentials = stsCredProvider } // addStaticCredentialsProviderToAwsConfig adds a static credentials provider to the current AWS config by using the keys stored in Beats config diff --git a/x-pack/libbeat/docs/aws-credentials-config.asciidoc b/x-pack/libbeat/docs/aws-credentials-config.asciidoc index 172142d1aa82..423e241f8963 100644 --- a/x-pack/libbeat/docs/aws-credentials-config.asciidoc +++ b/x-pack/libbeat/docs/aws-credentials-config.asciidoc @@ -15,6 +15,9 @@ To configure AWS credentials, either put the credentials into the {beatname_uc} * *fips_enabled*: Enabling this option instructs {beatname_uc} to use the FIPS endpoint of a service. All services used by {beatname_uc} are FIPS compatible except for `tagging` but only certain regions are FIPS compatible. See https://aws.amazon.com/compliance/fips/ or the appropriate service page, https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html, for a full list of FIPS endpoints and regions. * *ssl*: This specifies SSL/TLS configuration. If the ssl section is missing, the host's CAs are used for HTTPS connections. See <> for more information. * *default_region*: Default region to query if no other region is set. Most AWS services offer a regional endpoint that can be used to make requests. 
Some services, such as IAM, do not support regions. If a region is not provided by any other way (environment variable, credential or instance profile), the value set here will be used. +* *assume_role.duration*: The duration of the requested assume role session. Defaults to 15m when not set. AWS allows a maximum session duration between 1h and 12h depending on your maximum session duration policies. +* *assume_role.expiry_window*: The expiry_window will allow refreshing the session prior to its expiration. + This is beneficial to prevent expiring tokens from causing requests to fail with an ExpiredTokenException. [float] ==== Supported Formats diff --git a/x-pack/libbeat/management/managerV2.go b/x-pack/libbeat/management/managerV2.go index 235325c0cbfc..71b14152c654 100644 --- a/x-pack/libbeat/management/managerV2.go +++ b/x-pack/libbeat/management/managerV2.go @@ -23,16 +23,15 @@ import ( "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/common/reload" "github.com/elastic/beats/v7/libbeat/features" + lbmanagement "github.com/elastic/beats/v7/libbeat/management" + "github.com/elastic/beats/v7/libbeat/publisher" + "github.com/elastic/beats/v7/libbeat/version" "github.com/elastic/elastic-agent-client/v7/pkg/client" "github.com/elastic/elastic-agent-client/v7/pkg/proto" conf "github.com/elastic/elastic-agent-libs/config" "github.com/elastic/elastic-agent-libs/logp" - - "github.com/elastic/beats/v7/libbeat/common/reload" - lbmanagement "github.com/elastic/beats/v7/libbeat/management" - "github.com/elastic/beats/v7/libbeat/publisher" - "github.com/elastic/beats/v7/libbeat/version" ) // diagnosticHandler is a wrapper type that's a bit of a hack, the compiler won't let us send the raw unit struct, @@ -161,6 +160,13 @@ func NewV2AgentManager(config *conf.C, registry *reload.Registry) (lbmanagement. 
} } + versionInfo := client.VersionInfo{ + Name: "beat-v2-client", + BuildHash: version.Commit(), + Meta: map[string]string{ + "commit": version.Commit(), + "build_time": version.BuildTime().String(), + }} var agentClient client.V2 var err error if c.InsecureGRPCURLForTesting != "" && c.Enabled { @@ -168,20 +174,11 @@ func NewV2AgentManager(config *conf.C, registry *reload.Registry) (lbmanagement. logger.Info("Using INSECURE GRPC connection, this should be only used for testing!") agentClient = client.NewV2(c.InsecureGRPCURLForTesting, "", // Insecure connection for test, no token needed - client.VersionInfo{ - Name: "beat-v2-client-for-testing", - Version: version.GetDefaultVersion(), - }, client.WithGRPCDialOptions(grpc.WithTransportCredentials(insecure.NewCredentials()))) + versionInfo, + client.WithGRPCDialOptions(grpc.WithTransportCredentials(insecure.NewCredentials()))) } else { // Normal Elastic-Agent-Client initialisation - agentClient, _, err = client.NewV2FromReader(os.Stdin, client.VersionInfo{ - Name: "beat-v2-client", - Version: version.GetDefaultVersion(), - Meta: map[string]string{ - "commit": version.Commit(), - "build_time": version.BuildTime().String(), - }, - }) + agentClient, _, err = client.NewV2FromReader(os.Stdin, versionInfo) if err != nil { return nil, fmt.Errorf("error reading control config from agent: %w", err) } @@ -231,6 +228,14 @@ func NewV2AgentManagerWithClient(config *Config, registry *reload.Registry, agen // Beats central management interface implementation // ================================ +func (cm *BeatV2Manager) AgentInfo() client.AgentInfo { + if cm.client.AgentInfo() == nil { + return client.AgentInfo{} + } + + return *cm.client.AgentInfo() +} + // RegisterDiagnosticHook will register a diagnostic callback function when elastic-agent asks for a diagnostics dump func (cm *BeatV2Manager) RegisterDiagnosticHook(name string, description string, filename string, contentType string, hook client.DiagnosticHook) { 
cm.client.RegisterDiagnosticHook(name, description, filename, contentType, hook) diff --git a/x-pack/libbeat/management/managerV2_test.go b/x-pack/libbeat/management/managerV2_test.go index ea67fdd89f40..66ca7f17966c 100644 --- a/x-pack/libbeat/management/managerV2_test.go +++ b/x-pack/libbeat/management/managerV2_test.go @@ -204,8 +204,7 @@ func TestManagerV2(t *testing.T) { defer srv.Stop() client := client.NewV2(fmt.Sprintf(":%d", srv.Port), "", client.VersionInfo{ - Name: "program", - Version: "v1.0.0", + Name: "program", Meta: map[string]string{ "key": "value", }, diff --git a/x-pack/libbeat/management/tests/mock_server.go b/x-pack/libbeat/management/tests/mock_server.go index 8671b1242339..a90ae633885d 100644 --- a/x-pack/libbeat/management/tests/mock_server.go +++ b/x-pack/libbeat/management/tests/mock_server.go @@ -31,7 +31,7 @@ func NewMockServer(t *testing.T, canStop func(string) bool, inputConfig *proto.U unitOutID := mock.NewID() token := mock.NewID() - //var gotConfig bool + // var gotConfig bool var mut sync.Mutex @@ -98,8 +98,7 @@ func NewMockServer(t *testing.T, canStop func(string) bool, inputConfig *proto.U require.NoError(t, err) client := client.NewV2(fmt.Sprintf(":%d", srv.Port), token, client.VersionInfo{ - Name: "program", - Version: "v1.0.0", + Name: "program", Meta: map[string]string{ "key": "value", }, @@ -111,7 +110,7 @@ func NewMockServer(t *testing.T, canStop func(string) bool, inputConfig *proto.U // helper to wrap the CheckinExpected config we need with every refresh of the mock server func sendUnitsWithState(state proto.State, input, output *proto.UnitExpectedConfig, inId, outId string, stateIndex uint64) *proto.CheckinExpected { return &proto.CheckinExpected{ - AgentInfo: &proto.CheckinAgentInfo{ + AgentInfo: &proto.AgentInfo{ Id: "test-agent", Version: "8.4.0", Snapshot: true, diff --git a/x-pack/libbeat/reader/etw/config.go b/x-pack/libbeat/reader/etw/config.go new file mode 100644 index 000000000000..44f9e68ff2d0 --- /dev/null 
+++ b/x-pack/libbeat/reader/etw/config.go @@ -0,0 +1,16 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package etw + +type Config struct { + Logfile string // Path to the logfile + ProviderGUID string // GUID of the ETW provider + ProviderName string // Name of the ETW provider + SessionName string // Name for new ETW session + TraceLevel string // Level of tracing (e.g., "verbose") + MatchAnyKeyword uint64 // Filter for any matching keywords (bitmask) + MatchAllKeyword uint64 // Filter for all matching keywords (bitmask) + Session string // Existing session to attach +} diff --git a/x-pack/libbeat/reader/etw/controller.go b/x-pack/libbeat/reader/etw/controller.go new file mode 100644 index 000000000000..f17866440cfc --- /dev/null +++ b/x-pack/libbeat/reader/etw/controller.go @@ -0,0 +1,121 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build windows + +package etw + +import ( + "errors" + "fmt" + "syscall" +) + +// AttachToExistingSession queries the status of an existing ETW session. +// On success, it updates the Session's handler with the queried information. +func (s *Session) AttachToExistingSession() error { + // Convert the session name to UTF16 for Windows API compatibility. + sessionNamePtr, err := syscall.UTF16PtrFromString(s.Name) + if err != nil { + return fmt.Errorf("failed to convert session name: %w", err) + } + + // Query the current state of the ETW session. + err = s.controlTrace(0, sessionNamePtr, s.properties, EVENT_TRACE_CONTROL_QUERY) + switch { + case err == nil: + // Get the session handler from the properties struct. 
+ s.handler = uintptr(s.properties.Wnode.Union1) + + return nil + + // Handle specific errors related to the query operation. + case errors.Is(err, ERROR_BAD_LENGTH): + return fmt.Errorf("bad length when querying handler: %w", err) + case errors.Is(err, ERROR_INVALID_PARAMETER): + return fmt.Errorf("invalid parameters when querying handler: %w", err) + case errors.Is(err, ERROR_WMI_INSTANCE_NOT_FOUND): + return fmt.Errorf("session is not running: %w", err) + default: + return fmt.Errorf("failed to get handler: %w", err) + } +} + +// CreateRealtimeSession initializes and starts a new real-time ETW session. +func (s *Session) CreateRealtimeSession() error { + // Convert the session name to UTF16 format for Windows API compatibility. + sessionPtr, err := syscall.UTF16PtrFromString(s.Name) + if err != nil { + return fmt.Errorf("failed to convert session name: %w", err) + } + + // Start the ETW trace session. + err = s.startTrace(&s.handler, sessionPtr, s.properties) + switch { + case err == nil: + + // Handle specific errors related to starting the trace session. + case errors.Is(err, ERROR_ALREADY_EXISTS): + return fmt.Errorf("session already exists: %w", err) + case errors.Is(err, ERROR_INVALID_PARAMETER): + return fmt.Errorf("invalid parameters when starting session trace: %w", err) + default: + return fmt.Errorf("failed to start trace: %w", err) + } + + // Set additional parameters for trace enabling. + // See https://learn.microsoft.com/en-us/windows/win32/api/evntrace/ns-evntrace-enable_trace_parameters#members + params := EnableTraceParameters{ + Version: 2, // ENABLE_TRACE_PARAMETERS_VERSION_2 + } + + // Zero timeout means asynchronous enablement + const timeout = 0 + + // Enable the trace session with extended options. 
+ err = s.enableTrace(s.handler, &s.GUID, EVENT_CONTROL_CODE_ENABLE_PROVIDER, s.traceLevel, s.matchAnyKeyword, s.matchAllKeyword, timeout, ¶ms) + switch { + case err == nil: + return nil + // Handle specific errors related to enabling the trace session. + case errors.Is(err, ERROR_INVALID_PARAMETER): + return fmt.Errorf("invalid parameters when enabling session trace: %w", err) + case errors.Is(err, ERROR_TIMEOUT): + return fmt.Errorf("timeout value expired before the enable callback completed: %w", err) + case errors.Is(err, ERROR_NO_SYSTEM_RESOURCES): + return fmt.Errorf("exceeded the number of trace sessions that can enable the provider: %w", err) + default: + return fmt.Errorf("failed to enable trace: %w", err) + } +} + +// StopSession closes the ETW session and associated handles if they were created. +func (s *Session) StopSession() error { + if !s.Realtime { + return nil + } + + if isValidHandler(s.traceHandler) { + // Attempt to close the trace and handle potential errors. + if err := s.closeTrace(s.traceHandler); err != nil && !errors.Is(err, ERROR_CTX_CLOSE_PENDING) { + return fmt.Errorf("failed to close trace: %w", err) + } + } + + if s.NewSession { + // If we created the session, send a control command to stop it. + return s.controlTrace( + s.handler, + nil, + s.properties, + EVENT_TRACE_CONTROL_STOP, + ) + } + + return nil +} + +func isValidHandler(handler uint64) bool { + return handler != 0 && handler != INVALID_PROCESSTRACE_HANDLE +} diff --git a/x-pack/libbeat/reader/etw/controller_test.go b/x-pack/libbeat/reader/etw/controller_test.go new file mode 100644 index 000000000000..0c663433ad1f --- /dev/null +++ b/x-pack/libbeat/reader/etw/controller_test.go @@ -0,0 +1,190 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +//go:build windows + +package etw + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "golang.org/x/sys/windows" +) + +func TestAttachToExistingSession_Error(t *testing.T) { + // Mock implementation of controlTrace + controlTrace := func(traceHandle uintptr, + instanceName *uint16, + properties *EventTraceProperties, + controlCode uint32) error { + return ERROR_WMI_INSTANCE_NOT_FOUND + } + + // Create a Session instance + session := &Session{ + Name: "TestSession", + properties: &EventTraceProperties{}, + controlTrace: controlTrace, + } + + err := session.AttachToExistingSession() + assert.EqualError(t, err, "session is not running: The instance name passed was not recognized as valid by a WMI data provider.") +} + +func TestAttachToExistingSession_Success(t *testing.T) { + // Mock implementation of controlTrace + controlTrace := func(traceHandle uintptr, + instanceName *uint16, + properties *EventTraceProperties, + controlCode uint32) error { + // Set a mock handler value + properties.Wnode.Union1 = 12345 + return nil + } + + // Create a Session instance with initialized Properties + session := &Session{ + Name: "TestSession", + properties: &EventTraceProperties{}, + controlTrace: controlTrace, + } + + err := session.AttachToExistingSession() + + assert.NoError(t, err) + assert.Equal(t, uintptr(12345), session.handler, "Handler should be set to the mock value") +} + +func TestCreateRealtimeSession_StartTraceError(t *testing.T) { + // Mock implementation of startTrace + startTrace := func(traceHandle *uintptr, + instanceName *uint16, + properties *EventTraceProperties) error { + return ERROR_ALREADY_EXISTS + } + + // Create a Session instance + session := &Session{ + Name: "TestSession", + properties: &EventTraceProperties{}, + startTrace: startTrace, + } + + err := session.CreateRealtimeSession() + assert.EqualError(t, err, "session already exists: Cannot create a file when that file already exists.") +} + +func 
TestCreateRealtimeSession_EnableTraceError(t *testing.T) { + // Mock implementations + startTrace := func(traceHandle *uintptr, + instanceName *uint16, + properties *EventTraceProperties) error { + *traceHandle = 12345 // Mock handler value + return nil + } + + enableTrace := func(traceHandle uintptr, + providerId *windows.GUID, + isEnabled uint32, + level uint8, + matchAnyKeyword uint64, + matchAllKeyword uint64, + enableProperty uint32, + enableParameters *EnableTraceParameters) error { + return ERROR_INVALID_PARAMETER + } + + // Create a Session instance + session := &Session{ + Name: "TestSession", + properties: &EventTraceProperties{}, + startTrace: startTrace, + enableTrace: enableTrace, + } + + err := session.CreateRealtimeSession() + assert.EqualError(t, err, "invalid parameters when enabling session trace: The parameter is incorrect.") +} + +func TestCreateRealtimeSession_Success(t *testing.T) { + // Mock implementations + startTrace := func(traceHandle *uintptr, + instanceName *uint16, + properties *EventTraceProperties) error { + *traceHandle = 12345 // Mock handler value + return nil + } + + enableTrace := func(traceHandle uintptr, + providerId *windows.GUID, + isEnabled uint32, + level uint8, + matchAnyKeyword uint64, + matchAllKeyword uint64, + enableProperty uint32, + enableParameters *EnableTraceParameters) error { + return nil + } + + // Create a Session instance + session := &Session{ + Name: "TestSession", + properties: &EventTraceProperties{}, + startTrace: startTrace, + enableTrace: enableTrace, + } + + err := session.CreateRealtimeSession() + + assert.NoError(t, err) + assert.Equal(t, uintptr(12345), session.handler, "Handler should be set to the mock value") +} + +func TestStopSession_Error(t *testing.T) { + // Mock implementation of closeTrace + closeTrace := func(traceHandle uint64) error { + return ERROR_INVALID_PARAMETER + } + + // Create a Session instance + session := &Session{ + Realtime: true, + NewSession: true, + traceHandler: 
12345, // Example handler value + properties: &EventTraceProperties{}, + closeTrace: closeTrace, + } + + err := session.StopSession() + assert.EqualError(t, err, "failed to close trace: The parameter is incorrect.") +} + +func TestStopSession_Success(t *testing.T) { + // Mock implementations + closeTrace := func(traceHandle uint64) error { + return nil + } + + controlTrace := func(traceHandle uintptr, + instanceName *uint16, + properties *EventTraceProperties, + controlCode uint32) error { + // Set a mock handler value + return nil + } + + // Create a Session instance + session := &Session{ + Realtime: true, + NewSession: true, + traceHandler: 12345, // Example handler value + properties: &EventTraceProperties{}, + closeTrace: closeTrace, + controlTrace: controlTrace, + } + + err := session.StopSession() + assert.NoError(t, err) +} diff --git a/x-pack/libbeat/reader/etw/event.go b/x-pack/libbeat/reader/etw/event.go new file mode 100644 index 000000000000..34faa8d21cb7 --- /dev/null +++ b/x-pack/libbeat/reader/etw/event.go @@ -0,0 +1,340 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build windows + +package etw + +import ( + "errors" + "fmt" + "unsafe" + + "golang.org/x/sys/windows" +) + +// propertyParser is used for parsing properties from raw EVENT_RECORD structures. +type propertyParser struct { + r *EventRecord + info *TraceEventInfo + data []byte + ptrSize uint32 +} + +// GetEventProperties extracts and returns properties from an ETW event record. +func GetEventProperties(r *EventRecord) (map[string]interface{}, error) { + // Handle the case where the event only contains a string. 
+ if r.EventHeader.Flags == EVENT_HEADER_FLAG_STRING_ONLY { + userDataPtr := (*uint16)(unsafe.Pointer(r.UserData)) + return map[string]interface{}{ + "_": utf16AtOffsetToString(uintptr(unsafe.Pointer(userDataPtr)), 0), // Convert the user data from UTF16 to string. + }, nil + } + + // Initialize a new property parser for the event record. + p, err := newPropertyParser(r) + if err != nil { + return nil, fmt.Errorf("failed to parse event properties: %w", err) + } + + // Iterate through each property of the event and format it + properties := make(map[string]interface{}, int(p.info.TopLevelPropertyCount)) + for i := 0; i < int(p.info.TopLevelPropertyCount); i++ { + name := p.getPropertyName(i) + value, err := p.getPropertyValue(i) + if err != nil { + return nil, fmt.Errorf("failed to parse %q value: %w", name, err) + } + properties[name] = value + } + + return properties, nil +} + +// newPropertyParser initializes a new property parser for a given event record. +func newPropertyParser(r *EventRecord) (*propertyParser, error) { + info, err := getEventInformation(r) + if err != nil { + return nil, fmt.Errorf("failed to get event information: %w", err) + } + ptrSize := r.pointerSize() + // Return a new propertyParser instance initialized with event record data and metadata. + return &propertyParser{ + r: r, + info: info, + ptrSize: ptrSize, + data: unsafe.Slice((*uint8)(unsafe.Pointer(r.UserData)), r.UserDataLength), + }, nil +} + +// getEventPropertyInfoAtIndex looks for the EventPropertyInfo object at a specified index. +func (info *TraceEventInfo) getEventPropertyInfoAtIndex(i uint32) *EventPropertyInfo { + if i < info.PropertyCount { + // Calculate the address of the first element in EventPropertyInfoArray. + eventPropertyInfoPtr := uintptr(unsafe.Pointer(&info.EventPropertyInfoArray[0])) + // Adjust the pointer to point to the i-th EventPropertyInfo element. 
+ eventPropertyInfoPtr += uintptr(i) * unsafe.Sizeof(EventPropertyInfo{}) + + return ((*EventPropertyInfo)(unsafe.Pointer(eventPropertyInfoPtr))) + } + return nil +} + +// getEventInformation retrieves detailed metadata about an event record. +func getEventInformation(r *EventRecord) (info *TraceEventInfo, err error) { + // Initially call TdhGetEventInformation to get the required buffer size. + var bufSize uint32 + if err = _TdhGetEventInformation(r, 0, nil, nil, &bufSize); errors.Is(err, ERROR_INSUFFICIENT_BUFFER) { + // Allocate enough memory for TRACE_EVENT_INFO based on the required size. + buff := make([]byte, bufSize) + info = ((*TraceEventInfo)(unsafe.Pointer(&buff[0]))) + // Retrieve the event information into the allocated buffer. + err = _TdhGetEventInformation(r, 0, nil, info, &bufSize) + } + + // Check for errors in retrieving the event information. + if err != nil { + return nil, fmt.Errorf("TdhGetEventInformation failed: %w", err) + } + + return info, nil +} + +// getPropertyName retrieves the name of the i-th event property in the event record. +func (p *propertyParser) getPropertyName(i int) string { + // Convert the UTF16 property name to a Go string. + namePtr := readPropertyName(p, i) + return windows.UTF16PtrToString((*uint16)(namePtr)) +} + +// readPropertyName gets the pointer to the property name in the event information structure. +func readPropertyName(p *propertyParser, i int) unsafe.Pointer { + // Calculate the pointer to the property name using its offset in the event property array. + return unsafe.Add(unsafe.Pointer(p.info), p.info.getEventPropertyInfoAtIndex(uint32(i)).NameOffset) +} + +// getPropertyValue retrieves the value of a specified event property. +func (p *propertyParser) getPropertyValue(i int) (interface{}, error) { + propertyInfo := p.info.getEventPropertyInfoAtIndex(uint32(i)) + + // Determine the size of the property array. 
+ arraySize, err := p.getArraySize(*propertyInfo) + if err != nil { + return nil, fmt.Errorf("failed to get array size: %w", err) + } + + // Initialize a slice to hold the property values. + result := make([]interface{}, arraySize) + for j := 0; j < int(arraySize); j++ { + var ( + value interface{} + err error + ) + // Parse the property value based on its type (simple or structured). + if (propertyInfo.Flags & PropertyStruct) == PropertyStruct { + value, err = p.parseStruct(*propertyInfo) + } else { + value, err = p.parseSimpleType(*propertyInfo) + } + if err != nil { + return nil, err + } + result[j] = value + } + + // Return the entire result set or the single value, based on the property count. + if ((propertyInfo.Flags & PropertyParamCount) == PropertyParamCount) || + (propertyInfo.count() > 1) { + return result, nil + } + return result[0], nil +} + +// getArraySize calculates the size of an array property within an event. +func (p *propertyParser) getArraySize(propertyInfo EventPropertyInfo) (uint32, error) { + // Check if the property's count is specified by another property. + if (propertyInfo.Flags & PropertyParamCount) == PropertyParamCount { + var dataDescriptor PropertyDataDescriptor + // Locate the property containing the array size using the countPropertyIndex. + dataDescriptor.PropertyName = readPropertyName(p, int(propertyInfo.count())) + dataDescriptor.ArrayIndex = 0xFFFFFFFF + // Retrieve the length of the array from the specified property. + return getLengthFromProperty(p.r, &dataDescriptor) + } else { + // If the array size is directly specified, return it. + return uint32(propertyInfo.count()), nil + } +} + +// getLengthFromProperty retrieves the length of a property from an event record. +func getLengthFromProperty(r *EventRecord, dataDescriptor *PropertyDataDescriptor) (uint32, error) { + var length uint32 + // Call TdhGetProperty to get the length of the property specified by the dataDescriptor. 
+ err := _TdhGetProperty( + r, + 0, + nil, + 1, + dataDescriptor, + uint32(unsafe.Sizeof(length)), + (*byte)(unsafe.Pointer(&length)), + ) + if err != nil { + return 0, err + } + return length, nil +} + +// parseStruct extracts and returns the fields from an embedded structure within a property. +func (p *propertyParser) parseStruct(propertyInfo EventPropertyInfo) (map[string]interface{}, error) { + // Determine the start and end indexes of the structure members within the property info. + startIndex := propertyInfo.structStartIndex() + lastIndex := startIndex + propertyInfo.numOfStructMembers() + + // Initialize a map to hold the structure's fields. + structure := make(map[string]interface{}, (lastIndex - startIndex)) + // Iterate through each member of the structure. + for j := startIndex; j < lastIndex; j++ { + name := p.getPropertyName(int(j)) + value, err := p.getPropertyValue(int(j)) + if err != nil { + return nil, fmt.Errorf("failed parse field '%s' of complex property type: %w", name, err) + } + structure[name] = value // Add the field to the structure map. + } + + return structure, nil +} + +// parseSimpleType parses a simple property type using TdhFormatProperty. +func (p *propertyParser) parseSimpleType(propertyInfo EventPropertyInfo) (string, error) { + var mapInfo *EventMapInfo + if propertyInfo.mapNameOffset() > 0 { + // If failed retrieving the map information, returns on error + var err error + mapInfo, err = p.getMapInfo(propertyInfo) + if err != nil { + return "", fmt.Errorf("failed to get map information due to: %w", err) + } + } + + // Get the length of the property. + propertyLength, err := p.getPropertyLength(propertyInfo) + if err != nil { + return "", fmt.Errorf("failed to get property length due to: %w", err) + } + + var userDataConsumed uint16 + + // Set a default buffer size for formatted data. 
+ formattedDataSize := uint32(DEFAULT_PROPERTY_BUFFER_SIZE) + formattedData := make([]byte, int(formattedDataSize)) + + // Retry loop to handle buffer size adjustments. +retryLoop: + for { + var dataPtr *uint8 + if len(p.data) > 0 { + dataPtr = &p.data[0] + } + err := _TdhFormatProperty( + p.info, + mapInfo, + p.ptrSize, + propertyInfo.inType(), + propertyInfo.outType(), + uint16(propertyLength), + uint16(len(p.data)), + dataPtr, + &formattedDataSize, + &formattedData[0], + &userDataConsumed, + ) + + switch { + case err == nil: + // If formatting is successful, break out of the loop. + break retryLoop + case errors.Is(err, ERROR_INSUFFICIENT_BUFFER): + // Increase the buffer size if it's insufficient. + formattedData = make([]byte, formattedDataSize) + continue + case errors.Is(err, ERROR_EVT_INVALID_EVENT_DATA): + // Handle invalid event data error. + // Discarding MapInfo allows us to access + // at least the non-interpreted data. + if mapInfo != nil { + mapInfo = nil + continue + } + return "", fmt.Errorf("TdhFormatProperty failed: %w", err) // Handle unknown error + default: + return "", fmt.Errorf("TdhFormatProperty failed: %w", err) + } + } + // Update the data slice to account for consumed data. + p.data = p.data[userDataConsumed:] + + // Convert the formatted data to string and return. + return windows.UTF16PtrToString((*uint16)(unsafe.Pointer(&formattedData[0]))), nil +} + +// getMapInfo retrieves mapping information for a given property. +func (p *propertyParser) getMapInfo(propertyInfo EventPropertyInfo) (*EventMapInfo, error) { + var mapSize uint32 + // Get the name of the map from the property info. + mapName := (*uint16)(unsafe.Add(unsafe.Pointer(p.info), propertyInfo.mapNameOffset())) + + // First call to get the required size of the map info. + err := _TdhGetEventMapInformation(p.r, mapName, nil, &mapSize) + switch { + case errors.Is(err, ERROR_NOT_FOUND): + // No mapping information available. This is not an error. 
+ return nil, nil + case errors.Is(err, ERROR_INSUFFICIENT_BUFFER): + // Resize the buffer and try again. + default: + return nil, fmt.Errorf("TdhGetEventMapInformation failed to get size: %w", err) + } + + // Allocate buffer and retrieve the actual map information. + buff := make([]byte, int(mapSize)) + mapInfo := ((*EventMapInfo)(unsafe.Pointer(&buff[0]))) + err = _TdhGetEventMapInformation(p.r, mapName, mapInfo, &mapSize) + if err != nil { + return nil, fmt.Errorf("TdhGetEventMapInformation failed: %w", err) + } + + if mapInfo.EntryCount == 0 { + return nil, nil // No entries in the map. + } + + return mapInfo, nil +} + +// getPropertyLength returns the length of a specific property within TraceEventInfo. +func (p *propertyParser) getPropertyLength(propertyInfo EventPropertyInfo) (uint32, error) { + // Check if the length of the property is defined by another property. + if (propertyInfo.Flags & PropertyParamLength) == PropertyParamLength { + var dataDescriptor PropertyDataDescriptor + // Read the property name that contains the length information. + dataDescriptor.PropertyName = readPropertyName(p, int(propertyInfo.length())) + dataDescriptor.ArrayIndex = 0xFFFFFFFF + // Retrieve the length from the specified property. + return getLengthFromProperty(p.r, &dataDescriptor) + } + + inType := propertyInfo.inType() + outType := propertyInfo.outType() + // Special handling for properties representing IPv6 addresses. + // https://docs.microsoft.com/en-us/windows/win32/api/tdh/nf-tdh-tdhformatproperty#remarks + if TdhIntypeBinary == inType && TdhOuttypeIpv6 == outType { + // Return the fixed size of an IPv6 address. + return 16, nil + } + + // Default case: return the length as defined in the property info. + // Note: A length of 0 can indicate a variable-length field (e.g., structure, string). 
+ return uint32(propertyInfo.length()), nil +} diff --git a/x-pack/libbeat/reader/etw/provider.go b/x-pack/libbeat/reader/etw/provider.go new file mode 100644 index 000000000000..e0a20c3facd1 --- /dev/null +++ b/x-pack/libbeat/reader/etw/provider.go @@ -0,0 +1,81 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build windows + +package etw + +import ( + "errors" + "fmt" + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +// utf16AtOffsetToString converts a UTF-16 encoded string +// at a specific offset in a struct to a Go string. +func utf16AtOffsetToString(pstruct uintptr, offset uintptr) string { + // Initialize a slice to store UTF-16 characters. + out := make([]uint16, 0, 64) + + // Start reading at the given offset. + wc := (*uint16)(unsafe.Pointer(pstruct + offset)) + + // Iterate over the UTF-16 characters until a null terminator is encountered. + for i := uintptr(2); *wc != 0; i += 2 { + out = append(out, *wc) + wc = (*uint16)(unsafe.Pointer(pstruct + offset + i)) + } + + // Convert the UTF-16 slice to a Go string and return. + return syscall.UTF16ToString(out) +} + +// guidFromProviderName searches for a provider by name and returns its GUID. +func guidFromProviderName(providerName string) (windows.GUID, error) { + // Returns if the provider name is empty. + if providerName == "" { + return windows.GUID{}, fmt.Errorf("empty provider name") + } + + var buf *ProviderEnumerationInfo + size := uint32(1) + + // Attempt to retrieve provider information with a buffer that increases in size until it's sufficient. 
+ for { + tmp := make([]byte, size) + buf = (*ProviderEnumerationInfo)(unsafe.Pointer(&tmp[0])) + if err := enumerateProvidersFunc(buf, &size); !errors.Is(err, ERROR_INSUFFICIENT_BUFFER) { + break + } + } + + if buf.NumberOfProviders == 0 { + return windows.GUID{}, fmt.Errorf("no providers found") + } + + // Iterate through the list of providers to find a match by name. + startProvEnumInfo := uintptr(unsafe.Pointer(buf)) + it := uintptr(unsafe.Pointer(&buf.TraceProviderInfoArray[0])) + for i := uintptr(0); i < uintptr(buf.NumberOfProviders); i++ { + pInfo := (*TraceProviderInfo)(unsafe.Pointer(it + i*unsafe.Sizeof(buf.TraceProviderInfoArray[0]))) + name := utf16AtOffsetToString(startProvEnumInfo, uintptr(pInfo.ProviderNameOffset)) + + // If a match is found, return the corresponding GUID. + if name == providerName { + return pInfo.ProviderGuid, nil + } + } + + // No matching provider is found. + return windows.GUID{}, fmt.Errorf("unable to find GUID from provider name") +} + +// IsGUIDValid checks if GUID contains valid data +// (any of the fields in the GUID are non-zero) +func IsGUIDValid(guid windows.GUID) bool { + return guid.Data1 != 0 || guid.Data2 != 0 || guid.Data3 != 0 || guid.Data4 != [8]byte{} +} diff --git a/x-pack/libbeat/reader/etw/provider_test.go b/x-pack/libbeat/reader/etw/provider_test.go new file mode 100644 index 000000000000..d8c561ef3e4f --- /dev/null +++ b/x-pack/libbeat/reader/etw/provider_test.go @@ -0,0 +1,199 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build windows + +package etw + +import ( + "encoding/binary" + "syscall" + "testing" + "unsafe" + + "github.com/stretchr/testify/assert" + "golang.org/x/sys/windows" +) + +func TestUTF16AtOffsetToString(t *testing.T) { + // Create a UTF-16 string + sampleText := "This is a string test!" 
+ utf16Str, _ := syscall.UTF16FromString(sampleText) + + // Convert it to uintptr (simulate as if it's part of a larger struct) + ptr := uintptr(unsafe.Pointer(&utf16Str[0])) + + // Test the function + result := utf16AtOffsetToString(ptr, 0) + assert.Equal(t, sampleText, result, "The converted string should match the original") + + // Test with offset (skip the first character) + offset := unsafe.Sizeof(utf16Str[0]) // Size of one UTF-16 character + resultWithOffset := utf16AtOffsetToString(ptr, offset) + assert.Equal(t, sampleText[1:], resultWithOffset, "The converted string with offset should skip the first character") +} + +func TestGUIDFromProviderName_EmptyName(t *testing.T) { + guid, err := guidFromProviderName("") + assert.EqualError(t, err, "empty provider name") + assert.Equal(t, windows.GUID{}, guid, "GUID should be empty for an empty provider name") +} + +func TestGUIDFromProviderName_EmptyProviderList(t *testing.T) { + // Defer restoration of the original function + t.Cleanup(func() { + enumerateProvidersFunc = _TdhEnumerateProviders + }) + + // Define a mock provider name and GUID for testing. 
+ mockProviderName := "NonExistentProvider" + + enumerateProvidersFunc = func(pBuffer *ProviderEnumerationInfo, pBufferSize *uint32) error { + // Check if the buffer size is sufficient + requiredSize := uint32(unsafe.Sizeof(ProviderEnumerationInfo{})) + uint32(unsafe.Sizeof(TraceProviderInfo{}))*0 // As there are no providers + if *pBufferSize < requiredSize { + // Set the size required and return the error + *pBufferSize = requiredSize + return ERROR_INSUFFICIENT_BUFFER + } + + // Empty list of providers + *pBuffer = ProviderEnumerationInfo{ + NumberOfProviders: 0, + TraceProviderInfoArray: [anysizeArray]TraceProviderInfo{}, + } + return nil + } + + guid, err := guidFromProviderName(mockProviderName) + assert.EqualError(t, err, "no providers found") + assert.Equal(t, windows.GUID{}, guid, "GUID should be empty when the provider is not found") +} + +func TestGUIDFromProviderName_GUIDNotFound(t *testing.T) { + // Defer restoration of the original function + t.Cleanup(func() { + enumerateProvidersFunc = _TdhEnumerateProviders + }) + + // Define a mock provider name and GUID for testing. 
+ mockProviderName := "NonExistentProvider" + realProviderName := "ExistentProvider" + mockGUID := windows.GUID{Data1: 1234, Data2: 5678} + + enumerateProvidersFunc = func(pBuffer *ProviderEnumerationInfo, pBufferSize *uint32) error { + // Convert provider name to UTF-16 + utf16ProviderName, _ := syscall.UTF16FromString(realProviderName) + + // Calculate size needed for the provider name string + nameSize := (len(utf16ProviderName) + 1) * 2 // +1 for null-terminator + + requiredSize := uint32(unsafe.Sizeof(ProviderEnumerationInfo{})) + uint32(unsafe.Sizeof(TraceProviderInfo{})) + uint32(nameSize) + if *pBufferSize < requiredSize { + *pBufferSize = requiredSize + return ERROR_INSUFFICIENT_BUFFER + } + + // Calculate the offset for the provider name + // It's placed after ProviderEnumerationInfo and TraceProviderInfo + nameOffset := unsafe.Sizeof(ProviderEnumerationInfo{}) + unsafe.Sizeof(TraceProviderInfo{}) + + // Convert pBuffer to a byte slice starting at the calculated offset for the name + byteBuffer := (*[1 << 30]byte)(unsafe.Pointer(pBuffer))[:] + // Copy the UTF-16 encoded name into the buffer + for i, char := range utf16ProviderName { + binary.LittleEndian.PutUint16(byteBuffer[nameOffset+(uintptr(i)*2):], char) + } + + // Create and populate the ProviderEnumerationInfo struct + *pBuffer = ProviderEnumerationInfo{ + NumberOfProviders: 1, + TraceProviderInfoArray: [anysizeArray]TraceProviderInfo{ + { + ProviderGuid: mockGUID, + ProviderNameOffset: uint32(nameOffset), + }, + }, + } + return nil + } + + guid, err := guidFromProviderName(mockProviderName) + assert.EqualError(t, err, "unable to find GUID from provider name") + assert.Equal(t, windows.GUID{}, guid, "GUID should be empty when the provider is not found") +} + +func TestGUIDFromProviderName_Success(t *testing.T) { + // Defer restoration of the original function + t.Cleanup(func() { + enumerateProvidersFunc = _TdhEnumerateProviders + }) + + // Define a mock provider name and GUID for testing. 
+ mockProviderName := "MockProvider" + mockGUID := windows.GUID{Data1: 1234, Data2: 5678} + + enumerateProvidersFunc = func(pBuffer *ProviderEnumerationInfo, pBufferSize *uint32) error { + // Convert provider name to UTF-16 + utf16ProviderName, _ := syscall.UTF16FromString(mockProviderName) + + // Calculate size needed for the provider name string + nameSize := (len(utf16ProviderName) + 1) * 2 // +1 for null-terminator + + requiredSize := uint32(unsafe.Sizeof(ProviderEnumerationInfo{})) + uint32(unsafe.Sizeof(TraceProviderInfo{})) + uint32(nameSize) + if *pBufferSize < requiredSize { + *pBufferSize = requiredSize + return ERROR_INSUFFICIENT_BUFFER + } + + // Calculate the offset for the provider name + // It's placed after ProviderEnumerationInfo and TraceProviderInfo + nameOffset := unsafe.Sizeof(ProviderEnumerationInfo{}) + unsafe.Sizeof(TraceProviderInfo{}) + + // Convert pBuffer to a byte slice starting at the calculated offset for the name + byteBuffer := (*[1 << 30]byte)(unsafe.Pointer(pBuffer))[:] + // Copy the UTF-16 encoded name into the buffer + for i, char := range utf16ProviderName { + binary.LittleEndian.PutUint16(byteBuffer[nameOffset+(uintptr(i)*2):], char) + } + + // Create and populate the ProviderEnumerationInfo struct + *pBuffer = ProviderEnumerationInfo{ + NumberOfProviders: 1, + TraceProviderInfoArray: [anysizeArray]TraceProviderInfo{ + { + ProviderGuid: mockGUID, + ProviderNameOffset: uint32(nameOffset), + }, + }, + } + return nil + } + + // Run the test + guid, err := guidFromProviderName(mockProviderName) + assert.NoError(t, err) + assert.Equal(t, mockGUID, guid, "GUID should match the mock GUID") +} + +func TestIsGUIDValid_True(t *testing.T) { + // Valid GUID + validGUID := windows.GUID{ + Data1: 0xeb79061a, + Data2: 0xa566, + Data3: 0x4698, + Data4: [8]byte{0x12, 0x34, 0x3e, 0xd2, 0x80, 0x70, 0x33, 0xa0}, + } + + valid := IsGUIDValid(validGUID) + assert.True(t, valid, "IsGUIDValid should return true for a valid GUID") +} + +func 
TestIsGUIDValid_False(t *testing.T) { + // Invalid GUID (all zeros) + invalidGUID := windows.GUID{} + + valid := IsGUIDValid(invalidGUID) + assert.False(t, valid, "IsGUIDValid should return false for an invalid GUID") +} diff --git a/x-pack/libbeat/reader/etw/session.go b/x-pack/libbeat/reader/etw/session.go new file mode 100644 index 000000000000..3a8e7be51d7c --- /dev/null +++ b/x-pack/libbeat/reader/etw/session.go @@ -0,0 +1,250 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build windows + +package etw + +import ( + "errors" + "fmt" + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +// For testing purposes we create a variable to store the function to call +// When running tests, these variables point to a mock function +var ( + guidFromProviderNameFunc = guidFromProviderName + setSessionGUIDFunc = setSessionGUID +) + +type Session struct { + // Name is the identifier for the session. + // It is used to identify the session in logs and also for Windows processes. + Name string + // GUID is the provider GUID to configure the session. + GUID windows.GUID + // properties of the session that are initialized in newSessionProperties() + // See https://learn.microsoft.com/en-us/windows/win32/api/evntrace/ns-evntrace-event_trace_properties for more information + properties *EventTraceProperties + // handler of the event tracing session for which the provider is being configured. + // It is obtained from StartTrace when a new trace is started. + // This handler is needed to enable, query or stop the trace. + handler uintptr + // Realtime is a flag to know if the consumer reads from a logfile or real-time session. + Realtime bool // Real-time flag + // NewSession is a flag to indicate whether a new session has been created or attached to an existing one. 
+ NewSession bool + // TraceLevel sets the maximum level of events that we want the provider to write. + traceLevel uint8 + // matchAnyKeyword is a 64-bit bitmask of keywords that determine the categories of events that we want the provider to write. + // The provider writes an event if the event's keyword bits match any of the bits set in this value + // or if the event has no keyword bits set, in addition to meeting the level and matchAllKeyword criteria. + matchAnyKeyword uint64 + // matchAllKeyword is a 64-bit bitmask of keywords that restricts the events that we want the provider to write. + // The provider typically writes an event if the event's keyword bits match all of the bits set in this value + // or if the event has no keyword bits set, in addition to meeting the level and matchAnyKeyword criteria. + matchAllKeyword uint64 + // traceHandler is the trace processing handle. + // It is used to control the trace that receives and processes events. + traceHandler uint64 + // Callback is the pointer to EventRecordCallback which receives and processes event trace events. + Callback func(*EventRecord) uintptr + // BufferCallback is the pointer to BufferCallback which processes retrieved metadata about the ETW buffers (optional). + BufferCallback func(*EventTraceLogfile) uintptr + + // Pointers to functions that make calls to the Windows API. + // In tests, these pointers can be replaced with mock functions to simulate API behavior without making actual calls to the Windows API. 
+ startTrace func(*uintptr, *uint16, *EventTraceProperties) error + controlTrace func(traceHandle uintptr, instanceName *uint16, properties *EventTraceProperties, controlCode uint32) error + enableTrace func(traceHandle uintptr, providerId *windows.GUID, isEnabled uint32, level uint8, matchAnyKeyword uint64, matchAllKeyword uint64, enableProperty uint32, enableParameters *EnableTraceParameters) error + closeTrace func(traceHandle uint64) error + openTrace func(elf *EventTraceLogfile) (uint64, error) + processTrace func(handleArray *uint64, handleCount uint32, startTime *FileTime, endTime *FileTime) error +} + +// setSessionName determines the session name based on the provided configuration. +func setSessionName(conf Config) string { + // Iterate through potential session name values, returning the first non-empty one. + for _, value := range []string{conf.Logfile, conf.Session, conf.SessionName} { + if value != "" { + return value + } + } + + if conf.ProviderName != "" { + return fmt.Sprintf("Elastic-%s", conf.ProviderName) + } + + return fmt.Sprintf("Elastic-%s", conf.ProviderGUID) +} + +// setSessionGUID determines the session GUID based on the provided configuration. +func setSessionGUID(conf Config) (windows.GUID, error) { + var guid windows.GUID + var err error + + // If ProviderGUID is not set in the configuration, attempt to resolve it using the provider name. + if conf.ProviderGUID == "" { + guid, err = guidFromProviderNameFunc(conf.ProviderName) + if err != nil { + return windows.GUID{}, fmt.Errorf("error resolving GUID: %w", err) + } + } else { + // If ProviderGUID is set, parse it into a GUID structure. 
+ guid, err = windows.GUIDFromString(conf.ProviderGUID) + if err != nil { + return windows.GUID{}, fmt.Errorf("error parsing Windows GUID: %w", err) + } + } + + return guid, nil +} + +// getTraceLevel converts a string representation of a trace level +// to its corresponding uint8 constant value +func getTraceLevel(level string) uint8 { + switch level { + case "critical": + return TRACE_LEVEL_CRITICAL + case "error": + return TRACE_LEVEL_ERROR + case "warning": + return TRACE_LEVEL_WARNING + case "information": + return TRACE_LEVEL_INFORMATION + case "verbose": + return TRACE_LEVEL_VERBOSE + default: + return TRACE_LEVEL_INFORMATION + } +} + +// newSessionProperties initializes and returns a pointer to EventTraceProperties +// with the necessary settings for starting an ETW session. +// See https://learn.microsoft.com/en-us/windows/win32/api/evntrace/ns-evntrace-event_trace_properties +func newSessionProperties(sessionName string) *EventTraceProperties { + // Calculate buffer size for session properties. + sessionNameSize := (len(sessionName) + 1) * 2 + bufSize := sessionNameSize + int(unsafe.Sizeof(EventTraceProperties{})) + + // Allocate buffer and cast to EventTraceProperties. + propertiesBuf := make([]byte, bufSize) + sessionProperties := (*EventTraceProperties)(unsafe.Pointer(&propertiesBuf[0])) + + // Initialize mandatory fields of the EventTraceProperties struct. 
+ // Filled based on https://learn.microsoft.com/en-us/windows/win32/etw/wnode-header + sessionProperties.Wnode.BufferSize = uint32(bufSize) + sessionProperties.Wnode.Guid = windows.GUID{} // GUID not required for non-private/kernel sessions + // ClientContext is used for timestamp resolution + // Not used unless adding PROCESS_TRACE_MODE_RAW_TIMESTAMP flag to EVENT_TRACE_LOGFILE struct + // See https://learn.microsoft.com/en-us/windows/win32/etw/wnode-header + sessionProperties.Wnode.ClientContext = 1 + sessionProperties.Wnode.Flags = WNODE_FLAG_TRACED_GUID + // Set logging mode to real-time + // See https://learn.microsoft.com/en-us/windows/win32/etw/logging-mode-constants + sessionProperties.LogFileMode = EVENT_TRACE_REAL_TIME_MODE + sessionProperties.LogFileNameOffset = 0 // Can be specified to log to a file as well as to a real-time session + sessionProperties.BufferSize = 64 // Default buffer size, can be configurable + sessionProperties.LoggerNameOffset = uint32(unsafe.Sizeof(EventTraceProperties{})) // Offset to the logger name + + return sessionProperties +} + +// NewSession initializes and returns a new ETW Session based on the provided configuration. +func NewSession(conf Config) (Session, error) { + var session Session + var err error + + // Assign ETW Windows API functions + session.startTrace = _StartTrace + session.controlTrace = _ControlTrace + session.enableTrace = _EnableTraceEx2 + session.openTrace = _OpenTrace + session.processTrace = _ProcessTrace + session.closeTrace = _CloseTrace + + session.Name = setSessionName(conf) + session.Realtime = true + + // If a current session is configured, set up the session properties and return. + if conf.Session != "" { + session.properties = newSessionProperties(session.Name) + return session, nil + } else if conf.Logfile != "" { + // If a logfile is specified, set up for non-realtime session. 
+ session.Realtime = false + return session, nil + } + + session.NewSession = true // Indicate this is a new session + + session.GUID, err = setSessionGUIDFunc(conf) + if err != nil { + return Session{}, err + } + + // Initialize additional session properties. + session.properties = newSessionProperties(session.Name) + session.traceLevel = getTraceLevel(conf.TraceLevel) + session.matchAnyKeyword = conf.MatchAnyKeyword + session.matchAllKeyword = conf.MatchAllKeyword + + return session, nil +} + +// StartConsumer initializes and starts the ETW event tracing session. +func (s *Session) StartConsumer() error { + var elf EventTraceLogfile + var err error + + // Configure EventTraceLogfile based on the session type (realtime or not). + if !s.Realtime { + elf.LogFileMode = PROCESS_TRACE_MODE_EVENT_RECORD + logfilePtr, err := syscall.UTF16PtrFromString(s.Name) + if err != nil { + return fmt.Errorf("failed to convert logfile name: %w", err) + } + elf.LogFileName = logfilePtr + } else { + elf.LogFileMode = PROCESS_TRACE_MODE_EVENT_RECORD | PROCESS_TRACE_MODE_REAL_TIME + sessionPtr, err := syscall.UTF16PtrFromString(s.Name) + if err != nil { + return fmt.Errorf("failed to convert session name: %w", err) + } + elf.LoggerName = sessionPtr + } + + // Set callback and context for the session. + if s.Callback == nil { + return fmt.Errorf("error loading callback") + } + elf.Callback = syscall.NewCallback(s.Callback) + elf.Context = 0 + + // Open an ETW trace processing handle for consuming events + // from an ETW real-time trace session or an ETW log file. + s.traceHandler, err = s.openTrace(&elf) + + switch { + case err == nil: + + // Handle specific errors for trace opening. 
+ case errors.Is(err, ERROR_BAD_PATHNAME): + return fmt.Errorf("invalid log source when opening trace: %w", err) + case errors.Is(err, ERROR_ACCESS_DENIED): + return fmt.Errorf("access denied when opening trace: %w", err) + default: + return fmt.Errorf("failed to open trace: %w", err) + } + // Process the trace. This function blocks until processing ends. + if err := s.processTrace(&s.traceHandler, 1, nil, nil); err != nil { + return fmt.Errorf("failed to process trace: %w", err) + } + + return nil +} diff --git a/x-pack/libbeat/reader/etw/session_test.go b/x-pack/libbeat/reader/etw/session_test.go new file mode 100644 index 000000000000..005b9839d5c6 --- /dev/null +++ b/x-pack/libbeat/reader/etw/session_test.go @@ -0,0 +1,338 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build windows + +package etw + +import ( + "fmt" + "testing" + "unsafe" + + "github.com/stretchr/testify/assert" + "golang.org/x/sys/windows" +) + +// TestSetSessionName tests the setSessionName function with various configurations. 
+func TestSetSessionName(t *testing.T) { + testCases := []struct { + name string + config Config + expectedName string + }{ + { + name: "ProviderNameSet", + config: Config{ + ProviderName: "Provider1", + }, + expectedName: "Elastic-Provider1", + }, + { + name: "SessionNameSet", + config: Config{ + SessionName: "Session1", + }, + expectedName: "Session1", + }, + { + name: "LogFileSet", + config: Config{ + Logfile: "LogFile1.etl", + }, + expectedName: "LogFile1.etl", + }, + { + name: "FallbackToProviderGUID", + config: Config{ + ProviderGUID: "12345", + }, + expectedName: "Elastic-12345", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + sessionName := setSessionName(tc.config) + assert.Equal(t, tc.expectedName, sessionName, "The session name should be correctly determined") + }) + } +} + +func mockGUIDFromProviderName(providerName string) (windows.GUID, error) { + // Return a mock GUID regardless of the input + return windows.GUID{Data1: 0x12345678, Data2: 0x1234, Data3: 0x5678, Data4: [8]byte{0x9A, 0xBC, 0xDE, 0xF0, 0x12, 0x34, 0x56, 0x78}}, nil +} + +func TestSetSessionGUID_ProviderName(t *testing.T) { + // Defer restoration of original function + t.Cleanup(func() { + guidFromProviderNameFunc = guidFromProviderName + }) + + // Replace with mock function + guidFromProviderNameFunc = mockGUIDFromProviderName + + conf := Config{ProviderName: "Provider1"} + expectedGUID := windows.GUID{Data1: 0x12345678, Data2: 0x1234, Data3: 0x5678, Data4: [8]byte{0x9A, 0xBC, 0xDE, 0xF0, 0x12, 0x34, 0x56, 0x78}} + + guid, err := setSessionGUID(conf) + assert.NoError(t, err) + assert.Equal(t, expectedGUID, guid, "The GUID should match the mock GUID") +} + +func TestSetSessionGUID_ProviderGUID(t *testing.T) { + // Example GUID string + guidString := "{12345678-1234-5678-1234-567812345678}" + + // Configuration with a set ProviderGUID + conf := Config{ProviderGUID: guidString} + + // Expected GUID based on the GUID string + expectedGUID := 
windows.GUID{Data1: 0x12345678, Data2: 0x1234, Data3: 0x5678, Data4: [8]byte{0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78}} + + guid, err := setSessionGUID(conf) + + assert.NoError(t, err) + assert.Equal(t, expectedGUID, guid, "The GUID should match the expected value") +} + +func TestGetTraceLevel(t *testing.T) { + testCases := []struct { + name string + level string + expectedCode uint8 + }{ + {"CriticalLevel", "critical", TRACE_LEVEL_CRITICAL}, + {"ErrorLevel", "error", TRACE_LEVEL_ERROR}, + {"WarningLevel", "warning", TRACE_LEVEL_WARNING}, + {"InformationLevel", "information", TRACE_LEVEL_INFORMATION}, + {"VerboseLevel", "verbose", TRACE_LEVEL_VERBOSE}, + {"DefaultLevel", "unknown", TRACE_LEVEL_INFORMATION}, // Default case + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := getTraceLevel(tc.level) + assert.Equal(t, tc.expectedCode, result, "Trace level code should match the expected value") + }) + } +} + +func TestNewSessionProperties(t *testing.T) { + testCases := []struct { + name string + sessionName string + expectedSize uint32 + }{ + {"EmptyName", "", 2 + uint32(unsafe.Sizeof(EventTraceProperties{}))}, + {"NormalName", "Session1", 18 + uint32(unsafe.Sizeof(EventTraceProperties{}))}, + // Additional test cases can be added here + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + props := newSessionProperties(tc.sessionName) + + assert.Equal(t, tc.expectedSize, props.Wnode.BufferSize, "BufferSize should match expected value") + assert.Equal(t, windows.GUID{}, props.Wnode.Guid, "GUID should be empty") + assert.Equal(t, uint32(1), props.Wnode.ClientContext, "ClientContext should be 1") + assert.Equal(t, uint32(WNODE_FLAG_TRACED_GUID), props.Wnode.Flags, "Flags should match WNODE_FLAG_TRACED_GUID") + assert.Equal(t, uint32(EVENT_TRACE_REAL_TIME_MODE), props.LogFileMode, "LogFileMode should be set to real-time") + assert.Equal(t, uint32(0), props.LogFileNameOffset, "LogFileNameOffset should be 
0") + assert.Equal(t, uint32(64), props.BufferSize, "BufferSize should be 64") + assert.Equal(t, uint32(unsafe.Sizeof(EventTraceProperties{})), props.LoggerNameOffset, "LoggerNameOffset should be the size of EventTraceProperties") + }) + } +} + +func TestNewSession_ProviderName(t *testing.T) { + // Defer restoration of original function + t.Cleanup(func() { + setSessionGUIDFunc = setSessionGUID + }) + + // Override setSessionGUIDFunc with mock + setSessionGUIDFunc = func(conf Config) (windows.GUID, error) { + return windows.GUID{ + Data1: 0x12345678, + Data2: 0x1234, + Data3: 0x5678, + Data4: [8]byte{0x9A, 0xBC, 0xDE, 0xF0, 0x12, 0x34, 0x56, 0x78}, + }, nil + } + + expectedGUID := windows.GUID{ + Data1: 0x12345678, + Data2: 0x1234, + Data3: 0x5678, + Data4: [8]byte{0x9A, 0xBC, 0xDE, 0xF0, 0x12, 0x34, 0x56, 0x78}, + } + + conf := Config{ + ProviderName: "Provider1", + SessionName: "Session1", + TraceLevel: "warning", + MatchAnyKeyword: 0xffffffffffffffff, + MatchAllKeyword: 0, + } + session, err := NewSession(conf) + + assert.NoError(t, err) + assert.Equal(t, "Session1", session.Name, "SessionName should match expected value") + assert.Equal(t, expectedGUID, session.GUID, "The GUID in the session should match the expected GUID") + assert.Equal(t, uint8(3), session.traceLevel, "TraceLevel should be 3 (warning)") + assert.Equal(t, true, session.NewSession) + assert.Equal(t, true, session.Realtime) + assert.NotNil(t, session.properties) +} + +func TestNewSession_GUIDError(t *testing.T) { + // Defer restoration of original function + t.Cleanup(func() { + setSessionGUIDFunc = setSessionGUID + }) + + // Override setSessionGUIDFunc with mock + setSessionGUIDFunc = func(conf Config) (windows.GUID, error) { + // Return an empty GUID and an error + return windows.GUID{}, fmt.Errorf("mock error") + } + + conf := Config{ + ProviderName: "Provider1", + SessionName: "Session1", + TraceLevel: "warning", + MatchAnyKeyword: 0xffffffffffffffff, + MatchAllKeyword: 0, + } + session, 
err := NewSession(conf) + + assert.EqualError(t, err, "mock error") + expectedSession := Session{} + assert.Equal(t, expectedSession, session, "Session should be its zero value when an error occurs") + +} + +func TestNewSession_AttachSession(t *testing.T) { + // Test case + conf := Config{ + Session: "Session1", + SessionName: "TestSession", + TraceLevel: "verbose", + MatchAnyKeyword: 0xffffffffffffffff, + MatchAllKeyword: 0, + } + session, err := NewSession(conf) + + assert.NoError(t, err) + assert.Equal(t, "Session1", session.Name, "SessionName should match expected value") + assert.Equal(t, false, session.NewSession) + assert.Equal(t, true, session.Realtime) + assert.NotNil(t, session.properties) +} + +func TestNewSession_Logfile(t *testing.T) { + // Test case + conf := Config{ + Logfile: "LogFile1.etl", + TraceLevel: "verbose", + MatchAnyKeyword: 0xffffffffffffffff, + MatchAllKeyword: 0, + } + session, err := NewSession(conf) + + assert.NoError(t, err) + assert.Equal(t, "LogFile1.etl", session.Name, "SessionName should match expected value") + assert.Equal(t, false, session.NewSession) + assert.Equal(t, false, session.Realtime) + assert.Nil(t, session.properties) +} + +func TestStartConsumer_CallbackNull(t *testing.T) { + // Create a Session instance + session := &Session{ + Name: "TestSession", + Realtime: false, + BufferCallback: nil, + Callback: nil, + } + + err := session.StartConsumer() + assert.EqualError(t, err, "error loading callback") +} + +func TestStartConsumer_OpenTraceError(t *testing.T) { + // Mock implementation of openTrace + openTrace := func(elf *EventTraceLogfile) (uint64, error) { + return 0, ERROR_ACCESS_DENIED // Mock a valid session handler + } + + // Create a Session instance + session := &Session{ + Name: "TestSession", + Realtime: false, + BufferCallback: nil, + Callback: func(*EventRecord) uintptr { + return 1 + }, + openTrace: openTrace, + } + + err := session.StartConsumer() + assert.EqualError(t, err, "access denied when opening 
trace: Access is denied.") +} + +func TestStartConsumer_ProcessTraceError(t *testing.T) { + // Mock implementations + openTrace := func(elf *EventTraceLogfile) (uint64, error) { + return 12345, nil // Mock a valid session handler + } + + processTrace := func(handleArray *uint64, handleCount uint32, startTime *FileTime, endTime *FileTime) error { + return ERROR_INVALID_PARAMETER + } + + // Create a Session instance + session := &Session{ + Name: "TestSession", + Realtime: true, + BufferCallback: nil, + Callback: func(*EventRecord) uintptr { + return 1 + }, + openTrace: openTrace, + processTrace: processTrace, + } + + err := session.StartConsumer() + assert.EqualError(t, err, "failed to process trace: The parameter is incorrect.") +} + +func TestStartConsumer_Success(t *testing.T) { + // Mock implementations + openTrace := func(elf *EventTraceLogfile) (uint64, error) { + return 12345, nil // Mock a valid session handler + } + + processTrace := func(handleArray *uint64, handleCount uint32, startTime *FileTime, endTime *FileTime) error { + return nil + } + + // Create a Session instance + session := &Session{ + Name: "TestSession", + Realtime: true, + BufferCallback: nil, + Callback: func(*EventRecord) uintptr { + return 1 + }, + openTrace: openTrace, + processTrace: processTrace, + } + + err := session.StartConsumer() + assert.NoError(t, err) + assert.Equal(t, uint64(12345), session.traceHandler, "traceHandler should be set to the mock value") +} diff --git a/x-pack/libbeat/reader/etw/syscall_advapi32.go b/x-pack/libbeat/reader/etw/syscall_advapi32.go new file mode 100644 index 000000000000..fe44b0022a46 --- /dev/null +++ b/x-pack/libbeat/reader/etw/syscall_advapi32.go @@ -0,0 +1,318 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +//go:build windows + +package etw + +import ( + "errors" + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + advapi32 = windows.NewLazySystemDLL("advapi32.dll") + // Controller + startTraceW = advapi32.NewProc("StartTraceW") + enableTraceEx2 = advapi32.NewProc("EnableTraceEx2") // Manifest-based providers and filtering + controlTraceW = advapi32.NewProc("ControlTraceW") + // Consumer + openTraceW = advapi32.NewProc("OpenTraceW") + processTrace = advapi32.NewProc("ProcessTrace") + closeTrace = advapi32.NewProc("CloseTrace") +) + +// https://learn.microsoft.com/en-us/windows/win32/api/evntrace/ns-evntrace-event_trace +type EventTrace struct { + Header EventTraceHeader + InstanceId uint32 + ParentInstanceId uint32 + ParentGuid windows.GUID + MofData uintptr + MofLength uint32 + UnionCtx uint32 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/evntrace/ns-evntrace-event_trace_header +type EventTraceHeader struct { + Size uint16 + Union1 uint16 + Union2 uint32 + ThreadId uint32 + ProcessId uint32 + TimeStamp int64 + Union3 [16]byte + Union4 uint64 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/evntrace/ns-evntrace-event_trace_properties +type EventTraceProperties struct { + Wnode WnodeHeader + BufferSize uint32 + MinimumBuffers uint32 + MaximumBuffers uint32 + MaximumFileSize uint32 + LogFileMode uint32 + FlushTimer uint32 + EnableFlags uint32 + AgeLimit int32 + NumberOfBuffers uint32 + FreeBuffers uint32 + EventsLost uint32 + BuffersWritten uint32 + LogBuffersLost uint32 + RealTimeBuffersLost uint32 + LoggerThreadId syscall.Handle + LogFileNameOffset uint32 + LoggerNameOffset uint32 +} + +// https://learn.microsoft.com/en-us/windows/win32/etw/wnode-header +type WnodeHeader struct { + BufferSize uint32 + ProviderId uint32 + Union1 uint64 + Union2 int64 + Guid windows.GUID + ClientContext uint32 + Flags uint32 +} + +// Used to enable a provider via EnableTraceEx2 +// 
https://learn.microsoft.com/en-us/windows/win32/api/evntrace/ns-evntrace-enable_trace_parameters +type EnableTraceParameters struct { + Version uint32 + EnableProperty uint32 + ControlFlags uint32 + SourceId windows.GUID + EnableFilterDesc *EventFilterDescriptor + FilterDescrCount uint32 +} + +// Defines the filter data that a session passes +// to the provider's enable callback function +// https://learn.microsoft.com/en-us/windows/win32/api/evntprov/ns-evntprov-event_filter_descriptor +type EventFilterDescriptor struct { + Ptr uint64 + Size uint32 + Type uint32 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/evntrace/ns-evntrace-event_trace_logfilew +type EventTraceLogfile struct { + LogFileName *uint16 // Logfile + LoggerName *uint16 // Real-time session + CurrentTime int64 + BuffersRead uint32 + LogFileMode uint32 + CurrentEvent EventTrace + LogfileHeader TraceLogfileHeader + BufferCallback uintptr + BufferSize uint32 + Filled uint32 + EventsLost uint32 + // Receive events (EventRecordCallback (TDH) or EventCallback) + // Tip: New code should use EventRecordCallback instead of EventCallback. 
+ // The EventRecordCallback receives an EVENT_RECORD which contains + // more complete event information + Callback uintptr + IsKernelTrace uint32 + Context uintptr +} + +// https://learn.microsoft.com/en-us/windows/win32/api/evntrace/ns-evntrace-trace_logfile_header +type TraceLogfileHeader struct { + BufferSize uint32 + VersionUnion uint32 + ProviderVersion uint32 + NumberOfProcessors uint32 + EndTime int64 + TimerResolution uint32 + MaximumFileSize uint32 + LogFileMode uint32 + BuffersWritten uint32 + Union1 [16]byte + LoggerName *uint16 + LogFileName *uint16 + TimeZone windows.Timezoneinformation + BootTime int64 + PerfFreq int64 + StartTime int64 + ReservedFlags uint32 + BuffersLost uint32 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/minwinbase/ns-minwinbase-filetime +type FileTime struct { + dwLowDateTime uint32 + dwHighDateTime uint32 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/minwinbase/ns-minwinbase-systemtime +type SystemTime struct { + Year uint16 + Month uint16 + DayOfWeek uint16 + Day uint16 + Hour uint16 + Minute uint16 + Second uint16 + Milliseconds uint16 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/evntrace/nf-evntrace-enabletrace +const ( + TRACE_LEVEL_NONE = 0 + TRACE_LEVEL_CRITICAL = 1 + TRACE_LEVEL_FATAL = 1 + TRACE_LEVEL_ERROR = 2 + TRACE_LEVEL_WARNING = 3 + TRACE_LEVEL_INFORMATION = 4 + TRACE_LEVEL_VERBOSE = 5 +) + +// https://learn.microsoft.com/en-us/windows/win32/api/evntprov/nc-evntprov-penablecallback +const ( + EVENT_CONTROL_CODE_DISABLE_PROVIDER = 0 + EVENT_CONTROL_CODE_ENABLE_PROVIDER = 1 + EVENT_CONTROL_CODE_CAPTURE_STATE = 2 +) + +// https://learn.microsoft.com/en-us/windows/win32/api/evntrace/nf-evntrace-controltracea +const ( + EVENT_TRACE_CONTROL_QUERY = 0 + EVENT_TRACE_CONTROL_STOP = 1 + EVENT_TRACE_CONTROL_UPDATE = 2 + EVENT_TRACE_CONTROL_FLUSH = 3 +) + +// https://learn.microsoft.com/en-us/windows/win32/api/evntrace/ns-evntrace-event_trace_logfilea +const ( + 
PROCESS_TRACE_MODE_REAL_TIME = 0x00000100 + PROCESS_TRACE_MODE_RAW_TIMESTAMP = 0x00001000 + PROCESS_TRACE_MODE_EVENT_RECORD = 0x10000000 +) + +const INVALID_PROCESSTRACE_HANDLE = 0xFFFFFFFFFFFFFFFF + +// https://learn.microsoft.com/en-us/windows/win32/debug/system-error-codes +const ( + ERROR_ACCESS_DENIED syscall.Errno = 5 + ERROR_INVALID_HANDLE syscall.Errno = 6 + ERROR_BAD_LENGTH syscall.Errno = 24 + ERROR_INVALID_PARAMETER syscall.Errno = 87 + ERROR_INSUFFICIENT_BUFFER syscall.Errno = 122 + ERROR_BAD_PATHNAME syscall.Errno = 161 + ERROR_ALREADY_EXISTS syscall.Errno = 183 + ERROR_NOT_FOUND syscall.Errno = 1168 + ERROR_NO_SYSTEM_RESOURCES syscall.Errno = 1450 + ERROR_TIMEOUT syscall.Errno = 1460 + ERROR_WMI_INSTANCE_NOT_FOUND syscall.Errno = 4201 + ERROR_CTX_CLOSE_PENDING syscall.Errno = 7007 + ERROR_EVT_INVALID_EVENT_DATA syscall.Errno = 15005 +) + +// https://learn.microsoft.com/en-us/windows/win32/etw/logging-mode-constants (to extend modes) +// https://learn.microsoft.com/en-us/windows-hardware/drivers/ddi/wmistr/ns-wmistr-_wnode_header (to extend flags) +const ( + WNODE_FLAG_ALL_DATA = 0x00000001 + WNODE_FLAG_TRACED_GUID = 0x00020000 + EVENT_TRACE_REAL_TIME_MODE = 0x00000100 +) + +// Wrappers + +// https://learn.microsoft.com/en-us/windows/win32/api/evntrace/nf-evntrace-starttracew +func _StartTrace(traceHandle *uintptr, + instanceName *uint16, + properties *EventTraceProperties) error { + r0, _, _ := startTraceW.Call( + uintptr(unsafe.Pointer(traceHandle)), + uintptr(unsafe.Pointer(instanceName)), + uintptr(unsafe.Pointer(properties))) + if r0 == 0 { + return nil + } + return syscall.Errno(r0) +} + +// https://learn.microsoft.com/en-us/windows/win32/api/evntrace/nf-evntrace-enabletraceex2 +func _EnableTraceEx2(traceHandle uintptr, + providerId *windows.GUID, + isEnabled uint32, + level uint8, + matchAnyKeyword uint64, + matchAllKeyword uint64, + enableProperty uint32, + enableParameters *EnableTraceParameters) error { + r0, _, _ := enableTraceEx2.Call( + 
traceHandle, + uintptr(unsafe.Pointer(providerId)), + uintptr(isEnabled), + uintptr(level), + uintptr(matchAnyKeyword), + uintptr(matchAllKeyword), + uintptr(enableProperty), + uintptr(unsafe.Pointer(enableParameters))) + if r0 == 0 { + return nil + } + return syscall.Errno(r0) +} + +// https://learn.microsoft.com/en-us/windows/win32/api/evntrace/nf-evntrace-controltracew +func _ControlTrace(traceHandle uintptr, + instanceName *uint16, + properties *EventTraceProperties, + controlCode uint32) error { + r0, _, _ := controlTraceW.Call( + traceHandle, + uintptr(unsafe.Pointer(instanceName)), + uintptr(unsafe.Pointer(properties)), + uintptr(controlCode)) + if r0 == 0 { + return nil + } + return syscall.Errno(r0) +} + +// https://learn.microsoft.com/en-us/windows/win32/api/evntrace/nf-evntrace-opentracew +func _OpenTrace(logfile *EventTraceLogfile) (uint64, error) { + r0, _, err := openTraceW.Call( + uintptr(unsafe.Pointer(logfile))) + var errno syscall.Errno + if errors.As(err, &errno) && errno == 0 { + return uint64(r0), nil + } + return uint64(r0), err +} + +// https://learn.microsoft.com/en-us/windows/win32/api/evntrace/nf-evntrace-processtrace +func _ProcessTrace(handleArray *uint64, + handleCount uint32, + startTime *FileTime, + endTime *FileTime) error { + r0, _, _ := processTrace.Call( + uintptr(unsafe.Pointer(handleArray)), + uintptr(handleCount), + uintptr(unsafe.Pointer(startTime)), + uintptr(unsafe.Pointer(endTime))) + if r0 == 0 { + return nil + } + return syscall.Errno(r0) +} + +// https://learn.microsoft.com/en-us/windows/win32/api/evntrace/nf-evntrace-closetrace +func _CloseTrace(traceHandle uint64) error { + r0, _, _ := closeTrace.Call( + uintptr(traceHandle)) + if r0 == 0 { + return nil + } + return syscall.Errno(r0) +} diff --git a/x-pack/libbeat/reader/etw/syscall_tdh.go b/x-pack/libbeat/reader/etw/syscall_tdh.go new file mode 100644 index 000000000000..73551ee123e2 --- /dev/null +++ b/x-pack/libbeat/reader/etw/syscall_tdh.go @@ -0,0 +1,323 @@ +// 
Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build windows + +package etw + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + tdh = windows.NewLazySystemDLL("tdh.dll") + tdhEnumerateProviders = tdh.NewProc("TdhEnumerateProviders") + tdhGetEventInformation = tdh.NewProc("TdhGetEventInformation") + tdhGetEventMapInformation = tdh.NewProc("TdhGetEventMapInformation") + tdhFormatProperty = tdh.NewProc("TdhFormatProperty") + tdhGetProperty = tdh.NewProc("TdhGetProperty") +) + +const anysizeArray = 1 +const DEFAULT_PROPERTY_BUFFER_SIZE = 256 + +// https://learn.microsoft.com/en-us/windows/win32/api/tdh/ns-tdh-provider_enumeration_info +type ProviderEnumerationInfo struct { + NumberOfProviders uint32 + Reserved uint32 + TraceProviderInfoArray [anysizeArray]TraceProviderInfo +} + +// https://learn.microsoft.com/en-us/windows/win32/api/tdh/ns-tdh-trace_provider_info +type TraceProviderInfo struct { + ProviderGuid windows.GUID + SchemaSource uint32 + ProviderNameOffset uint32 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/evntcons/ns-evntcons-event_record +type EventRecord struct { + EventHeader EventHeader + BufferContext EtwBufferContext + ExtendedDataCount uint16 + UserDataLength uint16 + ExtendedData *EventHeaderExtendedDataItem + UserData uintptr // Event data + UserContext uintptr +} + +// https://learn.microsoft.com/en-us/windows/win32/api/relogger/ns-relogger-event_header +const ( + EVENT_HEADER_FLAG_STRING_ONLY = 0x0004 + EVENT_HEADER_FLAG_32_BIT_HEADER = 0x0020 + EVENT_HEADER_FLAG_64_BIT_HEADER = 0x0040 +) + +// https://learn.microsoft.com/en-us/windows/win32/api/relogger/ns-relogger-event_header +type EventHeader struct { + Size uint16 + HeaderType uint16 + Flags uint16 + EventProperty uint16 + ThreadId uint32 + ProcessId uint32 
+ TimeStamp int64 + ProviderId windows.GUID + EventDescriptor EventDescriptor + Time int64 + ActivityId windows.GUID +} + +func (e *EventRecord) pointerSize() uint32 { + if e.EventHeader.Flags&EVENT_HEADER_FLAG_32_BIT_HEADER == EVENT_HEADER_FLAG_32_BIT_HEADER { + return 4 + } + return 8 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/evntprov/ns-evntprov-event_descriptor +type EventDescriptor struct { + Id uint16 + Version uint8 + Channel uint8 + Level uint8 + Opcode uint8 + Task uint16 + Keyword uint64 +} + +// https://learn.microsoft.com/en-us/windows/desktop/api/relogger/ns-relogger-etw_buffer_context +type EtwBufferContext struct { + Union uint16 + LoggerId uint16 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/evntcons/ns-evntcons-event_header_extended_data_item +type EventHeaderExtendedDataItem struct { + Reserved1 uint16 + ExtType uint16 + InternalStruct uint16 + DataSize uint16 + DataPtr uint64 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/tdh/ns-tdh-tdh_context +type TdhContext struct { + ParameterValue uint32 + ParameterType int32 + ParameterSize uint32 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/tdh/ns-tdh-trace_event_info +type TraceEventInfo struct { + ProviderGUID windows.GUID + EventGUID windows.GUID + EventDescriptor EventDescriptor + DecodingSource DecodingSource + ProviderNameOffset uint32 + LevelNameOffset uint32 + ChannelNameOffset uint32 + KeywordsNameOffset uint32 + TaskNameOffset uint32 + OpcodeNameOffset uint32 + EventMessageOffset uint32 + ProviderMessageOffset uint32 + BinaryXMLOffset uint32 + BinaryXMLSize uint32 + ActivityIDNameOffset uint32 + RelatedActivityIDNameOffset uint32 + PropertyCount uint32 + TopLevelPropertyCount uint32 + Flags TemplateFlags + EventPropertyInfoArray [anysizeArray]EventPropertyInfo +} + +// https://learn.microsoft.com/en-us/windows/desktop/api/tdh/ns-tdh-event_property_info +type EventPropertyInfo struct { + Flags PropertyFlags + NameOffset uint32 + 
TypeUnion struct { + u1 uint16 + u2 uint16 + u3 uint32 + } + CountUnion uint16 + LengthUnion uint16 + ResTagUnion uint32 +} + +func (i *EventPropertyInfo) count() uint16 { + return i.CountUnion +} + +func (i *EventPropertyInfo) length() uint16 { + return i.LengthUnion +} + +func (i *EventPropertyInfo) inType() uint16 { + return i.TypeUnion.u1 +} + +func (i *EventPropertyInfo) outType() uint16 { + return i.TypeUnion.u2 +} + +func (i *EventPropertyInfo) structStartIndex() uint16 { + return i.inType() +} + +func (i *EventPropertyInfo) numOfStructMembers() uint16 { + return i.outType() +} + +func (i *EventPropertyInfo) mapNameOffset() uint32 { + return i.TypeUnion.u3 +} + +const ( + TdhIntypeBinary = 14 + TdhOuttypeIpv6 = 24 +) + +type DecodingSource int32 +type TemplateFlags int32 + +type PropertyFlags int32 + +// https://learn.microsoft.com/en-us/windows/win32/api/tdh/ne-tdh-property_flags +const ( + PropertyStruct = PropertyFlags(0x1) + PropertyParamLength = PropertyFlags(0x2) + PropertyParamCount = PropertyFlags(0x4) +) + +// https://learn.microsoft.com/en-us/windows/win32/api/tdh/ns-tdh-event_map_info +type EventMapInfo struct { + NameOffset uint32 + Flag MapFlags + EntryCount uint32 + Union uint32 + MapEntryArray [anysizeArray]EventMapEntry +} + +type MapFlags int32 + +// https://learn.microsoft.com/en-us/windows/win32/api/tdh/ns-tdh-event_map_entry +type EventMapEntry struct { + OutputOffset uint32 + Union uint32 +} + +// https://learn.microsoft.com/en-us/windows/desktop/api/tdh/ns-tdh-property_data_descriptor +type PropertyDataDescriptor struct { + PropertyName unsafe.Pointer + ArrayIndex uint32 + Reserved uint32 +} + +// enumerateProvidersFunc is used to replace the pointer to the function in unit tests +var enumerateProvidersFunc = _TdhEnumerateProviders + +// https://learn.microsoft.com/en-us/windows/win32/api/tdh/nf-tdh-tdhenumerateproviders +func _TdhEnumerateProviders( + pBuffer *ProviderEnumerationInfo, + pBufferSize *uint32) error { + r0, _, _ := 
tdhEnumerateProviders.Call( + uintptr(unsafe.Pointer(pBuffer)), + uintptr(unsafe.Pointer(pBufferSize))) + if r0 == 0 { + return nil + } + return syscall.Errno(r0) +} + +// https://learn.microsoft.com/en-us/windows/win32/api/tdh/nf-tdh-tdhgeteventinformation +func _TdhGetEventInformation(pEvent *EventRecord, + tdhContextCount uint32, + pTdhContext *TdhContext, + pBuffer *TraceEventInfo, + pBufferSize *uint32) error { + r0, _, _ := tdhGetEventInformation.Call( + uintptr(unsafe.Pointer(pEvent)), + uintptr(tdhContextCount), + uintptr(unsafe.Pointer(pTdhContext)), + uintptr(unsafe.Pointer(pBuffer)), + uintptr(unsafe.Pointer(pBufferSize))) + if r0 == 0 { + return nil + } + return syscall.Errno(r0) +} + +// https://learn.microsoft.com/en-us/windows/win32/api/tdh/nf-tdh-tdhformatproperty +func _TdhFormatProperty( + eventInfo *TraceEventInfo, + mapInfo *EventMapInfo, + pointerSize uint32, + propertyInType uint16, + propertyOutType uint16, + propertyLength uint16, + userDataLength uint16, + userData *byte, + bufferSize *uint32, + buffer *uint8, + userDataConsumed *uint16) error { + r0, _, _ := tdhFormatProperty.Call( + uintptr(unsafe.Pointer(eventInfo)), + uintptr(unsafe.Pointer(mapInfo)), + uintptr(pointerSize), + uintptr(propertyInType), + uintptr(propertyOutType), + uintptr(propertyLength), + uintptr(userDataLength), + uintptr(unsafe.Pointer(userData)), + uintptr(unsafe.Pointer(bufferSize)), + uintptr(unsafe.Pointer(buffer)), + uintptr(unsafe.Pointer(userDataConsumed))) + if r0 == 0 { + return nil + } + return syscall.Errno(r0) +} + +// https://learn.microsoft.com/en-us/windows/win32/api/tdh/nf-tdh-tdhgetproperty +func _TdhGetProperty(pEvent *EventRecord, + tdhContextCount uint32, + pTdhContext *TdhContext, + propertyDataCount uint32, + pPropertyData *PropertyDataDescriptor, + bufferSize uint32, + pBuffer *byte) error { + r0, _, _ := tdhGetProperty.Call( + uintptr(unsafe.Pointer(pEvent)), + uintptr(tdhContextCount), + uintptr(unsafe.Pointer(pTdhContext)), + 
uintptr(propertyDataCount), + uintptr(unsafe.Pointer(pPropertyData)), + uintptr(bufferSize), + uintptr(unsafe.Pointer(pBuffer))) + if r0 == 0 { + return nil + } + return syscall.Errno(r0) +} + +// https://learn.microsoft.com/en-us/windows/win32/api/tdh/nf-tdh-tdhgeteventmapinformation +func _TdhGetEventMapInformation(pEvent *EventRecord, + pMapName *uint16, + pBuffer *EventMapInfo, + pBufferSize *uint32) error { + r0, _, _ := tdhGetEventMapInformation.Call( + uintptr(unsafe.Pointer(pEvent)), + uintptr(unsafe.Pointer(pMapName)), + uintptr(unsafe.Pointer(pBuffer)), + uintptr(unsafe.Pointer(pBufferSize))) + if r0 == 0 { + return nil + } + return syscall.Errno(r0) +} diff --git a/x-pack/metricbeat/module/oracle/_meta/docs.asciidoc b/x-pack/metricbeat/module/oracle/_meta/docs.asciidoc index 7a93e3069816..887b06019399 100644 --- a/x-pack/metricbeat/module/oracle/_meta/docs.asciidoc +++ b/x-pack/metricbeat/module/oracle/_meta/docs.asciidoc @@ -48,19 +48,24 @@ Then, Metricbeat can be launched. *Host Configuration* -The following two types of host configurations are supported: +The following types of host configuration are supported: -1. Old style host configuration for backwards compatibility: +1. An old-style Oracle connection string, for backwards compatibility: a. `hosts: ["user/pass@0.0.0.0:1521/ORCLPDB1.localdomain"]` b. `hosts: ["user/password@0.0.0.0:1521/ORCLPDB1.localdomain as sysdba"]` -2. DSN host configuration: +2. DSN configuration as a URL: + a. `hosts: ["oracle://user:pass@0.0.0.0:1521/ORCLPDB1.localdomain?sysdba=1"]` + +3. DSN configuration as a logfmt-encoded parameter list: a. `hosts: ['user="user" password="pass" connectString="0.0.0.0:1521/ORCLPDB1.localdomain"']` b. `hosts: ['user="user" password="password" connectString="host:port/service_name" sysdba=true']` -DSN host configuration is the recommended way to configure the Oracle Metricbeat Module as it supports the usage of special characters in the password. 
+DSN host configuration is the recommended configuration type as it supports the use of special characters in the password. + +In a URL any special characters should be URL encoded. -Note: If the password contains the backslash (`\`) character, it must be escaped with a backslash. For example, if the password is `my\_password`, it should be written as `my\\_password`. +In the logfmt-encoded DSN format, if the password contains a backslash character (`\`), it must be escaped with another backslash. For example, if the password is `my\_password`, it must be written as `my\\_password`. [float] == Metricsets diff --git a/x-pack/metricbeat/module/sql/_meta/docs.asciidoc b/x-pack/metricbeat/module/sql/_meta/docs.asciidoc index 17175cb58780..95ae9376e4d0 100644 --- a/x-pack/metricbeat/module/sql/_meta/docs.asciidoc +++ b/x-pack/metricbeat/module/sql/_meta/docs.asciidoc @@ -859,19 +859,26 @@ Then, Metricbeat can be launched. ===== Host Configuration for Oracle -The following two types of host configurations are supported: +The following types of host configuration are supported: -1. DSN host configuration as URL: +1. An old-style Oracle connection string, for backwards compatibility: a. `hosts: ["user/pass@0.0.0.0:1521/ORCLPDB1.localdomain"]` b. `hosts: ["user/password@0.0.0.0:1521/ORCLPDB1.localdomain as sysdba"]` -2. DSN host configuration: +2. DSN configuration as a URL: + a. `hosts: ["oracle://user:pass@0.0.0.0:1521/ORCLPDB1.localdomain?sysdba=1"]` + +3. DSN configuration as a logfmt-encoded parameter list: a. `hosts: ['user="user" password="pass" connectString="0.0.0.0:1521/ORCLPDB1.localdomain"']` b. `hosts: ['user="user" password="password" connectString="host:port/service_name" sysdba=true']` -Note: If the password contains the backslash (`\`) character, it must be escaped with a backslash. For example, if the password is `my\_password`, it should be written as `my\\_password`. 
+DSN host configuration is the recommended configuration type as it supports the use of special characters in the password. + +In a URL any special characters should be URL encoded. -The username and password to connect to the database can be provided as values to `username` and `password` keys of `sql.yml`. +In the logfmt-encoded DSN format, if the password contains a backslash character (`\`), it must be escaped with another backslash. For example, if the password is `my\_password`, it must be written as `my\\_password`. + +The username and password to connect to the database can be provided as values to the `username` and `password` keys of `sql.yml`. [source,yml] ---- @@ -887,4 +894,4 @@ The username and password to connect to the database can be provided as values t sql_queries: - query: SELECT METRIC_NAME, VALUE FROM V$SYSMETRIC WHERE GROUP_ID = 2 and METRIC_NAME LIKE '%' response_format: variables ----- \ No newline at end of file +---- diff --git a/x-pack/packetbeat/_meta/config/output-elasticsearch.yml.tmpl b/x-pack/packetbeat/_meta/config/output-elasticsearch.yml.tmpl new file mode 100644 index 000000000000..ffb3bc696fc2 --- /dev/null +++ b/x-pack/packetbeat/_meta/config/output-elasticsearch.yml.tmpl @@ -0,0 +1,15 @@ +{{subheader "Elasticsearch Output"}} +output.elasticsearch: + # Array of hosts to connect to. + hosts: ["localhost:9200"] + + # Protocol - either `http` (default) or `https`. + #protocol: "https" + + # Authentication credentials - either API key or username/password. + #api_key: "id:api_key" + #username: "elastic" + #password: "changeme" + + # Pipeline to route events to protocol pipelines. + pipeline: "packetbeat-%{[agent.version]}-routing" diff --git a/x-pack/packetbeat/cmd/root.go b/x-pack/packetbeat/cmd/root.go index f77bd827bf22..8611fe8d1150 100644 --- a/x-pack/packetbeat/cmd/root.go +++ b/x-pack/packetbeat/cmd/root.go @@ -21,6 +21,9 @@ import ( // This registers the Npcap installer on Windows. 
_ "github.com/elastic/beats/v7/x-pack/packetbeat/npcap" + + // Enable pipelines. + _ "github.com/elastic/beats/v7/x-pack/packetbeat/module" ) // Name of this beat. diff --git a/x-pack/packetbeat/magefile.go b/x-pack/packetbeat/magefile.go index c5df7ef2deb2..03104ab9157e 100644 --- a/x-pack/packetbeat/magefile.go +++ b/x-pack/packetbeat/magefile.go @@ -36,7 +36,7 @@ import ( // the packetbeat executable. It is used to specify which npcap builder crossbuild // image to use and the installer to obtain from the cloud store for testing. const ( - NpcapVersion = "1.78" + NpcapVersion = "1.79" installer = "npcap-" + NpcapVersion + "-oem.exe" ) @@ -47,6 +47,7 @@ func init() { devtools.BeatDescription = "Packetbeat analyzes network traffic and sends the data to Elasticsearch." devtools.BeatLicense = "Elastic License" + packetbeat.SelectLogic = devtools.XPackProject } // Update updates the generated files. diff --git a/x-pack/packetbeat/module/amqp/ingest/default.yml b/x-pack/packetbeat/module/amqp/ingest/default.yml new file mode 100644 index 000000000000..7b2268f48129 --- /dev/null +++ b/x-pack/packetbeat/module/amqp/ingest/default.yml @@ -0,0 +1,59 @@ +--- +description: Pipeline for processing amqp traffic +processors: +- set: + field: ecs.version + value: '8.11.0' +## +# Set host.mac to dash separated upper case value +# as per ECS recommendation +## +- gsub: + field: host.mac + pattern: '[-:.]' + replacement: '' + tag: gsubmac + ignore_missing: true +- gsub: + field: host.mac + pattern: '(..)(?!$)' + replacement: '$1-' + tag: gsubmac + ignore_missing: true +- uppercase: + field: host.mac + ignore_missing: true +- append: + field: related.hosts + value: "{{{observer.hostname}}}" + if: ctx.observer?.hostname != null && ctx.observer?.hostname != '' + allow_duplicates: false +- foreach: + if: ctx.observer?.ip != null && ctx.observer.ip instanceof List + field: observer.ip + tag: foreachip + processor: + append: + field: related.ip + value: '{{{_ingest._value}}}' + 
allow_duplicates: false +- remove: + if: ctx.host != null && ctx.tags != null && ctx.tags.contains('forwarded') + field: host + +- pipeline: + if: ctx._conf?.geoip_enrich != null && ctx._conf.geoip_enrich + name: '{{ IngestPipeline "geoip" }}' + tag: pipelineprocessor +- remove: + field: _conf + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/amqp/ingest/geoip.yml b/x-pack/packetbeat/module/amqp/ingest/geoip.yml new file mode 100644 index 000000000000..eb88d38caf0c --- /dev/null +++ b/x-pack/packetbeat/module/amqp/ingest/geoip.yml @@ -0,0 +1,103 @@ +--- +description: GeoIP enrichment. +processors: + - geoip: + field: source.ip + target_field: source.geo + ignore_missing: true + tag: source_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true + tag: source_geo + - rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true + - rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true + + - geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true + tag: destination_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true + tag: destination_geo + - rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true + - rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true + + - geoip: + field: server.ip + target_field: 
server.geo + ignore_missing: true + tag: server_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: server.ip + target_field: server.as + properties: + - asn + - organization_name + ignore_missing: true + tag: server_geo + - rename: + field: server.as.asn + target_field: server.as.number + ignore_missing: true + - rename: + field: server.as.organization_name + target_field: server.as.organization.name + ignore_missing: true + + - geoip: + field: client.ip + target_field: client.geo + ignore_missing: true + tag: client_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: client.ip + target_field: client.as + properties: + - asn + - organization_name + ignore_missing: true + tag: client_geo + - rename: + field: client.as.asn + target_field: client.as.number + ignore_missing: true + - rename: + field: client.as.organization_name + target_field: client.as.organization.name + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/cassandra/ingest/default.yml b/x-pack/packetbeat/module/cassandra/ingest/default.yml new file mode 100644 index 000000000000..61ce5ff4d736 --- /dev/null +++ b/x-pack/packetbeat/module/cassandra/ingest/default.yml @@ -0,0 +1,59 @@ +--- +description: Pipeline for processing cassandra traffic +processors: +- set: + field: ecs.version + value: '8.11.0' +## +# Set host.mac to dash separated upper case value +# as per ECS recommendation +## +- gsub: + field: host.mac + pattern: '[-:.]' + replacement: '' + ignore_missing: true + tag: gsubmac +- gsub: + field: host.mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: gsubmac +- uppercase: + field: host.mac + ignore_missing: true +- append: + 
field: related.hosts + value: "{{{observer.hostname}}}" + if: ctx.observer?.hostname != null && ctx.observer?.hostname != '' + allow_duplicates: false +- foreach: + if: ctx.observer?.ip != null && ctx.observer.ip instanceof List + field: observer.ip + tag: foreachip + processor: + append: + field: related.ip + value: '{{{_ingest._value}}}' + allow_duplicates: false +- remove: + if: ctx.host != null && ctx.tags != null && ctx.tags.contains('forwarded') + field: host + +- pipeline: + if: ctx._conf?.geoip_enrich != null && ctx._conf.geoip_enrich + name: '{{ IngestPipeline "geoip" }}' + tag: pipelineprocessor +- remove: + field: _conf + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/cassandra/ingest/geoip.yml b/x-pack/packetbeat/module/cassandra/ingest/geoip.yml new file mode 100644 index 000000000000..eb88d38caf0c --- /dev/null +++ b/x-pack/packetbeat/module/cassandra/ingest/geoip.yml @@ -0,0 +1,103 @@ +--- +description: GeoIP enrichment. 
+processors: + - geoip: + field: source.ip + target_field: source.geo + ignore_missing: true + tag: source_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true + tag: source_geo + - rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true + - rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true + + - geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true + tag: destination_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true + tag: destination_geo + - rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true + - rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true + + - geoip: + field: server.ip + target_field: server.geo + ignore_missing: true + tag: server_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: server.ip + target_field: server.as + properties: + - asn + - organization_name + ignore_missing: true + tag: server_geo + - rename: + field: server.as.asn + target_field: server.as.number + ignore_missing: true + - rename: + field: server.as.organization_name + target_field: server.as.organization.name + ignore_missing: true + + - geoip: + field: client.ip + target_field: client.geo + ignore_missing: true + tag: client_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: client.ip + target_field: client.as + properties: + - asn + - organization_name + ignore_missing: true + tag: client_geo + - rename: + field: client.as.asn + target_field: client.as.number + ignore_missing: true + - rename: + field: client.as.organization_name + target_field: client.as.organization.name + ignore_missing: true + 
+on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/dhcpv4/ingest/default.yml b/x-pack/packetbeat/module/dhcpv4/ingest/default.yml new file mode 100644 index 000000000000..1c3a2a572644 --- /dev/null +++ b/x-pack/packetbeat/module/dhcpv4/ingest/default.yml @@ -0,0 +1,74 @@ +--- +description: Pipeline for processing dhcpv4 traffic +processors: +- set: + field: ecs.version + value: '8.11.0' +## +# Set host.mac to dash separated upper case value +# as per ECS recommendation +## +- gsub: + field: dhcpv4.client_mac + pattern: '[-:.]' + replacement: '' + ignore_missing: true + tag: gsub_dhcpv4_client_mac +- gsub: + field: dhcpv4.client_mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: gsub_dhcpv4_client_mac +- uppercase: + field: dhcpv4.client_mac + ignore_missing: true +- gsub: + field: host.mac + pattern: '[-:.]' + replacement: '' + ignore_missing: true + tag: gsub_host_mac +- gsub: + field: host.mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: gsub_host_mac +- uppercase: + field: host.mac + ignore_missing: true +- append: + field: related.hosts + value: "{{{observer.hostname}}}" + if: ctx.observer?.hostname != null && ctx.observer?.hostname != '' + allow_duplicates: false +- foreach: + if: ctx.observer?.ip != null && ctx.observer.ip instanceof List + field: observer.ip + tag: foreach_observer_ip + processor: + append: + field: related.ip + value: '{{{_ingest._value}}}' + allow_duplicates: false +- remove: + if: ctx.host != null && ctx.tags != null && ctx.tags.contains('forwarded') + field: host + +- pipeline: + if: ctx._conf?.geoip_enrich != null && ctx._conf.geoip_enrich + name: '{{ IngestPipeline "geoip" }}' + 
tag: pipeline_processor +- remove: + field: _conf + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/dhcpv4/ingest/geoip.yml b/x-pack/packetbeat/module/dhcpv4/ingest/geoip.yml new file mode 100644 index 000000000000..eb88d38caf0c --- /dev/null +++ b/x-pack/packetbeat/module/dhcpv4/ingest/geoip.yml @@ -0,0 +1,103 @@ +--- +description: GeoIP enrichment. +processors: + - geoip: + field: source.ip + target_field: source.geo + ignore_missing: true + tag: source_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true + tag: source_geo + - rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true + - rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true + + - geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true + tag: destination_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true + tag: destination_geo + - rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true + - rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true + + - geoip: + field: server.ip + target_field: server.geo + ignore_missing: true + tag: server_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: server.ip + target_field: server.as + properties: + - asn + - organization_name + ignore_missing: true + tag: server_geo + - 
rename: + field: server.as.asn + target_field: server.as.number + ignore_missing: true + - rename: + field: server.as.organization_name + target_field: server.as.organization.name + ignore_missing: true + + - geoip: + field: client.ip + target_field: client.geo + ignore_missing: true + tag: client_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: client.ip + target_field: client.as + properties: + - asn + - organization_name + ignore_missing: true + tag: client_geo + - rename: + field: client.as.asn + target_field: client.as.number + ignore_missing: true + - rename: + field: client.as.organization_name + target_field: client.as.organization.name + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/dns/ingest/default.yml b/x-pack/packetbeat/module/dns/ingest/default.yml new file mode 100644 index 000000000000..ff055c3c9b37 --- /dev/null +++ b/x-pack/packetbeat/module/dns/ingest/default.yml @@ -0,0 +1,59 @@ +--- +description: Pipeline for processing dns traffic +processors: +- set: + field: ecs.version + value: '8.11.0' +## +# Set host.mac to dash separated upper case value +# as per ECS recommendation +## +- gsub: + field: host.mac + pattern: '[-:.]' + replacement: '' + ignore_missing: true + tag: gsub_host_mac +- gsub: + field: host.mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: gsub_host_mac +- uppercase: + field: host.mac + ignore_missing: true +- append: + field: related.hosts + value: "{{{observer.hostname}}}" + if: ctx.observer?.hostname != null && ctx.observer?.hostname != '' + allow_duplicates: false +- foreach: + if: ctx.observer?.ip != null && ctx.observer.ip instanceof List + field: 
observer.ip + tag: foreach_observer_ip + processor: + append: + field: related.ip + value: '{{{_ingest._value}}}' + allow_duplicates: false +- remove: + if: ctx.host != null && ctx.tags != null && ctx.tags.contains('forwarded') + field: host + +- pipeline: + if: ctx._conf?.geoip_enrich != null && ctx._conf.geoip_enrich + name: '{{ IngestPipeline "geoip" }}' + tag: pipeline_processor +- remove: + field: _conf + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/dns/ingest/geoip.yml b/x-pack/packetbeat/module/dns/ingest/geoip.yml new file mode 100644 index 000000000000..eb88d38caf0c --- /dev/null +++ b/x-pack/packetbeat/module/dns/ingest/geoip.yml @@ -0,0 +1,103 @@ +--- +description: GeoIP enrichment. 
+processors: + - geoip: + field: source.ip + target_field: source.geo + ignore_missing: true + tag: source_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true + tag: source_geo + - rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true + - rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true + + - geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true + tag: destination_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true + tag: destination_geo + - rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true + - rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true + + - geoip: + field: server.ip + target_field: server.geo + ignore_missing: true + tag: server_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: server.ip + target_field: server.as + properties: + - asn + - organization_name + ignore_missing: true + tag: server_geo + - rename: + field: server.as.asn + target_field: server.as.number + ignore_missing: true + - rename: + field: server.as.organization_name + target_field: server.as.organization.name + ignore_missing: true + + - geoip: + field: client.ip + target_field: client.geo + ignore_missing: true + tag: client_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: client.ip + target_field: client.as + properties: + - asn + - organization_name + ignore_missing: true + tag: client_geo + - rename: + field: client.as.asn + target_field: client.as.number + ignore_missing: true + - rename: + field: client.as.organization_name + target_field: client.as.organization.name + ignore_missing: true + 
+on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/flow/ingest/default.yml b/x-pack/packetbeat/module/flow/ingest/default.yml new file mode 100644 index 000000000000..6e969ea1a61e --- /dev/null +++ b/x-pack/packetbeat/module/flow/ingest/default.yml @@ -0,0 +1,89 @@ +--- +description: Pipeline for processing traffic flows +processors: +- set: + field: ecs.version + value: '8.11.0' +## +# Set {host,source,destination}.mac to dash separated upper case value +# as per ECS recommendation +## +- gsub: + field: host.mac + pattern: '[-:.]' + replacement: '' + ignore_missing: true + tag: gsub_host_mac +- gsub: + field: host.mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: gsub_host_mac +- uppercase: + field: host.mac + ignore_missing: true +- append: + field: related.hosts + value: "{{{observer.hostname}}}" + if: ctx.observer?.hostname != null && ctx.observer?.hostname != '' + allow_duplicates: false +- foreach: + if: ctx.observer?.ip != null && ctx.observer.ip instanceof List + tag: foreach_observer_ip + field: observer.ip + processor: + append: + field: related.ip + value: '{{{_ingest._value}}}' + allow_duplicates: false +- remove: + if: ctx.host != null && ctx.tags != null && ctx.tags.contains('forwarded') + field: host +- gsub: + field: source.mac + pattern: '[-:.]' + replacement: '' + ignore_missing: true + tag: gsub_source_mac +- gsub: + field: source.mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: gsub_source_mac +- uppercase: + field: source.mac + ignore_missing: true +- gsub: + field: destination.mac + pattern: '[-:.]' + replacement: '' + ignore_missing: true + tag: gsub_destination_mac +- gsub: + field: 
destination.mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: gsub_destination_mac +- uppercase: + field: destination.mac + ignore_missing: true + +- pipeline: + if: ctx._conf?.geoip_enrich != null && ctx._conf.geoip_enrich + name: '{{ IngestPipeline "geoip" }}' + tag: pipeline_processor +- remove: + field: _conf + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/flow/ingest/geoip.yml b/x-pack/packetbeat/module/flow/ingest/geoip.yml new file mode 100644 index 000000000000..eb88d38caf0c --- /dev/null +++ b/x-pack/packetbeat/module/flow/ingest/geoip.yml @@ -0,0 +1,103 @@ +--- +description: GeoIP enrichment. +processors: + - geoip: + field: source.ip + target_field: source.geo + ignore_missing: true + tag: source_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true + tag: source_geo + - rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true + - rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true + + - geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true + tag: destination_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true + tag: destination_geo + - rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true + - rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true 
+ + - geoip: + field: server.ip + target_field: server.geo + ignore_missing: true + tag: server_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: server.ip + target_field: server.as + properties: + - asn + - organization_name + ignore_missing: true + tag: server_geo + - rename: + field: server.as.asn + target_field: server.as.number + ignore_missing: true + - rename: + field: server.as.organization_name + target_field: server.as.organization.name + ignore_missing: true + + - geoip: + field: client.ip + target_field: client.geo + ignore_missing: true + tag: client_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: client.ip + target_field: client.as + properties: + - asn + - organization_name + ignore_missing: true + tag: client_geo + - rename: + field: client.as.asn + target_field: client.as.number + ignore_missing: true + - rename: + field: client.as.organization_name + target_field: client.as.organization.name + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/http/ingest/default.yml b/x-pack/packetbeat/module/http/ingest/default.yml new file mode 100644 index 000000000000..e066200becb5 --- /dev/null +++ b/x-pack/packetbeat/module/http/ingest/default.yml @@ -0,0 +1,72 @@ +--- +description: Pipeline for processing http traffic +processors: +- set: + field: ecs.version + value: '8.11.0' + +# Detection Rules compatibility +- set: + tag: set_compatibility_request_authorization + field: network_traffic.http.request.headers.authorization + copy_from: http.request.headers.authorization + ignore_empty_value: true +- set: + tag: set_compatibility_response_type + field: http.response.mime_type + copy_from: 
http.response.headers.content-type + ignore_empty_value: true + +## +# Set host.mac to dash separated upper case value +# as per ECS recommendation +## +- gsub: + field: host.mac + pattern: '[-:.]' + replacement: '' + ignore_missing: true + tag: gsub_host_mac +- gsub: + field: host.mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: gsub_host_mac +- uppercase: + field: host.mac + ignore_missing: true +- append: + field: related.hosts + value: "{{{observer.hostname}}}" + if: ctx.observer?.hostname != null && ctx.observer?.hostname != '' + allow_duplicates: false +- foreach: + if: ctx.observer?.ip != null && ctx.observer.ip instanceof List + tag: foreach_observer_ip + field: observer.ip + processor: + append: + field: related.ip + value: '{{{_ingest._value}}}' + allow_duplicates: false +- remove: + if: ctx.host != null && ctx.tags != null && ctx.tags.contains('forwarded') + field: host + +- pipeline: + if: ctx._conf?.geoip_enrich != null && ctx._conf.geoip_enrich + name: '{{ IngestPipeline "geoip" }}' + tag: pipeline_processor +- remove: + field: _conf + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/http/ingest/geoip.yml b/x-pack/packetbeat/module/http/ingest/geoip.yml new file mode 100644 index 000000000000..eb88d38caf0c --- /dev/null +++ b/x-pack/packetbeat/module/http/ingest/geoip.yml @@ -0,0 +1,103 @@ +--- +description: GeoIP enrichment. 
+processors: + - geoip: + field: source.ip + target_field: source.geo + ignore_missing: true + tag: source_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true + tag: source_geo + - rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true + - rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true + + - geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true + tag: destination_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true + tag: destination_geo + - rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true + - rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true + + - geoip: + field: server.ip + target_field: server.geo + ignore_missing: true + tag: server_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: server.ip + target_field: server.as + properties: + - asn + - organization_name + ignore_missing: true + tag: server_geo + - rename: + field: server.as.asn + target_field: server.as.number + ignore_missing: true + - rename: + field: server.as.organization_name + target_field: server.as.organization.name + ignore_missing: true + + - geoip: + field: client.ip + target_field: client.geo + ignore_missing: true + tag: client_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: client.ip + target_field: client.as + properties: + - asn + - organization_name + ignore_missing: true + tag: client_geo + - rename: + field: client.as.asn + target_field: client.as.number + ignore_missing: true + - rename: + field: client.as.organization_name + target_field: client.as.organization.name + ignore_missing: true + 
+on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/icmp/ingest/default.yml b/x-pack/packetbeat/module/icmp/ingest/default.yml new file mode 100644 index 000000000000..7a50bb91cc56 --- /dev/null +++ b/x-pack/packetbeat/module/icmp/ingest/default.yml @@ -0,0 +1,66 @@ +--- +description: Pipeline for processing icmp traffic +processors: +- set: + field: ecs.version + value: '8.11.0' + +# Detection Rules compatibility +- set: + tag: set_compatibility_type + field: network.protocol + copy_from: type + +## +# Set host.mac to dash separated upper case value +# as per ECS recommendation +## +- gsub: + field: host.mac + pattern: '[-:.]' + replacement: '' + ignore_missing: true + tag: gsub_host_mac +- gsub: + field: host.mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: gsub_host_mac +- uppercase: + field: host.mac + ignore_missing: true +- append: + field: related.hosts + value: "{{{observer.hostname}}}" + if: ctx.observer?.hostname != null && ctx.observer?.hostname != '' + allow_duplicates: false +- foreach: + if: ctx.observer?.ip != null && ctx.observer.ip instanceof List + tag: foreach_observer_ip + field: observer.ip + processor: + append: + field: related.ip + value: '{{{_ingest._value}}}' + allow_duplicates: false +- remove: + if: ctx.host != null && ctx.tags != null && ctx.tags.contains('forwarded') + field: host + +- pipeline: + if: ctx._conf?.geoip_enrich != null && ctx._conf.geoip_enrich + name: '{{ IngestPipeline "geoip" }}' + tag: pipeline_processor +- remove: + field: _conf + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ 
_ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/icmp/ingest/geoip.yml b/x-pack/packetbeat/module/icmp/ingest/geoip.yml new file mode 100644 index 000000000000..eb88d38caf0c --- /dev/null +++ b/x-pack/packetbeat/module/icmp/ingest/geoip.yml @@ -0,0 +1,103 @@ +--- +description: GeoIP enrichment. +processors: + - geoip: + field: source.ip + target_field: source.geo + ignore_missing: true + tag: source_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true + tag: source_geo + - rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true + - rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true + + - geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true + tag: destination_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true + tag: destination_geo + - rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true + - rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true + + - geoip: + field: server.ip + target_field: server.geo + ignore_missing: true + tag: server_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: server.ip + target_field: server.as + properties: + - asn + - organization_name + ignore_missing: true + tag: server_geo + - rename: + field: server.as.asn + target_field: server.as.number + ignore_missing: true + - rename: + field: server.as.organization_name + target_field: server.as.organization.name + ignore_missing: true + + - 
geoip: + field: client.ip + target_field: client.geo + ignore_missing: true + tag: client_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: client.ip + target_field: client.as + properties: + - asn + - organization_name + ignore_missing: true + tag: client_geo + - rename: + field: client.as.asn + target_field: client.as.number + ignore_missing: true + - rename: + field: client.as.organization_name + target_field: client.as.organization.name + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/memcached/ingest/default.yml b/x-pack/packetbeat/module/memcached/ingest/default.yml new file mode 100644 index 000000000000..d0f5f18088c1 --- /dev/null +++ b/x-pack/packetbeat/module/memcached/ingest/default.yml @@ -0,0 +1,79 @@ +--- +description: Pipeline for processing memcached traffic +processors: +- set: + field: ecs.version + value: '8.11.0' +## +# Set host.mac to dash separated upper case value +# as per ECS recommendation +## +- gsub: + field: host.mac + pattern: '[-:.]' + replacement: '' + ignore_missing: true + tag: gsub_host_mac +- gsub: + field: host.mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: gsub_host_mac +- uppercase: + field: host.mac + ignore_missing: true +- append: + field: related.hosts + value: "{{{observer.hostname}}}" + if: ctx.observer?.hostname != null && ctx.observer?.hostname != '' + allow_duplicates: false +- foreach: + if: ctx.observer?.ip != null && ctx.observer.ip instanceof List + tag: foreach_observer_ip + field: observer.ip + processor: + append: + field: related.ip + value: '{{{_ingest._value}}}' + allow_duplicates: false +- remove: + if: ctx.host != null && ctx.tags != null 
&& ctx.tags.contains('forwarded') + field: host + +- pipeline: + if: ctx._conf?.geoip_enrich != null && ctx._conf.geoip_enrich + name: '{{ IngestPipeline "geoip" }}' + tag: pipeline_processor +- remove: + field: _conf + ignore_missing: true + +## +# Reformat memcache stats response data as a single object +## +- rename: + field: memcache.response.stats + target_field: memcache.response.stats_objects + ignore_missing: true +- foreach: + description: Build an object for memcache stats response data + if: ctx.memcache?.response?.stats_objects instanceof List + tag: foreach_memcache_response_stats_objects + field: memcache.response.stats_objects + processor: + set: + field: "memcache.response.stats.{{{_ingest._value.name}}}" + value: "{{{_ingest._value.value}}}" +- remove: + field: memcache.response.stats_objects + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/memcached/ingest/geoip.yml b/x-pack/packetbeat/module/memcached/ingest/geoip.yml new file mode 100644 index 000000000000..eb88d38caf0c --- /dev/null +++ b/x-pack/packetbeat/module/memcached/ingest/geoip.yml @@ -0,0 +1,103 @@ +--- +description: GeoIP enrichment. 
+processors: + - geoip: + field: source.ip + target_field: source.geo + ignore_missing: true + tag: source_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true + tag: source_geo + - rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true + - rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true + + - geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true + tag: destination_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true + tag: destination_geo + - rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true + - rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true + + - geoip: + field: server.ip + target_field: server.geo + ignore_missing: true + tag: server_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: server.ip + target_field: server.as + properties: + - asn + - organization_name + ignore_missing: true + tag: server_geo + - rename: + field: server.as.asn + target_field: server.as.number + ignore_missing: true + - rename: + field: server.as.organization_name + target_field: server.as.organization.name + ignore_missing: true + + - geoip: + field: client.ip + target_field: client.geo + ignore_missing: true + tag: client_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: client.ip + target_field: client.as + properties: + - asn + - organization_name + ignore_missing: true + tag: client_geo + - rename: + field: client.as.asn + target_field: client.as.number + ignore_missing: true + - rename: + field: client.as.organization_name + target_field: client.as.organization.name + ignore_missing: true + 
+on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/mongodb/ingest/default.yml b/x-pack/packetbeat/module/mongodb/ingest/default.yml new file mode 100644 index 000000000000..a40e27da35d7 --- /dev/null +++ b/x-pack/packetbeat/module/mongodb/ingest/default.yml @@ -0,0 +1,59 @@ +--- +description: Pipeline for processing mongodb traffic +processors: +- set: + field: ecs.version + value: '8.11.0' +## +# Set host.mac to dash separated upper case value +# as per ECS recommendation +## +- gsub: + field: host.mac + pattern: '[-:.]' + replacement: '' + ignore_missing: true + tag: gsub_host_mac +- gsub: + field: host.mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: gsub_host_mac +- uppercase: + field: host.mac + ignore_missing: true +- append: + field: related.hosts + value: "{{{observer.hostname}}}" + if: ctx.observer?.hostname != null && ctx.observer?.hostname != '' + allow_duplicates: false +- foreach: + if: ctx.observer?.ip != null && ctx.observer.ip instanceof List + field: observer.ip + tag: foreach_observer_ip + processor: + append: + field: related.ip + value: '{{{_ingest._value}}}' + allow_duplicates: false +- remove: + if: ctx.host != null && ctx.tags != null && ctx.tags.contains('forwarded') + field: host + +- pipeline: + if: ctx._conf?.geoip_enrich != null && ctx._conf.geoip_enrich + name: '{{ IngestPipeline "geoip" }}' + tag: pipeline_processor +- remove: + field: _conf + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ 
_ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/mongodb/ingest/geoip.yml b/x-pack/packetbeat/module/mongodb/ingest/geoip.yml new file mode 100644 index 000000000000..eb88d38caf0c --- /dev/null +++ b/x-pack/packetbeat/module/mongodb/ingest/geoip.yml @@ -0,0 +1,103 @@ +--- +description: GeoIP enrichment. +processors: + - geoip: + field: source.ip + target_field: source.geo + ignore_missing: true + tag: source_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true + tag: source_geo + - rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true + - rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true + + - geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true + tag: destination_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true + tag: destination_geo + - rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true + - rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true + + - geoip: + field: server.ip + target_field: server.geo + ignore_missing: true + tag: server_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: server.ip + target_field: server.as + properties: + - asn + - organization_name + ignore_missing: true + tag: server_geo + - rename: + field: server.as.asn + target_field: server.as.number + ignore_missing: true + - rename: + field: server.as.organization_name + target_field: server.as.organization.name + ignore_missing: true + + - geoip: + field: client.ip + target_field: client.geo + ignore_missing: true + tag: client_geo + - geoip: 
+ database_file: GeoLite2-ASN.mmdb + field: client.ip + target_field: client.as + properties: + - asn + - organization_name + ignore_missing: true + tag: client_geo + - rename: + field: client.as.asn + target_field: client.as.number + ignore_missing: true + - rename: + field: client.as.organization_name + target_field: client.as.organization.name + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/mysql/ingest/default.yml b/x-pack/packetbeat/module/mysql/ingest/default.yml new file mode 100644 index 000000000000..e9cb2ebcdb06 --- /dev/null +++ b/x-pack/packetbeat/module/mysql/ingest/default.yml @@ -0,0 +1,59 @@ +--- +description: Pipeline for processing mysql traffic +processors: +- set: + field: ecs.version + value: '8.11.0' +## +# Set host.mac to dash separated upper case value +# as per ECS recommendation +## +- gsub: + field: host.mac + pattern: '[-:.]' + replacement: '' + ignore_missing: true + tag: gsub_host_mac +- gsub: + field: host.mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: gsub_host_mac +- uppercase: + field: host.mac + ignore_missing: true +- append: + field: related.hosts + value: "{{{observer.hostname}}}" + if: ctx.observer?.hostname != null && ctx.observer?.hostname != '' + allow_duplicates: false +- foreach: + if: ctx.observer?.ip != null && ctx.observer.ip instanceof List + field: observer.ip + tag: foreach_observer_ip + processor: + append: + field: related.ip + value: '{{{_ingest._value}}}' + allow_duplicates: false +- remove: + if: ctx.host != null && ctx.tags != null && ctx.tags.contains('forwarded') + field: host + +- pipeline: + if: ctx._conf?.geoip_enrich != null && 
ctx._conf.geoip_enrich + name: '{{ IngestPipeline "geoip" }}' + tag: pipeline_processor +- remove: + field: _conf + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/mysql/ingest/geoip.yml b/x-pack/packetbeat/module/mysql/ingest/geoip.yml new file mode 100644 index 000000000000..eb88d38caf0c --- /dev/null +++ b/x-pack/packetbeat/module/mysql/ingest/geoip.yml @@ -0,0 +1,103 @@ +--- +description: GeoIP enrichment. +processors: + - geoip: + field: source.ip + target_field: source.geo + ignore_missing: true + tag: source_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true + tag: source_geo + - rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true + - rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true + + - geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true + tag: destination_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true + tag: destination_geo + - rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true + - rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true + + - geoip: + field: server.ip + target_field: server.geo + ignore_missing: true + tag: server_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: server.ip + target_field: server.as + properties: + - asn + - 
organization_name + ignore_missing: true + tag: server_geo + - rename: + field: server.as.asn + target_field: server.as.number + ignore_missing: true + - rename: + field: server.as.organization_name + target_field: server.as.organization.name + ignore_missing: true + + - geoip: + field: client.ip + target_field: client.geo + ignore_missing: true + tag: client_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: client.ip + target_field: client.as + properties: + - asn + - organization_name + ignore_missing: true + tag: client_geo + - rename: + field: client.as.asn + target_field: client.as.number + ignore_missing: true + - rename: + field: client.as.organization_name + target_field: client.as.organization.name + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/nfs/ingest/default.yml b/x-pack/packetbeat/module/nfs/ingest/default.yml new file mode 100644 index 000000000000..a1b72a252179 --- /dev/null +++ b/x-pack/packetbeat/module/nfs/ingest/default.yml @@ -0,0 +1,59 @@ +--- +description: Pipeline for processing nfs traffic +processors: +- set: + field: ecs.version + value: '8.11.0' +## +# Set host.mac to dash separated upper case value +# as per ECS recommendation +## +- gsub: + field: host.mac + pattern: '[-:.]' + replacement: '' + ignore_missing: true + tag: gsub_host_mac +- gsub: + field: host.mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: gsub_host_mac +- uppercase: + field: host.mac + ignore_missing: true +- append: + field: related.hosts + value: "{{{observer.hostname}}}" + if: ctx.observer?.hostname != null && ctx.observer?.hostname != '' + allow_duplicates: false +- foreach: + if: 
ctx.observer?.ip != null && ctx.observer.ip instanceof List + field: observer.ip + tag: foreach_observer_ip + processor: + append: + field: related.ip + value: '{{{_ingest._value}}}' + allow_duplicates: false +- remove: + if: ctx.host != null && ctx.tags != null && ctx.tags.contains('forwarded') + field: host + +- pipeline: + if: ctx._conf?.geoip_enrich != null && ctx._conf.geoip_enrich + name: '{{ IngestPipeline "geoip" }}' + tag: pipeline_processor +- remove: + field: _conf + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/nfs/ingest/geoip.yml b/x-pack/packetbeat/module/nfs/ingest/geoip.yml new file mode 100644 index 000000000000..eb88d38caf0c --- /dev/null +++ b/x-pack/packetbeat/module/nfs/ingest/geoip.yml @@ -0,0 +1,103 @@ +--- +description: GeoIP enrichment. 
+processors: + - geoip: + field: source.ip + target_field: source.geo + ignore_missing: true + tag: source_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true + tag: source_geo + - rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true + - rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true + + - geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true + tag: destination_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true + tag: destination_geo + - rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true + - rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true + + - geoip: + field: server.ip + target_field: server.geo + ignore_missing: true + tag: server_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: server.ip + target_field: server.as + properties: + - asn + - organization_name + ignore_missing: true + tag: server_geo + - rename: + field: server.as.asn + target_field: server.as.number + ignore_missing: true + - rename: + field: server.as.organization_name + target_field: server.as.organization.name + ignore_missing: true + + - geoip: + field: client.ip + target_field: client.geo + ignore_missing: true + tag: client_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: client.ip + target_field: client.as + properties: + - asn + - organization_name + ignore_missing: true + tag: client_geo + - rename: + field: client.as.asn + target_field: client.as.number + ignore_missing: true + - rename: + field: client.as.organization_name + target_field: client.as.organization.name + ignore_missing: true + 
+on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/pgsql/ingest/default.yml b/x-pack/packetbeat/module/pgsql/ingest/default.yml new file mode 100644 index 000000000000..bd28f9211e1f --- /dev/null +++ b/x-pack/packetbeat/module/pgsql/ingest/default.yml @@ -0,0 +1,59 @@ +--- +description: Pipeline for processing pgsql traffic +processors: +- set: + field: ecs.version + value: '8.11.0' +## +# Set host.mac to dash separated upper case value +# as per ECS recommendation +## +- gsub: + field: host.mac + pattern: '[-:.]' + replacement: '' + ignore_missing: true + tag: gsub_host_mac +- gsub: + field: host.mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: gsub_host_mac +- uppercase: + field: host.mac + ignore_missing: true +- append: + field: related.hosts + value: "{{{observer.hostname}}}" + if: ctx.observer?.hostname != null && ctx.observer?.hostname != '' + allow_duplicates: false +- foreach: + if: ctx.observer?.ip != null && ctx.observer.ip instanceof List + field: observer.ip + tag: foreach_observer_ip + processor: + append: + field: related.ip + value: '{{{_ingest._value}}}' + allow_duplicates: false +- remove: + if: ctx.host != null && ctx.tags != null && ctx.tags.contains('forwarded') + field: host + +- pipeline: + if: ctx._conf?.geoip_enrich != null && ctx._conf.geoip_enrich + name: '{{ IngestPipeline "geoip" }}' + tag: pipeline_processor +- remove: + field: _conf + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ 
_ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/pgsql/ingest/geoip.yml b/x-pack/packetbeat/module/pgsql/ingest/geoip.yml new file mode 100644 index 000000000000..eb88d38caf0c --- /dev/null +++ b/x-pack/packetbeat/module/pgsql/ingest/geoip.yml @@ -0,0 +1,103 @@ +--- +description: GeoIP enrichment. +processors: + - geoip: + field: source.ip + target_field: source.geo + ignore_missing: true + tag: source_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true + tag: source_geo + - rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true + - rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true + + - geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true + tag: destination_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true + tag: destination_geo + - rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true + - rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true + + - geoip: + field: server.ip + target_field: server.geo + ignore_missing: true + tag: server_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: server.ip + target_field: server.as + properties: + - asn + - organization_name + ignore_missing: true + tag: server_geo + - rename: + field: server.as.asn + target_field: server.as.number + ignore_missing: true + - rename: + field: server.as.organization_name + target_field: server.as.organization.name + ignore_missing: true + + - geoip: + field: client.ip + target_field: client.geo + ignore_missing: true + tag: client_geo + - geoip: + 
database_file: GeoLite2-ASN.mmdb + field: client.ip + target_field: client.as + properties: + - asn + - organization_name + ignore_missing: true + tag: client_geo + - rename: + field: client.as.asn + target_field: client.as.number + ignore_missing: true + - rename: + field: client.as.organization_name + target_field: client.as.organization.name + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/pipeline.go b/x-pack/packetbeat/module/pipeline.go new file mode 100644 index 000000000000..a325fba7de4f --- /dev/null +++ b/x-pack/packetbeat/module/pipeline.go @@ -0,0 +1,20 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+
+package module
+
+import (
+	"embed"
+
+	"github.com/elastic/beats/v7/packetbeat/module"
+)
+
+// pipelinesFS holds the yml representation of the ingest node pipelines
+//
+//go:embed */ingest/*.yml
+var pipelinesFS embed.FS
+
+func init() {
+	module.PipelinesFS = &pipelinesFS
+}
diff --git a/x-pack/packetbeat/module/redis/ingest/default.yml b/x-pack/packetbeat/module/redis/ingest/default.yml
new file mode 100644
index 000000000000..4f815adc3a90
--- /dev/null
+++ b/x-pack/packetbeat/module/redis/ingest/default.yml
@@ -0,0 +1,59 @@
+---
+description: Pipeline for processing redis traffic
+processors:
+- set:
+    field: ecs.version
+    value: '8.11.0'
+##
+# Set host.mac to dash separated upper case value
+# as per ECS recommendation
+##
+- gsub:
+    field: host.mac
+    pattern: '[-:.]'
+    replacement: ''
+    ignore_missing: true
+    tag: gsub_host_mac
+- gsub:
+    field: host.mac
+    pattern: '(..)(?!$)'
+    replacement: '$1-'
+    ignore_missing: true
+    tag: gsub_host_mac
+- uppercase:
+    field: host.mac
+    ignore_missing: true
+- append:
+    field: related.hosts
+    value: "{{{observer.hostname}}}"
+    if: ctx.observer?.hostname != null && ctx.observer?.hostname != ''
+    allow_duplicates: false
+- foreach:
+    if: ctx.observer?.ip != null && ctx.observer.ip instanceof List
+    field: observer.ip
+    tag: foreach_observer_ip
+    processor:
+      append:
+        field: related.ip
+        value: '{{{_ingest._value}}}'
+        allow_duplicates: false
+- remove:
+    if: ctx.host != null && ctx.tags != null && ctx.tags.contains('forwarded')
+    field: host
+
+- pipeline:
+    if: ctx._conf?.geoip_enrich != null && ctx._conf.geoip_enrich
+    name: '{{ IngestPipeline "geoip" }}'
+    tag: pipeline_processor
+- remove:
+    field: _conf
+    ignore_missing: true
+
+on_failure:
+  - append:
+      field: error.message
+      value: |-
+        Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}"
+  - set:
+      field: 
event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/redis/ingest/geoip.yml b/x-pack/packetbeat/module/redis/ingest/geoip.yml new file mode 100644 index 000000000000..eb88d38caf0c --- /dev/null +++ b/x-pack/packetbeat/module/redis/ingest/geoip.yml @@ -0,0 +1,103 @@ +--- +description: GeoIP enrichment. +processors: + - geoip: + field: source.ip + target_field: source.geo + ignore_missing: true + tag: source_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true + tag: source_geo + - rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true + - rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true + + - geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true + tag: destination_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true + tag: destination_geo + - rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true + - rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true + + - geoip: + field: server.ip + target_field: server.geo + ignore_missing: true + tag: server_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: server.ip + target_field: server.as + properties: + - asn + - organization_name + ignore_missing: true + tag: server_geo + - rename: + field: server.as.asn + target_field: server.as.number + ignore_missing: true + - rename: + field: server.as.organization_name + target_field: server.as.organization.name + ignore_missing: true + + - geoip: + field: client.ip + target_field: client.geo + ignore_missing: true + tag: client_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: client.ip + 
target_field: client.as
+      properties:
+        - asn
+        - organization_name
+      ignore_missing: true
+      tag: client_geo
+  - rename:
+      field: client.as.asn
+      target_field: client.as.number
+      ignore_missing: true
+  - rename:
+      field: client.as.organization_name
+      target_field: client.as.organization.name
+      ignore_missing: true
+
+on_failure:
+  - append:
+      field: error.message
+      value: |-
+        Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}"
+  - set:
+      field: event.kind
+      value: pipeline_error
diff --git a/x-pack/packetbeat/module/routing/ingest/default.yml b/x-pack/packetbeat/module/routing/ingest/default.yml
new file mode 100644
index 000000000000..a11b5e79f7ab
--- /dev/null
+++ b/x-pack/packetbeat/module/routing/ingest/default.yml
@@ -0,0 +1,64 @@
+---
+description: Route to appropriate data source pipeline.
+processors:
+  - set:
+      field: event.ingested
+      value: '{{_ingest.timestamp}}'
+
+  - pipeline:
+      if: ctx.type == "amqp"
+      name: '{< IngestPipeline "amqp" >}'
+  - pipeline:
+      if: ctx.type == "cassandra"
+      name: '{< IngestPipeline "cassandra" >}'
+  - pipeline:
+      if: ctx.type == "dhcpv4"
+      name: '{< IngestPipeline "dhcpv4" >}'
+  - pipeline:
+      if: ctx.type == "dns"
+      name: '{< IngestPipeline "dns" >}'
+  - pipeline:
+      if: ctx.type == "flow"
+      name: '{< IngestPipeline "flow" >}'
+  - pipeline:
+      if: ctx.type == "http"
+      name: '{< IngestPipeline "http" >}'
+  - pipeline:
+      if: ctx.type == "icmp"
+      name: '{< IngestPipeline "icmp" >}'
+  - pipeline:
+      if: ctx.type == "memcache"
+      name: '{< IngestPipeline "memcached" >}'
+  - pipeline:
+      if: ctx.type == "mongodb"
+      name: '{< IngestPipeline "mongodb" >}'
+  - pipeline:
+      if: ctx.type == "mysql"
+      name: '{< IngestPipeline "mysql" >}'
+  - pipeline:
+      if: ctx.type == "nfs"
+      name: '{< IngestPipeline "nfs" >}'
+  - pipeline:
+      if: ctx.type == "pgsql"
+      name: '{< IngestPipeline "pgsql" >}'
+  
- pipeline: + if: ctx.type == "redis" + name: '{< IngestPipeline "redis" >}' + - pipeline: + if: ctx.type == "sip" + name: '{< IngestPipeline "sip" >}' + - pipeline: + if: ctx.type == "thrift" + name: '{< IngestPipeline "thrift" >}' + - pipeline: + if: ctx.type == "tls" + name: '{< IngestPipeline "tls" >}' + +on_failure: + - set: + field: event.kind + value: pipeline_error + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" diff --git a/x-pack/packetbeat/module/sip/ingest/default.yml b/x-pack/packetbeat/module/sip/ingest/default.yml new file mode 100644 index 000000000000..62f3d6c1c424 --- /dev/null +++ b/x-pack/packetbeat/module/sip/ingest/default.yml @@ -0,0 +1,59 @@ +--- +description: Pipeline for processing sip traffic +processors: +- set: + field: ecs.version + value: '8.11.0' +## +# Set host.mac to dash separated upper case value +# as per ECS recommendation +## +- gsub: + field: host.mac + pattern: '[-:.]' + replacement: '' + ignore_missing: true + tag: gsub_host_mac +- gsub: + field: host.mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: gsub_host_mac +- uppercase: + field: host.mac + ignore_missing: true +- append: + field: related.hosts + value: "{{{observer.hostname}}}" + if: ctx.observer?.hostname != null && ctx.observer?.hostname != '' + allow_duplicates: false +- foreach: + if: ctx.observer?.ip != null && ctx.observer.ip instanceof List + field: observer.ip + tag: foreach_observer_ip + processor: + append: + field: related.ip + value: '{{{_ingest._value}}}' + allow_duplicates: false +- remove: + if: ctx.host != null && ctx.tags != null && ctx.tags.contains('forwarded') + field: host + +- pipeline: + if: ctx._conf?.geoip_enrich != null && ctx._conf.geoip_enrich + name: '{{ IngestPipeline "geoip" }}' + tag: pipeline_processor 
+- remove: + field: _conf + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/sip/ingest/geoip.yml b/x-pack/packetbeat/module/sip/ingest/geoip.yml new file mode 100644 index 000000000000..eb88d38caf0c --- /dev/null +++ b/x-pack/packetbeat/module/sip/ingest/geoip.yml @@ -0,0 +1,103 @@ +--- +description: GeoIP enrichment. +processors: + - geoip: + field: source.ip + target_field: source.geo + ignore_missing: true + tag: source_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true + tag: source_geo + - rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true + - rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true + + - geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true + tag: destination_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true + tag: destination_geo + - rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true + - rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true + + - geoip: + field: server.ip + target_field: server.geo + ignore_missing: true + tag: server_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: server.ip + target_field: server.as + properties: + - asn + - organization_name + ignore_missing: true + tag: server_geo + - rename: + field: server.as.asn + 
target_field: server.as.number + ignore_missing: true + - rename: + field: server.as.organization_name + target_field: server.as.organization.name + ignore_missing: true + + - geoip: + field: client.ip + target_field: client.geo + ignore_missing: true + tag: client_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: client.ip + target_field: client.as + properties: + - asn + - organization_name + ignore_missing: true + tag: client_geo + - rename: + field: client.as.asn + target_field: client.as.number + ignore_missing: true + - rename: + field: client.as.organization_name + target_field: client.as.organization.name + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/thrift/ingest/default.yml b/x-pack/packetbeat/module/thrift/ingest/default.yml new file mode 100644 index 000000000000..f2726cea96b6 --- /dev/null +++ b/x-pack/packetbeat/module/thrift/ingest/default.yml @@ -0,0 +1,59 @@ +--- +description: Pipeline for processing thrift traffic +processors: +- set: + field: ecs.version + value: '8.11.0' +## +# Set host.mac to dash separated upper case value +# as per ECS recommendation +## +- gsub: + field: host.mac + pattern: '[-:.]' + replacement: '' + ignore_missing: true + tag: gsub_host_mac +- gsub: + field: host.mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: gsub_host_mac +- uppercase: + field: host.mac + ignore_missing: true +- append: + field: related.hosts + value: "{{{observer.hostname}}}" + if: ctx.observer?.hostname != null && ctx.observer?.hostname != '' + allow_duplicates: false +- foreach: + if: ctx.observer?.ip != null && ctx.observer.ip instanceof List + field: observer.ip + tag: 
foreach_observer_ip + processor: + append: + field: related.ip + value: '{{{_ingest._value}}}' + allow_duplicates: false +- remove: + if: ctx.host != null && ctx.tags != null && ctx.tags.contains('forwarded') + field: host + +- pipeline: + if: ctx._conf?.geoip_enrich != null && ctx._conf.geoip_enrich + name: '{{ IngestPipeline "geoip" }}' + tag: pipeline_processor +- remove: + field: _conf + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/thrift/ingest/geoip.yml b/x-pack/packetbeat/module/thrift/ingest/geoip.yml new file mode 100644 index 000000000000..eb88d38caf0c --- /dev/null +++ b/x-pack/packetbeat/module/thrift/ingest/geoip.yml @@ -0,0 +1,103 @@ +--- +description: GeoIP enrichment. 
+processors: + - geoip: + field: source.ip + target_field: source.geo + ignore_missing: true + tag: source_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true + tag: source_geo + - rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true + - rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true + + - geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true + tag: destination_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true + tag: destination_geo + - rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true + - rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true + + - geoip: + field: server.ip + target_field: server.geo + ignore_missing: true + tag: server_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: server.ip + target_field: server.as + properties: + - asn + - organization_name + ignore_missing: true + tag: server_geo + - rename: + field: server.as.asn + target_field: server.as.number + ignore_missing: true + - rename: + field: server.as.organization_name + target_field: server.as.organization.name + ignore_missing: true + + - geoip: + field: client.ip + target_field: client.geo + ignore_missing: true + tag: client_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: client.ip + target_field: client.as + properties: + - asn + - organization_name + ignore_missing: true + tag: client_geo + - rename: + field: client.as.asn + target_field: client.as.number + ignore_missing: true + - rename: + field: client.as.organization_name + target_field: client.as.organization.name + ignore_missing: true + 
+on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/tls/ingest/default.yml b/x-pack/packetbeat/module/tls/ingest/default.yml new file mode 100644 index 000000000000..94ef3b55d224 --- /dev/null +++ b/x-pack/packetbeat/module/tls/ingest/default.yml @@ -0,0 +1,99 @@ +--- +description: Pipeline for processing tls traffic +processors: +- set: + field: ecs.version + value: '8.11.0' +## +# Set host.mac to dash separated upper case value +# as per ECS recommendation +## +- gsub: + field: host.mac + pattern: '[-:.]' + replacement: '' + ignore_missing: true + tag: gsub_host_mac +- gsub: + field: host.mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: gsub_host_mac +- uppercase: + field: host.mac + ignore_missing: true +- append: + field: related.hosts + value: "{{{observer.hostname}}}" + if: ctx.observer?.hostname != null && ctx.observer?.hostname != '' + allow_duplicates: false +- foreach: + if: ctx.observer?.ip != null && ctx.observer.ip instanceof List + field: observer.ip + tag: foreach_observer_ip + processor: + append: + field: related.ip + value: '{{{_ingest._value}}}' + allow_duplicates: false +- remove: + if: ctx.host != null && ctx.tags != null && ctx.tags.contains('forwarded') + field: host + +- pipeline: + if: ctx._conf?.geoip_enrich != null && ctx._conf.geoip_enrich + name: '{{ IngestPipeline "geoip" }}' + tag: pipeline_processor +- remove: + field: _conf + ignore_missing: true + +## +# Make tls.{client,server}.x509.version_number a string as per ECS. 
+## +- convert: + field: tls.client.x509.version_number + type: string + ignore_missing: true + tag: convert_tls_client_x509_version_number +- convert: + field: tls.server.x509.version_number + type: string + ignore_missing: true + tag: convert_tls_server_x509_version_number + +## +# This handles legacy TLS fields from Packetbeat 7.17. +## +- remove: + description: Remove legacy fields from Packetbeat 7.17 that are duplicated. + field: + - tls.client.x509.issuer.province # Duplicated as tls.client.x509.issuer.state_or_province. + - tls.client.x509.subject.province # Duplicated as tls.client.x509.subject.state_or_province. + - tls.client.x509.version # Duplicated as tls.client.x509.version_number. + - tls.detailed.client_certificate # Duplicated as tls.client.x509. + - tls.detailed.server_certificate # Duplicated as tls.server.x509. + - tls.server.x509.issuer.province # Duplicated as tls.server.x509.issuer.state_or_province. + - tls.server.x509.subject.province # Duplicated as tls.server.x509.subject.state_or_province. + - tls.server.x509.version # Duplicated as tls.server.x509.version_number. + ignore_missing: true + +- append: + field: related.hash + value: "{{tls.server.ja3s}}" + if: "ctx?.tls?.server?.ja3s != null" +- append: + field: related.hash + value: "{{tls.client.ja3}}" + if: "ctx?.tls?.client?.ja3 != null" + allow_duplicates: false + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/tls/ingest/geoip.yml b/x-pack/packetbeat/module/tls/ingest/geoip.yml new file mode 100644 index 000000000000..eb88d38caf0c --- /dev/null +++ b/x-pack/packetbeat/module/tls/ingest/geoip.yml @@ -0,0 +1,103 @@ +--- +description: GeoIP enrichment. 
+processors: + - geoip: + field: source.ip + target_field: source.geo + ignore_missing: true + tag: source_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true + tag: source_geo + - rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true + - rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true + + - geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true + tag: destination_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true + tag: destination_geo + - rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true + - rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true + + - geoip: + field: server.ip + target_field: server.geo + ignore_missing: true + tag: server_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: server.ip + target_field: server.as + properties: + - asn + - organization_name + ignore_missing: true + tag: server_geo + - rename: + field: server.as.asn + target_field: server.as.number + ignore_missing: true + - rename: + field: server.as.organization_name + target_field: server.as.organization.name + ignore_missing: true + + - geoip: + field: client.ip + target_field: client.geo + ignore_missing: true + tag: client_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: client.ip + target_field: client.as + properties: + - asn + - organization_name + ignore_missing: true + tag: client_geo + - rename: + field: client.as.asn + target_field: client.as.number + ignore_missing: true + - rename: + field: client.as.organization_name + target_field: client.as.organization.name + ignore_missing: true + 
+on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/npcap/installer/LICENSE b/x-pack/packetbeat/npcap/installer/LICENSE index 1073eb3ff69f..d7823e2e47ad 100644 --- a/x-pack/packetbeat/npcap/installer/LICENSE +++ b/x-pack/packetbeat/npcap/installer/LICENSE @@ -1,6 +1,6 @@ -------------------------------------------------------------------------------- Dependency : Npcap (https://nmap.org/npcap/) -Version: 1.78 +Version: 1.79 Licence type: Commercial -------------------------------------------------------------------------------- diff --git a/x-pack/packetbeat/packetbeat.reference.yml b/x-pack/packetbeat/packetbeat.reference.yml index 1e013fb081f5..c9dac77048ad 100644 --- a/x-pack/packetbeat/packetbeat.reference.yml +++ b/x-pack/packetbeat/packetbeat.reference.yml @@ -78,6 +78,11 @@ packetbeat.interfaces.internal_networks: # can stay enabled even after beat is shut down. #packetbeat.interfaces.auto_promisc_mode: true +# By default Ingest pipelines are not updated if a pipeline with the same ID +# already exists. If this option is enabled Packetbeat overwrites pipelines +# every time a new Elasticsearch connection is established. +#packetbeat.overwrite_pipelines: false + # =================================== Flows ==================================== packetbeat.flows: diff --git a/x-pack/packetbeat/packetbeat.yml b/x-pack/packetbeat/packetbeat.yml index fea1a2fb1153..d78fb6a7ccd5 100644 --- a/x-pack/packetbeat/packetbeat.yml +++ b/x-pack/packetbeat/packetbeat.yml @@ -213,10 +213,6 @@ output.elasticsearch: # Array of hosts to connect to. hosts: ["localhost:9200"] - # Performance preset - one of "balanced", "throughput", "scale", - # "latency", or "custom". 
- preset: balanced - # Protocol - either `http` (default) or `https`. #protocol: "https" @@ -225,6 +221,9 @@ output.elasticsearch: #username: "elastic" #password: "changeme" + # Pipeline to route events to protocol pipelines. + pipeline: "packetbeat-%{[agent.version]}-routing" + # ------------------------------ Logstash Output ------------------------------- #output.logstash: # The Logstash hosts diff --git a/x-pack/packetbeat/tests/system/app_test.go b/x-pack/packetbeat/tests/system/app_test.go index fa1a359be70a..0f3668820837 100644 --- a/x-pack/packetbeat/tests/system/app_test.go +++ b/x-pack/packetbeat/tests/system/app_test.go @@ -29,7 +29,7 @@ import ( ) // Keep in sync with NpcapVersion in magefile.go. -const NpcapVersion = "1.78" +const NpcapVersion = "1.79" func TestWindowsNpcapInstaller(t *testing.T) { if runtime.GOOS != "windows" {