diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 0000000..f9e5ad3
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,14 @@
+root = true
+
+[*]
+charset = utf-8
+end_of_line = lf
+insert_final_newline = true
+indent_size = 4
+indent_style = tab
+trim_trailing_whitespace = true
+max_line_length = 80
+
+[*.md]
+max_line_length = 0
+trim_trailing_whitespace = false
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..175d3cf
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,208 @@
+# Source: https://github.com/alexkaratarakis/gitattributes/blob/master/Web.gitattributes
+
+## GITATTRIBUTES FOR WEB PROJECTS
+#
+# These settings are for any web project.
+#
+# Details per file setting:
+# text These files should be normalized (i.e. convert CRLF to LF).
+# binary These files are binary and should be left untouched.
+#
+# Note that binary is a macro for -text -diff.
+######################################################################
+
+# Auto detect
+## Handle line endings automatically for files detected as
+## text and leave all files detected as binary untouched.
+## This will handle all files NOT defined below.
+* text=auto
+
+# Source code
+*.bash text eol=lf
+*.bat text eol=crlf
+*.cmd text eol=crlf
+*.coffee text
+*.css text diff=css
+*.htm text diff=html
+*.html text diff=html
+*.inc text
+*.ini text
+*.js text
+*.json text
+*.jsx text
+*.less text
+*.ls text
+*.map text -diff
+*.od text
+*.onlydata text
+*.php text diff=php
+*.pl text
+*.ps1 text eol=crlf
+*.py text diff=python
+*.rb text diff=ruby
+*.sass text
+*.scm text
+*.scss text diff=css
+*.sh text eol=lf
+*.sql text
+*.styl text
+*.tag text
+*.ts text
+*.tsx text
+*.xml text
+*.xhtml text diff=html
+
+# Docker
+Dockerfile text
+
+# Documentation
+*.ipynb text
+*.markdown text diff=markdown
+*.md text diff=markdown
+*.mdwn text diff=markdown
+*.mdown text diff=markdown
+*.mkd text diff=markdown
+*.mkdn text diff=markdown
+*.mdtxt text
+*.mdtext text
+*.txt text
+AUTHORS text
+CHANGELOG text
+CHANGES text
+CONTRIBUTING text
+COPYING text
+copyright text
+*COPYRIGHT* text
+INSTALL text
+license text
+LICENSE text
+NEWS text
+readme text
+*README* text
+TODO text
+
+# Templates
+*.dot text
+*.ejs text
+*.erb text
+*.haml text
+*.handlebars text
+*.hbs text
+*.hbt text
+*.jade text
+*.latte text
+*.mustache text
+*.njk text
+*.phtml text
+*.svelte text
+*.tmpl text
+*.tpl text
+*.twig text
+*.vue text
+
+# Configs
+*.cnf text
+*.conf text
+*.config text
+.editorconfig text
+.env text
+.gitattributes text
+.gitconfig text
+.htaccess text
+*.lock text -diff
+package.json text eol=lf
+package-lock.json text -diff
+pnpm-lock.yaml text eol=lf -diff
+.prettierrc text
+yarn.lock text -diff
+*.toml text
+*.yaml text
+*.yml text
+browserslist text
+Makefile text
+makefile text
+
+# Heroku
+Procfile text
+
+# Graphics
+*.ai binary
+*.bmp binary
+*.eps binary
+*.gif binary
+*.gifv binary
+*.ico binary
+*.jng binary
+*.jp2 binary
+*.jpg binary
+*.jpeg binary
+*.jpx binary
+*.jxr binary
+*.pdf binary
+*.png binary
+*.psb binary
+*.psd binary
+# SVG treated as an asset (binary) by default.
+*.svg text
+# If you want to treat it as binary,
+# use the following line instead.
+# *.svg binary
+*.svgz binary
+*.tif binary
+*.tiff binary
+*.wbmp binary
+*.webp binary
+
+# Audio
+*.kar binary
+*.m4a binary
+*.mid binary
+*.midi binary
+*.mp3 binary
+*.ogg binary
+*.ra binary
+
+# Video
+*.3gpp binary
+*.3gp binary
+*.as binary
+*.asf binary
+*.asx binary
+*.avi binary
+*.fla binary
+*.flv binary
+*.m4v binary
+*.mng binary
+*.mov binary
+*.mp4 binary
+*.mpeg binary
+*.mpg binary
+*.ogv binary
+*.swc binary
+*.swf binary
+*.webm binary
+
+# Archives
+*.7z binary
+*.gz binary
+*.jar binary
+*.rar binary
+*.tar binary
+*.zip binary
+
+# Fonts
+*.ttf binary
+*.eot binary
+*.otf binary
+*.woff binary
+*.woff2 binary
+
+# Executables
+*.exe binary
+*.pyc binary
+
+# RC files (like .babelrc or .eslintrc)
+*.*rc text
+
+# Ignore files (like .npmignore or .gitignore)
+*.*ignore text
diff --git a/.github/ISSUE_TEMPLATE/enhancement.md b/.github/ISSUE_TEMPLATE/enhancement.md
deleted file mode 100644
index 0907a9f..0000000
--- a/.github/ISSUE_TEMPLATE/enhancement.md
+++ /dev/null
@@ -1,27 +0,0 @@
----
-name: Enhancement request
-about: Suggest an idea for a future version of this project
-title: ''
-labels: enhancement, needs-triage
-assignees: ''
-
----
-
-[NOTE]: # ( ^^ Provide a general summary of the request in the title above. ^^ )
-
-## Summary
-
-[NOTE]: # ( Provide a brief overview of what the new feature is all about. )
-
-## Desired Behavior
-
-[NOTE]: # ( Tell us how the new feature should work. Be specific. )
-[TIP]: # ( Do NOT give us access or passwords to your New Relic account or API keys! )
-
-## Possible Solution
-
-[NOTE]: # ( Not required. Suggest how to implement the addition or change. )
-
-## Additional context
-
-[TIP]: # ( Why does this feature matter to you? What unique circumstances do you have? )
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 0000000..5744bb4
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,17 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+### Feature Description
+A clear and concise description of the feature you want or need.
+
+### Describe Alternatives
+A clear and concise description of any alternative solutions or features you've considered. Are there examples you could link us to?
+
+### Additional context
+Add any other context here.
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
new file mode 100644
index 0000000..371537e
--- /dev/null
+++ b/.github/pull_request_template.md
@@ -0,0 +1,28 @@
+# Description
+
+Please include a summary of the changes and the related issue.
+
+## Type of change
+
+- [ ] Bug fix (non-breaking change which fixes an issue)
+- [ ] New feature (non-breaking change which adds functionality)
+- [ ] Breaking change (fix or feature that would cause existing
+ functionality to not work as expected)
+- [ ] This change requires a documentation update
+
+# How Has This Been Tested?
+
+Please describe the tests that you ran to verify your changes.
+
+- [ ] Test A
+- [ ] Test B
+
+# Checklist:
+
+- [ ] My code follows the style guidelines of this project
+- [ ] I have performed a self-review of my code
+- [ ] I have commented my code, particularly in hard-to-understand areas
+- [ ] I have made corresponding changes to the documentation
+- [ ] My changes generate no new warnings
+- [ ] I have added tests that prove my fix is effective or that my feature works
+- [ ] New and existing unit tests pass locally with my changes
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
new file mode 100644
index 0000000..6901d81
--- /dev/null
+++ b/.github/workflows/build.yml
@@ -0,0 +1,26 @@
+# This workflow will build a golang project
+# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go
+
+name: Build & Test
+
+on: [push, pull_request]
+
+jobs:
+
+ build:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo
+ uses: actions/checkout@v4
+
+ - name: Set up Go
+ uses: actions/setup-go@v5
+ with:
+ go-version: '1.22.x'
+ check-latest: true
+
+ - name: Build
+ run: go build -v ./...
+
+ - name: Test
+ run: go test -v ./...
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
new file mode 100644
index 0000000..961fd72
--- /dev/null
+++ b/.github/workflows/release.yml
@@ -0,0 +1,51 @@
+# This workflow will release a golang project using goreleaser
+
+name: Release
+
+permissions: write-all
+
+on:
+ push:
+ branches: [ "main" ]
+
+jobs:
+
+ release:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - name: Set up Go
+ uses: actions/setup-go@v5
+ with:
+ go-version: '1.22.x'
+ check-latest: true
+
+ - name: Set up svu
+ uses: obfu5c8/action-svu@v1
+ id: install_svu
+ with:
+ type: none
+
+ - name: Tag latest commit
+ id: tag-commit
+ run: |
+ CURR=$(svu c)
+ NEXT=$(svu)
+ if [[ "$NEXT" != "$CURR" ]]; then
+ git tag $NEXT
+ echo "next-version=$NEXT" >> "$GITHUB_OUTPUT"
+ else
+ echo "next-version=" >> "$GITHUB_OUTPUT"
+ fi
+
+ - name: Run GoReleaser
+ uses: goreleaser/goreleaser-action@v6
+ if: steps.tag-commit.outputs.next-version != ''
+ with:
+ args: release --clean
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/validate.yml b/.github/workflows/validate.yml
new file mode 100644
index 0000000..0d728b4
--- /dev/null
+++ b/.github/workflows/validate.yml
@@ -0,0 +1,47 @@
+# This workflow will validate a golang project
+
+name: Validate
+
+on:
+ push:
+ pull_request:
+ branches:
+ - main
+
+jobs:
+
+ lint:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 2
+
+ - name: Set up Go
+ uses: actions/setup-go@v5
+ with:
+ go-version: '1.22.x'
+ check-latest: true
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+
+ - name: Setup Node
+ uses: actions/setup-node@v4
+
+ - name: Add GOBIN to PATH
+ run: echo "$(go env GOPATH)/bin" >> $GITHUB_PATH
+ shell: bash
+
+ - name: Install staticcheck
+ run: "go install 'honnef.co/go/tools/cmd/staticcheck@latest'"
+
+ - name: Run pre-commit
+ uses: pre-commit/action@v3.0.1
+
+ - name: Install commitlint
+ run: "npm install -g @commitlint/cli @commitlint/config-conventional"
+
+ - name: Lint last commit message
+ run: "commitlint --from HEAD~1 --to HEAD --verbose"
diff --git a/.gitignore b/.gitignore
index c45b753..accbf60 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,7 +1,134 @@
-.DS_store
-cmd/standalone/standalone
-cmd/standalone/*.yaml
-cmd/playground
+# Source: https://github.com/github/gitignore/blob/master/Node.gitignore
+
+# Logs
+logs
+*.log
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+lerna-debug.log*
+.pnpm-debug.log*
+
+# Diagnostic reports (https://nodejs.org/api/report.html)
+report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
+
+# Runtime data
+pids
+*.pid
+*.seed
+*.pid.lock
+
+# Directory for instrumented libs generated by jscoverage/JSCover
+lib-cov
+
+# Coverage directory used by tools like istanbul
+coverage
+*.lcov
+
+# nyc test coverage
+.nyc_output
+
+# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
+.grunt
+
+# Bower dependency directory (https://bower.io/)
+bower_components
+
+# node-waf configuration
+.lock-wscript
+
+# Compiled binary addons (https://nodejs.org/api/addons.html)
+build/Release
+
+# Dependency directories
+node_modules/
+jspm_packages/
+
+# Snowpack dependency directory (https://snowpack.dev/)
+web_modules/
+
+# TypeScript cache
+*.tsbuildinfo
+
+# Optional npm cache directory
+.npm
+
+# Optional eslint cache
+.eslintcache
+
+# Microbundle cache
+.rpt2_cache/
+.rts2_cache_cjs/
+.rts2_cache_es/
+.rts2_cache_umd/
+
+# Optional REPL history
+.node_repl_history
+
+# Output of 'npm pack'
+*.tgz
+
+# Yarn Integrity file
+.yarn-integrity
+
+# dotenv environment variables file
+.env
+.env.test
+.env.production
+
+# parcel-bundler cache (https://parceljs.org/)
+.cache
+.parcel-cache
+
+# Next.js build output
+.next
+out
+
+# Nuxt.js build / generate output
+.nuxt
+dist
+
+# Gatsby files
+.cache/
+# Comment in the public line if your project uses Gatsby and not Next.js
+# https://nextjs.org/blog/next-9-1#public-directory-support
+# public
+
+# vuepress build output
+.vuepress/dist
+
+# Serverless directories
+.serverless/
+
+# FuseBox cache
+.fusebox/
+
+# DynamoDB Local files
+.dynamodb/
+
+# TernJS port file
+.tern-port
+
+# Stores VSCode versions used for testing VSCode extensions
+.vscode-test
+
+# yarn v2
+.yarn/cache
+.yarn/unplugged
+.yarn/build-state.yml
+.yarn/install-state.gz
+.pnp.*
+
+# Binary directory
+bin
+
+# Config and local files
+config.yml
config.yaml
*.local.*
+
+dist/
+cf-params.json
+__tools/
main
+.DS_Store
diff --git a/.goreleaser.yaml b/.goreleaser.yaml
new file mode 100644
index 0000000..39b9c7e
--- /dev/null
+++ b/.goreleaser.yaml
@@ -0,0 +1,49 @@
+# yaml-language-server: $schema=https://goreleaser.com/static/schema.json
+version: 2
+before:
+ hooks:
+ # You may remove this if you don't use go modules.
+ - go mod tidy
+
+builds:
+ - id: standalone
+ main: ./cmd/databricks/databricks.go
+ env:
+ - CGO_ENABLED=0
+ goos:
+ - linux
+ - windows
+ goarch:
+ - "386"
+ - amd64
+ ldflags:
+    - '-s -w -X main.gIntegrationVersion={{.Version}} -X main.gGitCommit={{.Commit}} -X main.gBuildDate={{.Date}}'
+
+archives:
+ - builds:
+ - standalone
+ format: tar.gz
+ # this name template makes the OS and Arch compatible with the results of uname.
+ name_template: >-
+ {{ .ProjectName }}_
+ {{- title .Os }}_
+ {{- if eq .Arch "amd64" }}x86_64
+ {{- else if eq .Arch "386" }}i386
+ {{- else }}{{ .Arch }}{{ end }}
+ # use zip for windows archives
+ format_overrides:
+ - goos: windows
+ format: zip
+
+checksum:
+ name_template: 'checksums.txt'
+
+snapshot:
+ name_template: "{{ incpatch .Version }}-next"
+
+changelog:
+ sort: asc
+ filters:
+ exclude:
+ - '^docs:'
+ - '^test:'
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..a6df772
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,30 @@
+# See https://pre-commit.com for more information
+# See https://pre-commit.com/hooks.html for more hooks
+default_stages:
+- commit
+repos:
+- repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.4.0
+ hooks:
+ - id: check-added-large-files
+ - id: check-byte-order-marker
+ - id: check-executables-have-shebangs
+ - id: check-json
+ - id: check-shebang-scripts-are-executable
+ - id: check-yaml
+ exclude: cf-template.yaml
+ - id: end-of-file-fixer
+ - id: mixed-line-ending
+ - id: trailing-whitespace
+- repo: https://github.com/Bahjat/pre-commit-golang
+ rev: v1.0.3
+ hooks:
+ - id: go-vet
+ - id: go-static-check # install https://staticcheck.io/docs/
+- repo: https://github.com/alessandrojcm/commitlint-pre-commit-hook
+ rev: v9.5.0
+ hooks:
+ - id: commitlint
+ stages:
+ - commit-msg
+ additional_dependencies: ['@commitlint/config-conventional']
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 09d46ee..9fce8d1 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -27,4 +27,4 @@ For more information about CLAs, please check out Alex Russell’s excellent pos
## Slack
-We host a public Slack with a dedicated channel for contributors and maintainers of open source projects hosted by New Relic. If you are contributing to this project, you're welcome to request access to the #oss-contributors channel in the newrelicusers.slack.com workspace. To request access, see https://join.slack.com/t/newrelicusers/shared_invite/zt-1ayj69rzm-~go~Eo1whIQGYnu3qi15ng.
+We host a public Slack with a dedicated channel for contributors and maintainers of open source projects hosted by New Relic. If you are contributing to this project, you're welcome to request access to the #oss-contributors channel in the newrelicusers.slack.com workspace. To request access, please use this [link](https://join.slack.com/t/newrelicusers/shared_invite/zt-1ayj69rzm-~go~Eo1whIQGYnu3qi15ng).
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..e4c6d54
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,67 @@
+export PATH := $(PATH):$(GOPATH)/bin
+
+INTEGRATION := newrelic-databricks-integration
+BINARY_NAME = $(INTEGRATION)
+LAMBDA_BINARY_NAME = bootstrap
+BIN_FILES := ./cmd/databricks/...
+#LAMBDA_BIN_FILES := ./cmd/bitmovin-lambda/...
+
+GIT_COMMIT = $(shell git rev-parse HEAD)
+BUILD_DATE = $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
+GIT_TAG = $(shell git describe --tags --abbrev=0 --exact-match 2>/dev/null)
+
+LDFLAGS += -X main.gIntegrationVersion=$(GIT_TAG)
+LDFLAGS += -X main.gGitCommit=${GIT_COMMIT}
+LDFLAGS += -X main.gBuildDate=${BUILD_DATE}
+
+all: build
+
+#build: clean compile compile-lambda compile-docker
+build: clean compile
+
+clean:
+ @echo "=== $(INTEGRATION) === [ clean ]: removing binaries..."
+ @rm -rfv bin
+
+bin/$(BINARY_NAME):
+ @echo "=== $(INTEGRATION) === [ compile ]: building $(BINARY_NAME)..."
+ @go mod tidy
+ @go build -v -ldflags '$(LDFLAGS)' -o bin/$(BINARY_NAME) $(BIN_FILES)
+
+#bin/docker/$(BINARY_NAME):
+# @echo "=== $(INTEGRATION) === [ compile ]: building Docker binary $(BINARY_NAME)..."
+# @go mod tidy
+# @GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -v -ldflags '$(LDFLAGS)' \
+# -o bin/docker/$(BINARY_NAME) $(BIN_FILES)
+#
+#bin/$(LAMBDA_BINARY_NAME):
+# @echo "=== $(INTEGRATION) === [ compile ]: building $(LAMBDA_BINARY_NAME)..."
+# @go mod tidy
+# @GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -v -ldflags '$(LDFLAGS)' \
+# -tags lambda.norpc -o bin/$(LAMBDA_BINARY_NAME) $(LAMBDA_BIN_FILES)
+
+compile: bin/$(BINARY_NAME)
+
+#compile-lambda: bin/$(LAMBDA_BINARY_NAME)
+#
+#compile-docker: bin/docker/$(BINARY_NAME)
+#
+#docker: build
+# @docker build -t newrelic-bitmovin-analytics:latest \
+# -f build/package/Dockerfile \
+# .
+#
+#package-lambda: build
+# @./scripts/lambda/build.sh
+#
+#deploy-lambda: build
+# @./scripts/lambda/deploy.sh
+#
+#update-lambda: build
+# @./scripts/lambda/update.sh
+#
+#delete-lambda:
+# @./scripts/lambda/delete.sh
+#
+#.PHONY: all build clean compile compile-lambda compile-docker docker deploy-lambda update-lambda delete-lambda
+.PHONY: all build clean compile
diff --git a/README.md b/README.md
index 6d9900a..ec8216c 100644
--- a/README.md
+++ b/README.md
@@ -1,135 +1,383 @@
+![GitHub forks](https://img.shields.io/github/forks/newrelic-experimental/newrelic-databricks-integration?style=social)
+![GitHub stars](https://img.shields.io/github/stars/newrelic-experimental/newrelic-databricks-integration?style=social)
+![GitHub watchers](https://img.shields.io/github/watchers/newrelic-experimental/newrelic-databricks-integration?style=social)
+
+![GitHub all releases](https://img.shields.io/github/downloads/newrelic-experimental/newrelic-databricks-integration/total)
+![GitHub release (latest by date)](https://img.shields.io/github/v/release/newrelic-experimental/newrelic-databricks-integration)
+![GitHub last commit](https://img.shields.io/github/last-commit/newrelic-experimental/newrelic-databricks-integration)
+![GitHub Release Date](https://img.shields.io/github/release-date/newrelic-experimental/newrelic-databricks-integration)
+
+![GitHub issues](https://img.shields.io/github/issues/newrelic-experimental/newrelic-databricks-integration)
+![GitHub issues closed](https://img.shields.io/github/issues-closed/newrelic-experimental/newrelic-databricks-integration)
+![GitHub pull requests](https://img.shields.io/github/issues-pr/newrelic-experimental/newrelic-databricks-integration)
+![GitHub pull requests closed](https://img.shields.io/github/issues-pr-closed/newrelic-experimental/newrelic-databricks-integration)
+
# New Relic Databricks Integration
-Welcome to the New Relic Integration for Databricks! This repository provides scripts and instructions for integrating New Relic with Databricks through New Relic Datbaricks Integration or through other Open Source tools like OpenTelemetry and Prometheus.
+This integration collects Spark telemetry, Workflow telemetry, and Cost and
+Billing information from Databricks.
+
+![Apache Spark Dashboard Screenshot](./examples/spark-dashboard-jobs.png)
+
+## Table of Contents
+
+* [Getting Started](#getting-started)
+ * [On-host Deployment](#on-host)
+* [Features](#features)
+* [Usage](#usage)
+ * [Command Line Options](#command-line-options)
+ * [Configuration](#configuration)
+ * [General configuration](#general-configuration)
+ * [Pipeline configuration](#pipeline-configuration)
+ * [Log configuration](#log-configuration)
+    * [Databricks configuration](#databricks-configuration)
+    * [Spark configuration](#spark-configuration)
+
+## Getting Started
+
+To get started with the New Relic Databricks integration, [deploy the integration on a supported host environment](#on-host) and then [import](https://docs.newrelic.com/docs/query-your-data/explore-query-data/dashboards/dashboards-charts-import-export-data/#import-json) the [sample dashboard](./examples/spark-daskboard.json).
+
+**NOTE:** [On-host deployment](#on-host) is currently the only supported deployment type.
+
+### On-host
+
+The New Relic Databricks integration provides binaries for the following
+host platforms.
+
+* Linux x86
+* Linux amd64
+* Windows x86
+* Windows amd64
+
+#### Deploy the integration on host
+
+To run the Databricks integration on a host, perform the following steps.
+
+1. Download the appropriate binary for your platform from the [latest release](https://github.com/newrelic-experimental/newrelic-databricks-integration/releases).
+1. Create a directory named `configs` in the directory containing the binary.
+1. Create a file named `config.yml` in the `configs` directory and copy the
+ contents of the file [`configs/config.template.yml`](./configs/config.template.yml)
+ in this repository into it.
+1. Edit the `config.yml` file to [configure](#configuration) the integration
+ appropriately for your environment.
+1. Using the directory containing the binary as the current working directory,
+ execute the binary using the appropriate [Command Line Options](#command-line-options).
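+
+For example, a minimal sketch of these steps on a Linux host (the release
+archive and binary names below are placeholders; check the
+[latest release](https://github.com/newrelic-experimental/newrelic-databricks-integration/releases)
+for the exact file names):
+
+```bash
+# download the release archive for your platform, then unpack it
+tar xzf newrelic-databricks-integration_Linux_x86_64.tar.gz
+
+# create the configs directory next to the binary and seed it from the
+# template in this repository, then edit it for your environment
+mkdir -p configs
+cp /path/to/this/repo/configs/config.template.yml configs/config.yml
+
+# run the integration from the directory containing the binary
+./newrelic-databricks-integration
+```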
+
+## Features
+
+The New Relic Databricks integration supports the following capabilities.
+
+* [Collect Spark telemetry](#spark-configuration)
+
+ The New Relic Databricks integration can collect telemetry from Spark running
+ on Databricks. By default, the integration will automatically connect to
+ and collect telemetry from the Spark deployments in all clusters created via
+ the UI or API in the specified workspace.
+
+## Usage
+
+### Command Line Options
+
+| Option | Description | Default |
+| --- | --- | --- |
+| --config_path | path to the [`config.yml`](#configyml) to use | `configs/config.yml` |
+| --dry_run | flag to enable "dry run" mode | `false` |
+| --env_prefix | prefix to use for environment variable lookup | `''` |
+| --verbose | flag to enable "verbose" mode | `false` |
+| --version | display version information only | N/a |
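+
+For example, a hypothetical invocation that runs the integration once in
+verbose mode with an explicit configuration path (the binary name follows the
+Makefile build target):
+
+```bash
+# the license key may be supplied via the environment instead of config.yml
+export NEW_RELIC_LICENSE_KEY="<your New Relic ingest license key>"
+
+./newrelic-databricks-integration --config_path configs/config.yml --verbose
+```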
+
+### Configuration
+
+#### `config.yml`
+
+The Databricks integration is configured using a YAML file named [`config.yml`](#configyml).
+The default location for this file is `configs/config.yml` relative to the
+current working directory when the integration binary is executed. The supported
+configuration parameters are listed below. See [`config.template.yml`](./configs/config.template.yml)
+for a full configuration example.
+
+##### General configuration
+
+The parameters in this section are configured at the top level of the
+[`config.yml`](#configyml).
+
+###### `licenseKey`
+
+| Description | Valid Values | Required | Default |
+| --- | --- | --- | --- |
+| New Relic license key | string | Y | N/a |
-## Overview
+This parameter specifies the New Relic License Key (INGEST) that should be used
+to send generated metrics.
-This repository provides you various ways to utilize New Relic directly from your Databricks environment. With options to monitor via standalone integration, OpenTelemetry (OTel), or Prometheus, you flexibly choose what best fits your operational needs.
+The license key can also be specified using the `NEW_RELIC_LICENSE_KEY`
+environment variable.
-- **New Relic Databricks Integration:** A direct connection between Databricks and New Relic, enabling seamless data transfer and analysis capabilities. This integration supports `spark metrics`, `databricks queries metrics`, `databricks job runs events`. This integration along with New Relic APM agent can pull `logs`, and cluster performance related data as well.
+###### `region`
-- **OpenTelemetry (OTel) Integration:** An open-source observability framework, enabling you to generate and manage telemetry data, supports `spark metrics` from Databricks. Please follow the [instructions here](/opentelemetry/README.md) for a detailed guide on how to add initialization scripts for OpenTelemetry to your Databricks cluster.
+| Description | Valid Values | Required | Default |
+| --- | --- | --- | --- |
+| New Relic region identifier | `US` / `EU` | N | `US` |
-- **Prometheus Integration:** A powerful open-source systems monitoring and alerting toolkit which can process metrics from Databricks. Support `spark metrics` from Databricks. Please follow the [instructions here](link-to-the-instruction-page) for a detailed guide on how to add initialization scripts to your Databricks cluster.
+This parameter specifies the New Relic region to which generated metrics should
+be sent.
-Pick the option that suits your use-case and follow the associated guide to get started.
+###### `interval`
-## Setup New Relic Databricks Integration
+| Description | Valid Values | Required | Default |
+| --- | --- | --- | --- |
+| Polling interval (in _seconds_) | numeric | N | 60 |
-The Standalone environment runs the data pipelines as an independant service, either on-premises or cloud instances like AWS EC2. It can run on Linux, macOS, Windows, and any OS with support for GoLang.
+This parameter specifies the interval (in _seconds_) at which the integration
+should poll for data.
-### Prerequisites
+This parameter is only used when [`runAsService`](#runasservice) is set to
+`true`.
-- Go 1.20 or later.
+###### `runAsService`
-### Build
+| Description | Valid Values | Required | Default |
+| --- | --- | --- | --- |
+| Flag to enable running the integration as a "service" | `true` / `false` | N | `false` |
-Open a terminal, CD to `cmd/standalone`, and run:
+The integration can run either as a "service" or as a simple command line
+utility which runs once and exits when it is complete.
-```bash
-$ go build
-```
+When set to `true`, the integration process will run continuously and poll for
+data at the recurring interval specified by the [`interval`](#interval)
+parameter. The process will only exit if it is explicitly stopped or a fatal
+error or panic occurs.
-### Configuring the Pipeline
+When set to `false`, the integration will run once and exit. This is intended for
+use with an external scheduling mechanism like [cron](https://man7.org/linux/man-pages/man8/cron.8.html).
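+
+For example, a crontab entry along the following lines (the paths are
+placeholders) would run the integration every 15 minutes:
+
+```
+*/15 * * * * cd /opt/newrelic-databricks-integration && ./newrelic-databricks-integration
+```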
-The standalone environment requieres a YAML file for pipeline configuration. The requiered keys are:
+###### `pipeline`
-- `interval`: Integer. Time in seconds between requests.
+| Description | Valid Values | Required | Default |
+| --- | --- | --- | --- |
+| The root node for the set of [pipeline configuration](#pipeline-configuration) parameters | YAML Sequence | N | N/a |
-Check `config/example_config.yaml` for a configuration example.
+The integration retrieves, processes, and exports data to New Relic using
+a data pipeline consisting of one or more receivers, a processing chain, and a
+New Relic exporter. Various aspects of the pipeline are configurable. This
+element groups together the configuration parameters related to
+[pipeline configuration](#pipeline-configuration).
-#### New Relic APIs exporter
+###### `log`
-- `nr_account_id`: String. Account ID.
-- `nr_api_key`: String. Api key for writing.
-- `nr_endpoint`: String. New Relic endpoint region. Either `US` or `EU`. Optional, default value is `US`.
+| Description | Valid Values | Required | Default |
+| --- | --- | --- | --- |
+| The root node for the set of [log configuration](#log-configuration) parameters | YAML Sequence | N | N/a |
-### Running the Pipeline
+The integration uses the [logrus](https://pkg.go.dev/github.com/sirupsen/logrus)
+package for application logging. This element groups together the configuration
+parameters related to [log configuration](#log-configuration).
-Just run the following command from the build folder:
+###### `mode`
-```bash
-$ ./standalone path/to/config.yaml
-```
+| Description | Valid Values | Required | Default |
+| --- | --- | --- | --- |
+| The integration execution mode | `databricks` | N | `databricks` |
-To run the pipeline on system start, check your specific system init documentation.
+The integration execution mode. Currently, the only supported execution mode is
+`databricks`.
-### Adding Initialization Scripts to Databricks
+###### `databricks`
-Databricks Initialization Scripts are shell scripts that run when a cluster is starting. They are useful for setting up custom configurations or third-party integrations such as setting up monitoring agents. Here is how you add an init script to Databricks.
+| Description | Valid Values | Required | Default |
+| --- | --- | --- | --- |
+| The root node for the set of [Databricks configuration](#databricks-configuration) parameters | YAML Sequence | N | N/a |
-Based on the cloud Databricks is hosted on, you will be able to run the APM agent.
+This element groups together the configuration parameters related to
+[Databricks configuration](#databricks-configuration).
-### Install New Relic APM on Databricks
+The configuration parameters in this section are only needed when collecting
+Databricks-specific data, such as workflow and job telemetry or Databricks
+account information, or when collecting telemetry from Spark running on a
+Databricks cluster.
-1. **Add script to Databricks:** Create new file in workspace as nr-agent-installation.sh and add the below script to it.
+###### `spark`
- ```bash
- #!/bin/bash
+| Description | Valid Values | Required | Default |
+| --- | --- | --- | --- |
+| The root node for the set of [Spark configuration](#spark-configuration) parameters | YAML Sequence | N | N/a |
- # Define the newrelic version and jar path
+This element groups together the configuration parameters related to
+[Spark configuration](#spark-configuration).
- NR_VERSION="8.10.0" # replace with the version you want
- NR_JAR_PATH="/databricks/jars/newrelic-agent-${NR_VERSION}.jar"
- NR_CONFIG_FILE="/databricks/jars/newrelic.yml"
+The configuration parameters in this section can be used with or without the
+[`databricks`](#databricks) configuration parameters depending on whether the
+target Spark instance(s) are running on Databricks or not. See the
+[Databricks configuration](#databricks-configuration) section for details on
+collecting Spark telemetry from Spark running on Databricks.
- # Download the newrelic java agent
- curl -o ${NR_JAR_PATH} -L https://download.newrelic.com/newrelic/java-agent/newrelic-agent/${NR_VERSION}/newrelic-agent-${NR_VERSION}.jar
+##### Pipeline configuration
- # Create new relic yml file
- echo "common: &default_settings
- license_key: 'xxxxxx' # Replace with your License Key
- agent_enabled: true
+###### `receiveBufferSize`
- production:
- <<: *default_settings
- app_name: Databricks" > ${NR_CONFIG_FILE}
- ```
+| Description | Valid Values | Required | Default |
+| --- | --- | --- | --- |
+| Size of the buffer that holds items before processing | number | N | 500 |
-2. **Add the script to your Databricks cluster:** To add the initialization script to your cluster in Databricks, follow these steps:
+This parameter specifies the size of the buffer that holds received items before
+they are flushed through the processing chain and on to the exporters. When this
+size is reached, the items in the buffer will be flushed automatically.
- - Navigate to your Databricks workspace and go to the `Clusters` page.
- - Choose the cluster you want to add the script to and click `Edit`.
- - In the `Advanced Options` section, find the `Init Scripts` field.
- - Click on `Add`, then in the Script Path input, select workspace or cloud storage path where your script is stored.
- - Click `Confirm` and then `Update`.
+###### `harvestInterval`
-3. **Add Spark configurations to attach the java agent:**
+| Description | Valid Values | Required | Default |
+| --- | --- | --- | --- |
+| Harvest interval (in _seconds_) | number | N | 60 |
- - Navigate to your cluster `Advanced Options`, then `Spark`.
- - Add or update Spark configurations as key-value pairs. Here's an example:
+This parameter specifies the interval (in _seconds_) at which the pipeline
+should automatically flush received items through the processing chain and on
+to the exporters. Each time this interval is reached, the pipeline will flush
+items even if the item buffer has not reached the size specified by the
+[`receiveBufferSize`](#receivebuffersize) parameter.
- ```bash
- # Example jar path "/databricks/jars/newrelic-agent-8.10.0.jar"
+###### `instances`
- echo "spark.driver.extraJavaOptions -javaagent:${NR_JAR_PATH}"
- echo "spark.executor.extraJavaOptions -javaagent:${NR_JAR_PATH}"
- ```
+| Description | Valid Values | Required | Default |
+| --- | --- | --- | --- |
+| Number of concurrent pipeline instances to run | number | N | 3 |
-4. **Verify the script was executed:** After your cluster starts/restarts, you should verify that the script was executed successfully. You can do this by checking the cluster logs via the `Logs` tab on your clusters page.
+The integration retrieves, processes, and exports metrics to New Relic using
+a data pipeline consisting of one or more receivers, a processing chain, and a
+New Relic exporter. When [`runAsService`](#runasservice) is `true`, the
+integration can launch one or more "instances" of this pipeline to receive,
+process, and export data concurrently. Each "instance" will be configured with
+the same processing chain and exporters and the receivers will be spread across
+the available instances in a round-robin fashion.
-***Note***: Any changes to the script settings will apply only to new clusters or when existing clusters are restarted.
+This parameter specifies the number of pipeline instances to launch.
+
+**NOTE:** When [`runAsService`](#runasservice) is `false`, only a single
+pipeline instance is used.
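+
+For reference, a `pipeline` block in [`config.yml`](#configyml) using the
+defaults described above looks like this:
+
+```yaml
+pipeline:
+  receiveBufferSize: 500
+  harvestInterval: 60
+  instances: 3
+```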
+
+##### Log configuration
+
+###### `level`
+
+| Description | Valid Values | Required | Default |
+| --- | --- | --- | --- |
+| Log level | `panic` / `fatal` / `error` / `warn` / `info` / `debug` / `trace` | N | `warn` |
+
+This parameter specifies the minimum severity of log messages to output, with
+`trace` being the least severe and `panic` being the most severe. For example,
+at the default log level (`warn`), all log messages with severities `warn`,
+`error`, `fatal`, and `panic` will be output but `info`, `debug`, and `trace`
+will not.
+
+###### `fileName`
+
+| Description | Valid Values | Required | Default |
+| --- | --- | --- | --- |
+| Path to a file where log output will be written | string | N | `stderr` |
+
+This parameter designates a file path where log output should be written. When
+no path is specified, log output will be written to the standard error stream
+(`stderr`).
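+
+For example, a `log` block that raises the log level and writes to a file (the
+file name is a placeholder; omit `fileName` to log to `stderr`):
+
+```yaml
+log:
+  level: debug
+  fileName: newrelic-databricks-integration.log
+```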
+
+##### Databricks configuration
+
+###### `accessToken`
+
+| Description | Valid Values | Required | Default |
+| --- | --- | --- | --- |
+| Databricks access token | string | Y | N/a |
+
+This parameter specifies the Databricks [personal access token](https://docs.databricks.com/en/dev-tools/auth/pat.html)
+used by the integration to authenticate Databricks API calls.
+
+The access token can also be specified using the `DATABRICKS_ACCESSTOKEN`
+environment variable.
+
+###### `workspaceHost`
+
+| Description | Valid Values | Required | Default |
+| --- | --- | --- | --- |
+| Databricks workspace instance name | string | Y | N/a |
+
+This parameter specifies the [instance name](https://docs.databricks.com/en/workspace/workspace-details.html)
+of the target Databricks instance for which data should be collected. This is
+used by the integration when constructing the URLs for API calls. Note that the
+value of this parameter _must not_ include the `https://` prefix, e.g. use
+`my-databricks-instance-name.cloud.databricks.com`, not
+`https://my-databricks-instance-name.cloud.databricks.com`.
+
+The workspace host can also be specified using the `DATABRICKS_WORKSPACEHOST`
+environment variable.
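+
+As an alternative to placing these values in [`config.yml`](#configyml), both
+can be supplied via the environment variables noted above, for example:
+
+```bash
+export DATABRICKS_ACCESSTOKEN="<your Databricks personal access token>"
+export DATABRICKS_WORKSPACEHOST="my-databricks-instance-name.cloud.databricks.com"
+```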
+
+##### Spark configuration
+
+###### `metricPrefix`
+
+| Description | Valid Values | Required | Default |
+| --- | --- | --- | --- |
+| A prefix to prepend to Spark metric names | string | N | `spark.` |
+
+This parameter specifies a prefix that will be prepended to each Spark metric
+name when the metric is exported to New Relic.
+
+For example, if this parameter is set to `spark.` (the default), then the full
+name of the metric representing the value of the memory used on application
+executors (`app.executor.memoryUsed`) will be `spark.app.executor.memoryUsed`.
+
+**NOTE:** It is not recommended to leave this value empty as the metric names
+without a prefix may be ambiguous.
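+
+For reference, the corresponding `spark` block in [`config.yml`](#configyml) is
+simply:
+
+```yaml
+spark:
+  metricPrefix: spark.
+```
+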
## Support
-New Relic hosts and moderates an online forum where customers can interact with New Relic employees as well as other customers to get help and share best practices. If you're running into a problem, please raise an issue on this repository and we will try to help you ASAP. Please bear in mind this is an open source project and hence it isn't directly supported by New Relic.
+New Relic has open-sourced this project. This project is provided AS-IS WITHOUT
+WARRANTY OR DEDICATED SUPPORT. Issues and contributions should be reported to
+the project here on GitHub.
+
+We encourage you to bring your experiences and questions to the
+[Explorers Hub](https://discuss.newrelic.com/) where our community members
+collaborate on solutions and new ideas.
+
+### Privacy
+
+At New Relic we take your privacy and the security of your information
+seriously, and are committed to protecting your information. We must emphasize
+the importance of not sharing personal data in public forums, and ask all users
+to scrub logs and diagnostic information for sensitive information, whether
+personal, proprietary, or otherwise.
-## Contribute
+We define “Personal Data” as any information relating to an identified or
+identifiable individual, including, for example, your name, phone number, post
+code or zip code, Device ID, IP address, and email address.
-We encourage your contributions to improve New relic Databricks Integration! Keep in mind that when you submit your pull request, you'll need to sign the CLA via the click-through using CLA-Assistant. You only have to sign the CLA one time per project.
+For more information, review [New Relic’s General Data Privacy Notice](https://newrelic.com/termsandconditions/privacy).
-If you have any questions, or to execute our corporate CLA (which is required if your contribution is on behalf of a company), drop us an email at .
+### Contribute
+
+We encourage your contributions to improve this project! Keep in mind that
+when you submit your pull request, you'll need to sign the CLA via the
+click-through using CLA-Assistant. You only have to sign the CLA one time per
+project.
+
+If you have any questions, or to execute our corporate CLA (which is required
+if your contribution is on behalf of a company), drop us an email at
+opensource@newrelic.com.
**A note about vulnerabilities**
-As noted in our [security policy](../../security/policy), New Relic is committed to the privacy and security of our customers and their data. We believe that providing coordinated disclosure by security researchers and engaging with the security community are important means to achieve our security goals.
+As noted in our [security policy](../../security/policy), New Relic is committed
+to the privacy and security of our customers and their data. We believe that
+providing coordinated disclosure by security researchers and engaging with the
+security community are important means to achieve our security goals.
-If you believe you have found a security vulnerability in this project or any of New Relic's products or websites, we welcome and greatly appreciate you reporting it to New Relic through [our bug bounty program](https://docs.newrelic.com/docs/security/security-privacy/information-security/report-security-vulnerabilities/).
+If you believe you have found a security vulnerability in this project or any of
+New Relic's products or websites, we welcome and greatly appreciate you
+reporting it to New Relic through [HackerOne](https://hackerone.com/newrelic).
If you would like to contribute to this project, review [these guidelines](./CONTRIBUTING.md).
-## License
+To all contributors, we thank you! Without your contribution, this project
+would not be what it is today.
+
+### License
-New Relic Databricks Integration is licensed under the [Apache 2.0](http://apache.org/licenses/LICENSE-2.0.txt) License.
+The New Relic Databricks Integration project is licensed under the
+[Apache 2.0](http://apache.org/licenses/LICENSE-2.0.txt) License.
diff --git a/cmd/databricks/databricks.go b/cmd/databricks/databricks.go
new file mode 100644
index 0000000..f29e4c4
--- /dev/null
+++ b/cmd/databricks/databricks.go
@@ -0,0 +1,75 @@
+package main
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/newrelic-experimental/newrelic-databricks-integration/internal/databricks"
+ "github.com/newrelic/newrelic-labs-sdk/pkg/integration"
+ "github.com/newrelic/newrelic-labs-sdk/pkg/integration/log"
+ "github.com/spf13/viper"
+)
+
+var (
+ /* Args below are populated via ldflags at build time */
+ gIntegrationID = "com.newrelic.labs.newrelic-databricks-integration"
+ gIntegrationName = "New Relic Databricks Integration"
+ gIntegrationVersion = "2.0.0"
+ gGitCommit = ""
+ gBuildDate = ""
+ gBuildInfo = integration.BuildInfo{
+ Id: gIntegrationID,
+ Name: gIntegrationName,
+ Version: gIntegrationVersion,
+ GitCommit: gGitCommit,
+ BuildDate: gBuildDate,
+ }
+)
+
+func main() {
+ // Create a new background context to use
+ ctx := context.Background()
+
+ // Create the integration with options
+ i, err := integration.NewStandaloneIntegration(
+ &gBuildInfo,
+ gBuildInfo.Name,
+ integration.WithInterval(60),
+ integration.WithLicenseKey(),
+ integration.WithApiKey(),
+ integration.WithEvents(ctx),
+ )
+ fatalIfErr(err)
+
+ mode := viper.GetString("mode")
+ if mode == "" {
+ mode = "databricks"
+ }
+
+ switch mode {
+ case "databricks":
+ err = databricks.InitPipelines(ctx, i)
+ fatalIfErr(err)
+
+ // @TODO: support any spark context
+ //case "spark":
+ // err = spark.InitPipelines(i)
+ // fatalIfErr(err)
+
+ // @TODO: support other cluster providers/modes like yarn/k8s
+
+ default:
+ fatalIfErr(fmt.Errorf("unrecognized mode %s", mode))
+ }
+
+ // Run the integration
+ defer i.Shutdown(ctx)
+ err = i.Run(ctx)
+ fatalIfErr(err)
+}
+
+func fatalIfErr(err error) {
+ if err != nil {
+ log.Fatalf(err)
+ }
+}
diff --git a/cmd/standalone/main.go b/cmd/standalone/main.go
deleted file mode 100644
index 1c0e318..0000000
--- a/cmd/standalone/main.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package main
-
-import (
- "os"
-
- log "github.com/sirupsen/logrus"
-
- "newrelic/multienv/pkg/env/standalone"
-)
-
-func main() {
- configFile := "./config.yaml"
- if len(os.Args) > 1 {
- configFile = os.Args[1]
- }
-
- log.Print("Loading config file: " + configFile)
-
- pipeConf, err := standalone.LoadConfig(configFile)
- if err != nil {
- log.Error("Error loading config: ", err)
- os.Exit(1)
- }
-
- err = standalone.Start(pipeConf)
- if err != nil {
- os.Exit(2)
- }
-
- select {}
-}
diff --git a/commitlint.config.js b/commitlint.config.js
new file mode 100644
index 0000000..4fedde6
--- /dev/null
+++ b/commitlint.config.js
@@ -0,0 +1 @@
+module.exports = { extends: ['@commitlint/config-conventional'] }
diff --git a/configs/config.template.yml b/configs/config.template.yml
new file mode 100644
index 0000000..3aef05a
--- /dev/null
+++ b/configs/config.template.yml
@@ -0,0 +1,30 @@
+apiKey: [YOUR_NEW_RELIC_API_KEY]
+licenseKey: [YOUR_NEW_RELIC_LICENSE_KEY]
+accountId: 123456
+region: US
+interval: 60
+runAsService: false
+pipeline:
+ receiveBufferSize: 500
+ harvestInterval: 60
+ instances: 3
+log:
+ level: warn
+ fileName: trace.log
+
+mode: databricks
+
+databricks:
+ accessToken: [YOUR_DATABRICKS_ACCESS_TOKEN]
+ workspaceHost: [YOUR_DATABRICKS_WORKSPACE_INSTANCE_NAME]
+ # RESERVED FOR FUTURE USE
+ #accountHost: https://accounts.cloud.databricks.com
+ # RESERVED FOR FUTURE USE
+ #sparkMetrics: true
+
+spark:
+ # RESERVED FOR FUTURE USE
+ #contexts:
+ #- contextUrl1
+ #- contextUrl2
+ metricPrefix: spark.
diff --git a/configs/example_config.yaml b/configs/example_config.yaml
deleted file mode 100644
index 1f2ef7d..0000000
--- a/configs/example_config.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-# Config example file to configure a data pipeline for the Standalone Environment.
-#
-# "interval", number of seconds between executions. Optional, default value 60.
-# "exporter", string, where to export data. Possible values: "nrinfra", "nrmetrics", "nrevents", "nrlogs", "nrtraces", "otel" and "prom"
-# "buffer", size of channel buffers. Optional, default value 100.
-
-interval: 60
-
-# Custom keys here...
-
-# Size of communication buffer between modules
-# buffer: 500
-
-# New Relic API credentials
-nr_account_id: xxxxx
-nr_user_key: xxxxx
-nr_api_key: xxxxx
-nr_endpoint: US
-
-# Databricks Credentials
-db_access_token: xxxxx
-spark_endpoint: xxxxx
-databricks_endpoint: xxxxx
-
-# Exporter batching and timing
-batch_size: 50
-harvest_time: 10
\ No newline at end of file
diff --git a/examples/spark-dashboard-executors.png b/examples/spark-dashboard-executors.png
new file mode 100644
index 0000000..95fe120
Binary files /dev/null and b/examples/spark-dashboard-executors.png differ
diff --git a/examples/spark-dashboard-jobs.png b/examples/spark-dashboard-jobs.png
new file mode 100644
index 0000000..66d4c85
Binary files /dev/null and b/examples/spark-dashboard-jobs.png differ
diff --git a/examples/spark-dashboard-rdds.png b/examples/spark-dashboard-rdds.png
new file mode 100644
index 0000000..0bbff22
Binary files /dev/null and b/examples/spark-dashboard-rdds.png differ
diff --git a/examples/spark-dashboard-stages.png b/examples/spark-dashboard-stages.png
new file mode 100644
index 0000000..deee19c
Binary files /dev/null and b/examples/spark-dashboard-stages.png differ
diff --git a/examples/spark-daskboard.json b/examples/spark-daskboard.json
new file mode 100644
index 0000000..ac73f78
--- /dev/null
+++ b/examples/spark-daskboard.json
@@ -0,0 +1,1633 @@
+{
+ "name": "Apache Spark",
+ "description": null,
+ "permissions": "PUBLIC_READ_WRITE",
+ "pages": [
+ {
+ "name": "Jobs",
+ "description": null,
+ "widgets": [
+ {
+ "title": "Jobs Running",
+ "layout": {
+ "column": 1,
+ "row": 1,
+ "width": 3,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.billboard"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT latest(spark.app.jobs) AS 'Jobs' WHERE sparkAppJobStatus = 'running'"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ }
+ }
+ },
+ {
+ "title": "Stages Running",
+ "layout": {
+ "column": 4,
+ "row": 1,
+ "width": 3,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.billboard"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT latest(spark.app.stages) AS 'Stages' WHERE sparkAppStageStatus = 'active'"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ }
+ }
+ },
+ {
+ "title": "Tasks Running",
+ "layout": {
+ "column": 7,
+ "row": 1,
+ "width": 3,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.billboard"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT latest(spark.app.job.tasks) AS 'Tasks' WHERE sparkAppTaskStatus = 'active'"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ }
+ }
+ },
+ {
+ "title": "Executors",
+ "layout": {
+ "column": 10,
+ "row": 1,
+ "width": 3,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.billboard"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT uniqueCount(sparkAppExecutorId) AS 'Executors' WHERE metricName = 'spark.app.executor.maxMemory'"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ }
+ }
+ },
+ {
+ "title": "Jobs By Status",
+ "layout": {
+ "column": 1,
+ "row": 4,
+ "width": 4,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.line"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "legend": {
+ "enabled": true
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT latest(spark.app.jobs) AS 'Jobs' FACET sparkAppJobStatus TIMESERIES"
+ }
+ ],
+ "nullValues": {
+ "nullValue": "zero"
+ },
+ "platformOptions": {
+ "ignoreTimeRange": false
+ },
+ "thresholds": {
+ "isLabelVisible": true
+ },
+ "yAxisLeft": {
+ "zero": true
+ },
+ "yAxisRight": {
+ "zero": true
+ }
+ }
+ },
+ {
+ "title": "Job Tasks By Status",
+ "layout": {
+ "column": 5,
+ "row": 4,
+ "width": 4,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.line"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "legend": {
+ "enabled": true
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT latest(spark.app.job.tasks) AS 'Tasks' FACET sparkAppTaskStatus TIMESERIES"
+ }
+ ],
+ "nullValues": {
+ "nullValue": "zero"
+ },
+ "platformOptions": {
+ "ignoreTimeRange": false
+ },
+ "thresholds": {
+ "isLabelVisible": true
+ },
+ "yAxisLeft": {
+ "zero": true
+ },
+ "yAxisRight": {
+ "zero": true
+ }
+ }
+ },
+ {
+ "title": "Job Stages By Status",
+ "layout": {
+ "column": 9,
+ "row": 4,
+ "width": 4,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.line"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "legend": {
+ "enabled": true
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT latest(spark.app.job.stages) AS 'Stages' FACET sparkAppStageStatus TIMESERIES"
+ }
+ ],
+ "nullValues": {
+ "nullValue": "zero"
+ },
+ "platformOptions": {
+ "ignoreTimeRange": false
+ },
+ "thresholds": {
+ "isLabelVisible": true
+ },
+ "yAxisLeft": {
+ "zero": true
+ },
+ "yAxisRight": {
+ "zero": true
+ }
+ }
+ }
+ ]
+ },
+ {
+ "name": "Stages",
+ "description": null,
+ "widgets": [
+ {
+ "title": "Average Executor Total Task Run Time",
+ "layout": {
+ "column": 1,
+ "row": 1,
+ "width": 4,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.line"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "legend": {
+ "enabled": true
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT average(spark.app.stage.executor.runTime) WHERE spark.app.stage.executor.runTime IS NOT NULL TIMESERIES"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ },
+ "thresholds": {
+ "isLabelVisible": true
+ },
+ "units": {
+ "unit": "MS"
+ },
+ "yAxisLeft": {
+ "zero": true
+ },
+ "yAxisRight": {
+ "zero": true
+ }
+ }
+ },
+ {
+ "title": "Average Executor Total Task CPU Time",
+ "layout": {
+ "column": 5,
+ "row": 1,
+ "width": 4,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.line"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "legend": {
+ "enabled": true
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT average(spark.app.stage.executor.cpuTime) / 1000000 WHERE spark.app.stage.executor.cpuTime IS NOT NULL TIMESERIES"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ },
+ "thresholds": {
+ "isLabelVisible": true
+ },
+ "units": {
+ "unit": "MS"
+ },
+ "yAxisLeft": {
+ "zero": true
+ },
+ "yAxisRight": {
+ "zero": true
+ }
+ }
+ },
+ {
+ "title": "Average JVM GC Time",
+ "layout": {
+ "column": 9,
+ "row": 1,
+ "width": 4,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.line"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "legend": {
+ "enabled": true
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT average(spark.app.stage.jvmGcTime) WHERE spark.app.stage.jvmGcTime IS NOT NULL TIMESERIES"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ },
+ "thresholds": {
+ "isLabelVisible": true
+ },
+ "units": {
+ "unit": "MS"
+ },
+ "yAxisLeft": {
+ "zero": true
+ },
+ "yAxisRight": {
+ "zero": true
+ }
+ }
+ },
+ {
+ "title": "Average Task Deserialization Time",
+ "layout": {
+ "column": 1,
+ "row": 4,
+ "width": 4,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.line"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "legend": {
+ "enabled": true
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT average(spark.app.stage.executor.deserializeTime) WHERE spark.app.stage.executor.deserializeTime IS NOT NULL TIMESERIES"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ },
+ "thresholds": {
+ "isLabelVisible": true
+ },
+ "units": {
+ "unit": "MS"
+ },
+ "yAxisLeft": {
+ "zero": true
+ },
+ "yAxisRight": {
+ "zero": true
+ }
+ }
+ },
+ {
+ "title": "Average Task Deserialization CPU Time",
+ "layout": {
+ "column": 5,
+ "row": 4,
+ "width": 4,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.line"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "legend": {
+ "enabled": true
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT average(spark.app.stage.executor.deserializeCpuTime) / 1000000 WHERE spark.app.stage.executor.deserializeCpuTime IS NOT NULL TIMESERIES"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ },
+ "thresholds": {
+ "isLabelVisible": true
+ },
+ "units": {
+ "unit": "MS"
+ },
+ "yAxisLeft": {
+ "zero": true
+ },
+ "yAxisRight": {
+ "zero": true
+ }
+ }
+ },
+ {
+ "title": "Average Result Serialization Time",
+ "layout": {
+ "column": 9,
+ "row": 4,
+ "width": 4,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.line"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "legend": {
+ "enabled": true
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT average(spark.app.stage.resultSerializationTime) WHERE spark.app.stage.resultSerializationTime IS NOT NULL TIMESERIES"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ },
+ "thresholds": {
+ "isLabelVisible": true
+ },
+ "units": {
+ "unit": "MS"
+ },
+ "yAxisLeft": {
+ "zero": true
+ },
+ "yAxisRight": {
+ "zero": true
+ }
+ }
+ },
+ {
+ "title": "I/O Bytes",
+ "layout": {
+ "column": 1,
+ "row": 7,
+ "width": 6,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.line"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "legend": {
+ "enabled": true
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT latest(spark.app.stage.inputBytes) AS 'Bytes In' WHERE spark.app.stage.inputBytes IS NOT NULL TIMESERIES"
+ },
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT latest(spark.app.stage.outputBytes) AS 'Bytes Out' WHERE spark.app.stage.outputBytes IS NOT NULL TIMESERIES"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ },
+ "thresholds": {
+ "isLabelVisible": true
+ },
+ "yAxisLeft": {
+ "zero": true
+ },
+ "yAxisRight": {
+ "zero": true
+ }
+ }
+ },
+ {
+ "title": "I/O Records",
+ "layout": {
+ "column": 7,
+ "row": 7,
+ "width": 6,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.line"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "legend": {
+ "enabled": true
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT latest(spark.app.stage.inputRecords) AS 'Records In' WHERE spark.app.stage.inputRecords IS NOT NULL TIMESERIES"
+ },
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT latest(spark.app.stage.outputRecords) AS 'Records Out' WHERE spark.app.stage.outputRecords IS NOT NULL TIMESERIES"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ },
+ "thresholds": {
+ "isLabelVisible": true
+ },
+ "yAxisLeft": {
+ "zero": true
+ },
+ "yAxisRight": {
+ "zero": true
+ }
+ }
+ },
+ {
+ "title": "Shuffle I/O Bytes",
+ "layout": {
+ "column": 1,
+ "row": 10,
+ "width": 6,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.line"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "legend": {
+ "enabled": true
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+                "query": "FROM Metric SELECT latest(spark.app.stage.shuffle.readBytes) AS 'Bytes Read' WHERE spark.app.stage.shuffle.readBytes IS NOT NULL TIMESERIES"
+ },
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT latest(spark.app.stage.shuffle.writeBytes) AS 'Bytes Out' WHERE spark.app.stage.shuffle.writeBytes IS NOT NULL TIMESERIES"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ },
+ "thresholds": {
+ "isLabelVisible": true
+ },
+ "yAxisLeft": {
+ "zero": true
+ },
+ "yAxisRight": {
+ "zero": true
+ }
+ }
+ },
+ {
+ "title": "Shuffle I/O Records",
+ "layout": {
+ "column": 7,
+ "row": 10,
+ "width": 6,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.line"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "legend": {
+ "enabled": true
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT latest(spark.app.stage.shuffle.readRecords) AS 'Records Read' WHERE spark.app.stage.shuffle.readRecords IS NOT NULL TIMESERIES"
+ },
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT latest(spark.app.stage.shuffle.writeRecords) AS 'Records Written' WHERE spark.app.stage.shuffle.writeRecords IS NOT NULL TIMESERIES"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ },
+ "thresholds": {
+ "isLabelVisible": true
+ },
+ "yAxisLeft": {
+ "zero": true
+ },
+ "yAxisRight": {
+ "zero": true
+ }
+ }
+ }
+ ]
+ },
+ {
+ "name": "Executors",
+ "description": null,
+ "widgets": [
+ {
+ "title": "Average Driver Memory Used",
+ "layout": {
+ "column": 1,
+ "row": 1,
+ "width": 6,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.line"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "legend": {
+ "enabled": true
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT average(spark.app.executor.memoryUsed) AS 'Bytes' WHERE sparkAppExecutorId = 'driver' AND spark.app.executor.memoryUsed IS NOT NULL TIMESERIES"
+ }
+ ],
+ "nullValues": {
+ "nullValue": "default"
+ },
+ "platformOptions": {
+ "ignoreTimeRange": false
+ },
+ "thresholds": {
+ "isLabelVisible": true
+ },
+ "units": {
+ "unit": "BYTES"
+ },
+ "yAxisLeft": {
+ "zero": true
+ },
+ "yAxisRight": {
+ "zero": true
+ }
+ }
+ },
+ {
+ "title": "Average Driver Disk Used",
+ "layout": {
+ "column": 7,
+ "row": 1,
+ "width": 6,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.line"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "legend": {
+ "enabled": true
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT average(spark.app.executor.diskUsed) AS 'Bytes' WHERE sparkAppExecutorId = 'driver' AND spark.app.executor.diskUsed IS NOT NULL TIMESERIES "
+ }
+ ],
+ "nullValues": {
+ "nullValue": "default"
+ },
+ "platformOptions": {
+ "ignoreTimeRange": false
+ },
+ "thresholds": {
+ "isLabelVisible": true
+ },
+ "units": {
+ "unit": "BYTES"
+ },
+ "yAxisLeft": {
+ "zero": true
+ },
+ "yAxisRight": {
+ "zero": true
+ }
+ }
+ },
+ {
+ "title": "Average Executor Memory Used",
+ "layout": {
+ "column": 1,
+ "row": 4,
+ "width": 6,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.line"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "legend": {
+ "enabled": true
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT average(spark.app.executor.memoryUsed) as 'Bytes' WHERE sparkAppExecutorId != 'driver' AND spark.app.executor.memoryUsed IS NOT NULL TIMESERIES FACET sparkAppExecutorId"
+ }
+ ],
+ "nullValues": {
+ "nullValue": "default"
+ },
+ "platformOptions": {
+ "ignoreTimeRange": false
+ },
+ "thresholds": {
+ "isLabelVisible": true
+ },
+ "units": {
+ "unit": "BYTES"
+ },
+ "yAxisLeft": {
+ "zero": true
+ },
+ "yAxisRight": {
+ "zero": true
+ }
+ }
+ },
+ {
+ "title": "Average Executor Disk Used",
+ "layout": {
+ "column": 7,
+ "row": 4,
+ "width": 6,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.line"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "legend": {
+ "enabled": true
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT average(spark.app.executor.diskUsed) AS 'Bytes' WHERE sparkAppExecutorId != 'driver' AND spark.app.executor.diskUsed IS NOT NULL TIMESERIES FACET sparkAppExecutorId"
+ }
+ ],
+ "nullValues": {
+ "nullValue": "default"
+ },
+ "platformOptions": {
+ "ignoreTimeRange": false
+ },
+ "thresholds": {
+ "isLabelVisible": true
+ },
+ "units": {
+ "unit": "BYTES"
+ },
+ "yAxisLeft": {
+ "zero": true
+ },
+ "yAxisRight": {
+ "zero": true
+ }
+ }
+ },
+ {
+ "title": "Active Tasks",
+ "layout": {
+ "column": 1,
+ "row": 7,
+ "width": 4,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.line"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "legend": {
+ "enabled": true
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT latest(spark.app.executor.activeTasks) AS 'Tasks' TIMESERIES WHERE sparkAppExecutorId != 'driver' FACET sparkAppExecutorId"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ },
+ "thresholds": {
+ "isLabelVisible": true
+ },
+ "yAxisLeft": {
+ "zero": true
+ },
+ "yAxisRight": {
+ "zero": true
+ }
+ }
+ },
+ {
+ "title": "Complete Tasks",
+ "layout": {
+ "column": 5,
+ "row": 7,
+ "width": 4,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.billboard"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT latest(spark.app.executor.completedTasks) AS 'Tasks' WHERE sparkAppExecutorId != 'driver' FACET sparkAppExecutorId"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ }
+ }
+ },
+ {
+ "title": "Failed Tasks",
+ "layout": {
+ "column": 9,
+ "row": 7,
+ "width": 4,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.billboard"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT latest(spark.app.executor.failedTasks) AS 'Tasks' WHERE sparkAppExecutorId != 'driver' FACET sparkAppExecutorId"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ }
+ }
+ },
+ {
+ "title": "Average Executor Used On Heap Memory",
+ "layout": {
+ "column": 1,
+ "row": 10,
+ "width": 6,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.line"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "legend": {
+ "enabled": true
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT average(spark.app.executor.memory.usedOnHeapStorageMemory) WHERE sparkAppExecutorId != 'driver' AND spark.app.executor.memory.usedOnHeapStorageMemory IS NOT NULL TIMESERIES FACET sparkAppExecutorId"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ },
+ "thresholds": {
+ "isLabelVisible": true
+ },
+ "units": {
+ "unit": "BYTES"
+ },
+ "yAxisLeft": {
+ "zero": true
+ },
+ "yAxisRight": {
+ "zero": true
+ }
+ }
+ },
+ {
+ "title": "Average Executor Used Off Heap Memory",
+ "layout": {
+ "column": 7,
+ "row": 10,
+ "width": 6,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.line"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "legend": {
+ "enabled": true
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT average(spark.app.executor.memory.usedOffHeapStorageMemory) WHERE sparkAppExecutorId != 'driver' AND spark.app.executor.memory.usedOffHeapStorageMemory IS NOT NULL TIMESERIES FACET sparkAppExecutorId"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ },
+ "thresholds": {
+ "isLabelVisible": true
+ },
+ "units": {
+ "unit": "BYTES"
+ },
+ "yAxisLeft": {
+ "zero": true
+ },
+ "yAxisRight": {
+ "zero": true
+ }
+ }
+ },
+ {
+ "title": "Average Executor Peak JVM Heap Memory",
+ "layout": {
+ "column": 1,
+ "row": 13,
+ "width": 6,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.line"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "legend": {
+ "enabled": true
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT average(spark.app.executor.memory.peak.jvmHeap) WHERE sparkAppExecutorId != 'driver' AND spark.app.executor.memory.peak.jvmHeap IS NOT NULL TIMESERIES FACET sparkAppExecutorId"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ },
+ "thresholds": {
+ "isLabelVisible": true
+ },
+ "units": {
+ "unit": "BYTES"
+ },
+ "yAxisLeft": {
+ "zero": true
+ },
+ "yAxisRight": {
+ "zero": true
+ }
+ }
+ },
+ {
+ "title": "Average Executor Peak JVM Off Heap Memory",
+ "layout": {
+ "column": 7,
+ "row": 13,
+ "width": 6,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.line"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "legend": {
+ "enabled": true
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT average(spark.app.executor.memory.peak.jvmOffHeap) WHERE sparkAppExecutorId != 'driver' AND spark.app.executor.memory.peak.jvmOffHeap IS NOT NULL TIMESERIES FACET sparkAppExecutorId"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ },
+ "thresholds": {
+ "isLabelVisible": true
+ },
+ "units": {
+ "unit": "BYTES"
+ },
+ "yAxisLeft": {
+ "zero": true
+ },
+ "yAxisRight": {
+ "zero": true
+ }
+ }
+ },
+ {
+ "title": "Total Executor JVM Task Duration (seconds)",
+ "layout": {
+ "column": 1,
+ "row": 16,
+ "width": 4,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.bar"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT latest(spark.app.executor.totalDuration) / 1000 AS 'Seconds' WHERE sparkAppExecutorId != 'driver' FACET sparkAppExecutorId"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ }
+ }
+ },
+ {
+ "title": "Total Executor JVM GC Time (seconds)",
+ "layout": {
+ "column": 5,
+ "row": 16,
+ "width": 4,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.bar"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT latest(spark.app.executor.totalGCTime) / 1000 AS 'Seconds' WHERE sparkAppExecutorId != 'driver' FACET sparkAppExecutorId"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ }
+ }
+ },
+ {
+ "title": "RDD Blocks",
+ "layout": {
+ "column": 9,
+ "row": 16,
+ "width": 4,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.line"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "legend": {
+ "enabled": true
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT latest(spark.app.executor.rddBlocks) AS 'RDD Blocks' WHERE sparkAppExecutorId != 'driver' TIMESERIES FACET sparkAppExecutorId"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ },
+ "thresholds": {
+ "isLabelVisible": true
+ },
+ "yAxisLeft": {
+ "zero": true
+ },
+ "yAxisRight": {
+ "zero": true
+ }
+ }
+ },
+ {
+ "title": "Total Input Bytes Summed",
+ "layout": {
+ "column": 1,
+ "row": 19,
+ "width": 4,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.billboard"
+ },
+ "rawConfiguration": {
+ "dataFormatters": [
+ {
+ "name": "Bytes",
+ "type": "humanized"
+ }
+ ],
+ "facet": {
+ "showOtherSeries": false
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT latest(spark.app.executor.totalInputBytes) AS 'Bytes' WHERE sparkAppExecutorId != 'driver' FACET sparkAppExecutorId"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ }
+ }
+ },
+ {
+ "title": "Total Shuffle Read Bytes Summed",
+ "layout": {
+ "column": 5,
+ "row": 19,
+ "width": 4,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.billboard"
+ },
+ "rawConfiguration": {
+ "dataFormatters": [
+ {
+ "name": "Bytes",
+ "type": "humanized"
+ }
+ ],
+ "facet": {
+ "showOtherSeries": false
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT latest(spark.app.executor.totalShuffleRead) AS 'Bytes' WHERE sparkAppExecutorId != 'driver' FACET sparkAppExecutorId"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ }
+ }
+ },
+ {
+ "title": "Total Shuffle Write Bytes Summed",
+ "layout": {
+ "column": 9,
+ "row": 19,
+ "width": 4,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.billboard"
+ },
+ "rawConfiguration": {
+ "dataFormatters": [
+ {
+ "name": "Bytes",
+ "type": "humanized"
+ }
+ ],
+ "facet": {
+ "showOtherSeries": false
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT latest(spark.app.executor.totalShuffleWrite) AS 'Bytes' WHERE sparkAppExecutorId != 'driver' FACET sparkAppExecutorId"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ }
+ }
+ }
+ ]
+ },
+ {
+ "name": "RDDs",
+ "description": null,
+ "widgets": [
+ {
+ "title": "Total Partitions By App & RDD",
+ "layout": {
+ "column": 1,
+ "row": 1,
+ "width": 6,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.table"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT latest(spark.app.storage.rdd.partitions) AS 'Partitions' WHERE spark.app.storage.rdd.partitions IS NOT NULL FACET sparkAppName, sparkAppRDDName "
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ }
+ }
+ },
+ {
+ "title": "Cached Partitions By App & RDD",
+ "layout": {
+ "column": 7,
+ "row": 1,
+ "width": 6,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.table"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT latest(spark.app.storage.rdd.cachedPartitions) AS 'Partitions' WHERE spark.app.storage.rdd.cachedPartitions IS NOT NULL FACET sparkAppName, sparkAppRDDName "
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ }
+ }
+ },
+ {
+ "title": "Average Memory Used By App & RDD",
+ "layout": {
+ "column": 1,
+ "row": 4,
+ "width": 6,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.line"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "legend": {
+ "enabled": true
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT average(spark.app.storage.rdd.memory.used) WHERE spark.app.storage.rdd.memory.used IS NOT NULL TIMESERIES FACET sparkAppName, sparkAppRDDName"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ },
+ "thresholds": {
+ "isLabelVisible": true
+ },
+ "units": {
+ "unit": "BYTES"
+ },
+ "yAxisLeft": {
+ "zero": true
+ },
+ "yAxisRight": {
+ "zero": true
+ }
+ }
+ },
+ {
+ "title": "Average Disk Used By App & RDD",
+ "layout": {
+ "column": 7,
+ "row": 4,
+ "width": 6,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.line"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "legend": {
+ "enabled": true
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT average(spark.app.storage.rdd.disk.used) WHERE spark.app.storage.rdd.disk.used IS NOT NULL TIMESERIES FACET sparkAppName, sparkAppRDDName"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ },
+ "thresholds": {
+ "isLabelVisible": true
+ },
+ "units": {
+ "unit": "BYTES"
+ },
+ "yAxisLeft": {
+ "zero": true
+ },
+ "yAxisRight": {
+ "zero": true
+ }
+ }
+ },
+ {
+ "title": "Average Memory Used by RDD Partition Block Name",
+ "layout": {
+ "column": 1,
+ "row": 7,
+ "width": 6,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.line"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "legend": {
+ "enabled": true
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT average(spark.app.storage.rdd.partition.memory.used) WHERE spark.app.storage.rdd.partition.memory.used IS NOT NULL TIMESERIES FACET sparkAppRDDName, sparkAppRddPartitionBlockName"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ },
+ "thresholds": {
+ "isLabelVisible": true
+ },
+ "units": {
+ "unit": "BYTES"
+ },
+ "yAxisLeft": {
+ "zero": true
+ },
+ "yAxisRight": {
+ "zero": true
+ }
+ }
+ },
+ {
+ "title": "Average Disk Used by RDD Partition Block Name",
+ "layout": {
+ "column": 7,
+ "row": 7,
+ "width": 6,
+ "height": 3
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.line"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "legend": {
+ "enabled": true
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [
+ 0
+ ],
+ "query": "FROM Metric SELECT average(spark.app.storage.rdd.partition.disk.used) WHERE spark.app.storage.rdd.partition.disk.used IS NOT NULL TIMESERIES FACET sparkAppRDDName, sparkAppRddPartitionBlockName"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ },
+ "thresholds": {
+ "isLabelVisible": true
+ },
+ "units": {
+ "unit": "BYTES"
+ },
+ "yAxisLeft": {
+ "zero": true
+ },
+ "yAxisRight": {
+ "zero": true
+ }
+ }
+ }
+ ]
+ }
+ ],
+ "variables": []
+ }
diff --git a/go.mod b/go.mod
index b0911ae..091d1cd 100644
--- a/go.mod
+++ b/go.mod
@@ -1,41 +1,77 @@
-module newrelic/multienv
+module github.com/newrelic-experimental/newrelic-databricks-integration
go 1.20
require (
- github.com/sirupsen/logrus v1.9.0
- github.com/spf13/viper v1.18.2
+ github.com/databricks/databricks-sdk-go v0.43.0
+ github.com/newrelic/newrelic-labs-sdk v1.5.0
)
require (
+ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
- github.com/gogo/protobuf v1.3.2 // indirect
- github.com/golang/snappy v0.0.4 // indirect
+ github.com/golang/protobuf v1.5.4 // indirect
+ github.com/google/go-querystring v1.1.0 // indirect
+ github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
+ github.com/hashicorp/go-retryablehttp v0.7.1 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
+ github.com/imdario/mergo v0.3.12 // indirect
github.com/magiconair/properties v1.8.7 // indirect
+ github.com/newrelic/go-agent/v3 v3.21.0 // indirect
+ github.com/newrelic/go-agent/v3/integrations/logcontext-v2/nrlogrus v1.0.0 // indirect
github.com/newrelic/infrastructure-agent v0.0.0-20201127092132-00ac7efc0cc6 // indirect
- github.com/pelletier/go-toml/v2 v2.1.0 // indirect
- github.com/prometheus/prometheus v0.40.3 // indirect
+ github.com/newrelic/newrelic-client-go v1.1.0 // indirect
+ github.com/pelletier/go-toml/v2 v2.2.2 // indirect
+ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/rogpeppe/go-internal v1.12.0 // indirect
- github.com/sagikazarmark/locafero v0.4.0 // indirect
+ github.com/sagikazarmark/locafero v0.6.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.11.0 // indirect
github.com/spf13/cast v1.6.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
+ github.com/spf13/viper v1.19.0
+ github.com/stretchr/testify v1.9.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
- go.uber.org/atomic v1.10.0 // indirect
- go.uber.org/multierr v1.9.0 // indirect
- golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
- golang.org/x/text v0.14.0 // indirect
+ github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80 // indirect
+ github.com/valyala/fastjson v1.6.3 // indirect
+ go.uber.org/multierr v1.11.0 // indirect
+ golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect
+ golang.org/x/net v0.25.0 // indirect
+ golang.org/x/text v0.16.0 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e // indirect
+ google.golang.org/grpc v1.64.0 // indirect
+ google.golang.org/protobuf v1.34.1 // indirect
+ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
)
require (
- github.com/aws/aws-lambda-go v1.41.0
- github.com/castai/promwrite v0.5.0
- github.com/mitchellh/mapstructure v1.5.0
- github.com/newrelic/infra-integrations-sdk/v4 v4.2.1
- golang.org/x/sys v0.15.0 // indirect
- gopkg.in/yaml.v3 v3.0.1
+ cloud.google.com/go/auth v0.4.2 // indirect
+ cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
+ cloud.google.com/go/compute/metadata v0.3.0 // indirect
+ github.com/felixge/httpsnoop v1.0.4 // indirect
+ github.com/go-logr/logr v1.4.1 // indirect
+ github.com/go-logr/stdr v1.2.2 // indirect
+ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+ github.com/google/s2a-go v0.1.7 // indirect
+ github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
+ github.com/mitchellh/mapstructure v1.5.0 // indirect
+ github.com/newrelic/infra-integrations-sdk/v4 v4.2.1 // indirect
+ github.com/sirupsen/logrus v1.9.0 // indirect
+ go.opencensus.io v0.24.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
+ go.opentelemetry.io/otel v1.24.0 // indirect
+ go.opentelemetry.io/otel/metric v1.24.0 // indirect
+ go.opentelemetry.io/otel/trace v1.24.0 // indirect
+ golang.org/x/crypto v0.23.0 // indirect
+ golang.org/x/mod v0.18.0 // indirect
+ golang.org/x/oauth2 v0.20.0 // indirect
+ golang.org/x/sys v0.21.0 // indirect
+ golang.org/x/time v0.5.0 // indirect
+ google.golang.org/api v0.182.0 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
)
+
+// @TODO: remove after development
+// replace github.com/newrelic/newrelic-labs-sdk v1.5.0 => ../../newrelic/newrelic-labs-sdk
diff --git a/go.sum b/go.sum
index e331495..ac65f7d 100644
--- a/go.sum
+++ b/go.sum
@@ -1,233 +1,152 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.110.10/go.mod h1:v1OoFqYxiBkUrruItNM3eT4lLByNjxmJSV/xDKJNnic=
-cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI=
-cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
-cloud.google.com/go/firestore v1.14.0/go.mod h1:96MVaHLsEhbvkBEdZgfN+AS/GIkco1LRpH9Xp9YZfzQ=
-cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8=
-cloud.google.com/go/longrunning v0.5.4/go.mod h1:zqNVncI0BOP8ST6XQD1+VcvuShMmq7+xFSzOL++V0dI=
-cloud.google.com/go/storage v1.35.1/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8=
-github.com/Azure/azure-sdk-for-go v65.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+cloud.google.com/go/auth v0.4.2 h1:sb0eyLkhRtpq5jA+a8KWw0W70YcdVca7KJ8TM0AFYDg=
+cloud.google.com/go/auth v0.4.2/go.mod h1:Kqvlz1cf1sNA0D+sYJnkPQOP+JMHkuHeIgVmCRtZOLc=
+cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4=
+cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q=
+cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
+cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
-github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA=
-github.com/Azure/go-autorest/autorest/adal v0.9.21/go.mod h1:zua7mBUaCc5YnSLKYgGJR/w5ePdMDA6H56upLsHzA9U=
-github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
-github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
-github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=
-github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
-github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
-github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
-github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
-github.com/aws/aws-lambda-go v1.41.0 h1:l/5fyVb6Ud9uYd411xdHZzSf2n86TakxzpvIoz7l+3Y=
-github.com/aws/aws-lambda-go v1.41.0/go.mod h1:jwFe2KmMsHmffA1X2R09hH6lFzJQxzI8qK17ewzbQMM=
github.com/aws/aws-sdk-go v1.25.14-0.20200515182354-0961961790e6/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
-github.com/aws/aws-sdk-go v1.44.128/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
-github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/castai/promwrite v0.5.0 h1:AxpHvaeWPqk+GLqLix0JkALzwLk5ZIMUemqvL4AAv5k=
-github.com/castai/promwrite v0.5.0/go.mod h1:PCwrucOaNJAcKdR8Tktz+/pQEXOnCWFL+2Yk7c9DmEU=
-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/containerd/containerd v1.3.7/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
-github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/databricks/databricks-sdk-go v0.43.0 h1:x4laolWhYlsQg2t8yWEGyRPZy4/Wv3pKnLEoJfVin7I=
+github.com/databricks/databricks-sdk-go v0.43.0/go.mod h1:a9rr0FOHLL26kOjQjZZVFjIYmRABCbrAWVeundDEVG8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
-github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/digitalocean/godo v1.88.0/go.mod h1:NRpFznZFvhHjBoqZAaOD3khVzsJ3EibzKqFL4R60dmA=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v20.10.21+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
-github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/elazarl/goproxy v0.0.0-20220417044921-416226498f94 h1:VIy7cdK7ufs7ctpTFkXJHm1uP3dJSnCGSPysEICB1so=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/envoyproxy/protoc-gen-validate v0.6.13/go.mod h1:qEySVqXrEugbHKvmhI8ZqtQi75/RHSSRNpffvB4I6Bw=
github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg=
-github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fortytw2/leaktest v1.3.1-0.20190606143808-d73c753520d9/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
-github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
-github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
-github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
+github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
-github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY=
-github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
-github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
-github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g=
-github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I=
-github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg=
-github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
-github.com/go-openapi/validate v0.21.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
-github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48/go.mod h1:dZGr0i9PLlaaTD4H/hoZIDjQ+r6xq8mgbRzHZf7f2J8=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
-github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.1.2-0.20181116123445-07eab6a8298c/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
-github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
-github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
-github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/pprof v0.0.0-20221102093814-76f304f74e5e/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
+github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
-github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
-github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU=
-github.com/googleapis/google-cloud-go-testing v0.0.0-20210719221736-1c9a4c676720/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
-github.com/gophercloud/gophercloud v1.0.0/go.mod h1:Q8fZtyi5zZxPS/j9aj3sSxtvj41AdQMDwyo1myduD5c=
+github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg=
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/grafana/regexp v0.0.0-20221005093135-b4c2bcb0a4b6/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1/go.mod h1:G+WkljZi4mflcqVxYSgvt8MNctRQHjEH8ubKtt1Ka3w=
-github.com/hashicorp/consul/api v1.25.1/go.mod h1:iiLVwR/htV7mas/sy0O+XSuEnrdBUUydemjxcUrAt4g=
-github.com/hashicorp/cronexpr v1.1.1/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
-github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
-github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
+github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
+github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ=
github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
-github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
-github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/nomad/api v0.0.0-20221102143410-8a95f1239005/go.mod h1:vgJmrz4Bz9E1cR/uy70oP9udUJKFRkcEYHlHTp4nFwI=
-github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
-github.com/hetznercloud/hcloud-go v1.35.3/go.mod h1:mepQwR6va27S3UQthaEPGS86jtzSY9xWL1e9dyxXpgA=
+github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/ionos-cloud/sdk-go/v6 v6.1.3/go.mod h1:Ox3W0iiEz0GHnfY9e5LmAxwklsxguuNFEUSu0gVRTME=
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
-github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
-github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
-github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
-github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kardianos/service v1.1.0/go.mod h1:RrJI2xn5vve/r32U5suTbeaSGoMU6GbNPoj36CVYcHc=
github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg=
-github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/kolo/xmlrpc v0.0.0-20200310150728-e0350524596b/go.mod h1:o03bZfuBwAXHetKXuInt4S7omeXUu62/A845kiycsSQ=
-github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
-github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
-github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
-github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/linode/linodego v1.9.3/go.mod h1:h6AuFR/JpqwwM/vkj7s8KV3iGN8/jxn+zc437F8SZ8w=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
-github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
-github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
-github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/nats-io/nats.go v1.31.0/go.mod h1:di3Bm5MLsoB4Bx61CBTsxuarI36WbhAwOm8QrW39+i8=
-github.com/nats-io/nkeys v0.4.6/go.mod h1:4DxZNzenSVd1cYQoAa8948QY3QDjrHfcfVADymtkpts=
-github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
+github.com/newrelic/go-agent/v3 v3.21.0 h1:KpkoW6PnSVzEDEO0W/C9LZEZZGwAb+a9g5DN8ifvt4Y=
+github.com/newrelic/go-agent/v3 v3.21.0/go.mod h1:rT6ZUxJc5rQbWLyCtjqQCOcfb01lKRFbc1yMQkcboWM=
+github.com/newrelic/go-agent/v3/integrations/logcontext-v2/nrlogrus v1.0.0 h1:i7maT5Pi3qv2xlUU/vm/C5BkG8YMLlIHfIWtMmXz7cY=
+github.com/newrelic/go-agent/v3/integrations/logcontext-v2/nrlogrus v1.0.0/go.mod h1:zYcBp4EDE47PUsZZAzEZ36QGC9YU2Wx9FSQ3goi7cCg=
github.com/newrelic/infra-identity-client-go v1.0.2/go.mod h1:lrG2ompP2Mr6D8WW615/h2AYNs9B9pw2zLuc38LNb4E=
github.com/newrelic/infra-integrations-sdk/v4 v4.2.1 h1:lh8sQgpdv0bCFi9dHOf5FxiSxrvcS5Qp2RuHTjR1kN8=
github.com/newrelic/infra-integrations-sdk/v4 v4.2.1/go.mod h1:Xctd58maTLaNbZ1dxfz9h1iSSnVA8llDzGnt59M2dHs=
github.com/newrelic/infrastructure-agent v0.0.0-20201127092132-00ac7efc0cc6 h1:oo3278EfxLk4kp6p8ln4td5AkqgdAM/f+FHYMA8X3o8=
github.com/newrelic/infrastructure-agent v0.0.0-20201127092132-00ac7efc0cc6/go.mod h1:OC9Em8HnZsHI3JQzMHFqfB4B7OBzQ2+gBffhS0Ip+pk=
-github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/newrelic/newrelic-client-go v1.1.0 h1:aflNjzQ21c+2GwBVh+UbAf9lznkRfCcVABoc5UM4IXw=
+github.com/newrelic/newrelic-client-go v1.1.0/go.mod h1:RYMXt7hgYw7nzuXIGd2BH0F1AivgWw7WrBhNBQZEB4k=
+github.com/newrelic/newrelic-labs-sdk v1.5.0 h1:WW9rXlxSEvbRbpFr97hGDFb/s33gsXcsjFlwwKU0N0A=
+github.com/newrelic/newrelic-labs-sdk v1.5.0/go.mod h1:YeQXK6c2PWwSzdceLW3qVXdgi7G8flVnfRpQSHSPNqI=
github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.2-0.20181029102219-09950c5fb1bb/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/ovh/go-ovh v1.1.0/go.mod h1:AxitLZ5HBRPyUd+Zl60Ajaag+rNTdVXWIkzfrVuTXWA=
-github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
-github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
+github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
+github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/alertmanager v0.24.0/go.mod h1:r6fy/D7FRuZh5YbnX6J3MBY0eI4Pb5yPYS7/bPSXXqI=
-github.com/prometheus/client_golang v1.13.1/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
-github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
-github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
-github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
-github.com/prometheus/exporter-toolkit v0.8.1/go.mod h1:00shzmJL7KxcsabLWcONwpyNEuWhREOnFqZW7vadFS0=
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
-github.com/prometheus/prometheus v0.40.3 h1:oMw1vVyrxHTigXAcFY6QHrGUnQEbKEOKo737cPgYBwY=
-github.com/prometheus/prometheus v0.40.3/go.mod h1:/UhsWkOXkO11wqTW2Bx5YDOwRweSDcaFBlTIzFe7P0Y=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
-github.com/sagikazarmark/crypt v0.17.0/go.mod h1:SMtHTvdmsZMuY/bpZoqokSoChIrcJ/epOxZN58PbZDg=
-github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
-github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
+github.com/sagikazarmark/locafero v0.6.0 h1:ON7AQg37yzcRPU69mt7gwhFEBwxI6P9T4Qu3N51bwOk=
+github.com/sagikazarmark/locafero v0.6.0/go.mod h1:77OmuIc6VTraTXKXIs/uvUxKGUXjE1GbemJYHqdNjX0=
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
github.com/shirou/gopsutil v2.18.12-0.20181220224138-a5ace91ccec8+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc=
-github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
github.com/sirupsen/logrus v1.6.1-0.20200528085638-6699a89a232f/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
@@ -239,82 +158,75 @@ github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ=
-github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk=
+github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
+github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/tevino/abool v1.2.0/go.mod h1:qc66Pna1RiIsPa7O4Egxxs9OqkuxDX55zznh9K07Tzg=
-github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI=
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.etcd.io/etcd/api/v3 v3.5.10/go.mod h1:TidfmT4Uycad3NM/o25fG3J07odo4GBB9hoxaodFCtI=
-go.etcd.io/etcd/client/pkg/v3 v3.5.10/go.mod h1:DYivfIviIuQ8+/lCq4vcxuseg2P2XbHygkKwFo9fc8U=
-go.etcd.io/etcd/client/v2 v2.305.10/go.mod h1:m3CKZi69HzilhVqtPDcjhSGp+kA1OmbNn0qamH80xjA=
-go.etcd.io/etcd/client/v3 v3.5.10/go.mod h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc=
-go.mongodb.org/mongo-driver v1.10.2/go.mod h1:z4XpeoU6w+9Vht+jAFyLgVrD+jGSQQe0+CBWFHNiHt8=
+github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80 h1:nrZ3ySNYwJbSpD6ce9duiP+QkD3JuLCcWkdaehUS/3Y=
+github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80/go.mod h1:iFyPdL66DjUD96XmzVL3ZntbzcflLnznH0fr99w5VqE=
+github.com/valyala/fastjson v1.6.3 h1:tAKFnnwmeMGPbwJ7IwxcTPCNr3uIzoIj3/Fh90ra4xc=
+github.com/valyala/fastjson v1.6.3/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY=
+go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.4/go.mod h1:l2MdsbKTocpPS5nQZscqTR9jd8u96VYZdcpF8Sye7mA=
-go.opentelemetry.io/otel v1.11.1/go.mod h1:1nNhXBbWSD0nsL38H6btgnFN2k4i0sNLHNNMZMSbUGE=
-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.1/go.mod h1:i8vjiSzbiUC7wOQplijSXMYUpNM93DtlS5CbUT+C6oQ=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.1/go.mod h1:19O5I2U5iys38SsmT2uDJja/300woyzE1KPIQxEUBUc=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.1/go.mod h1:QrRRQiY3kzAoYPNLP0W/Ikg0gR6V3LMc+ODSxr7yyvg=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.11.1/go.mod h1:X620Jww3RajCJXw/unA+8IRTgxkdS7pi+ZwK9b7KUJk=
-go.opentelemetry.io/otel/metric v0.33.0/go.mod h1:QlTYc+EnYNq/M2mNk1qDDMRLpqCOj2f/r5c7Fd5FYaI=
-go.opentelemetry.io/otel/sdk v1.11.1/go.mod h1:/l3FE4SupHJ12TduVjUkZtlfFqDCQJlOlithYrdktys=
-go.opentelemetry.io/otel/trace v1.11.1/go.mod h1:f/Q9G7vzk5u91PhbmKbg1Qn0rzH1LJ4vbPHFGkTPtOk=
-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
-go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
-go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
-go.uber.org/automaxprocs v1.5.1/go.mod h1:BF4eumQw0P9GtnuxxovUd06vwm1o18oMzFtK66vU6XU=
-go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo=
-go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
-go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
-go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
+go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
+go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
+go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
+go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
+go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
+go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
golang.org/dl v0.0.0-20200901180525-35ca1c5c19fb/go.mod h1:IUMfjQLJQd4UTqG1Z90tenwKoCX93Gn3MAQJMOSBsDQ=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
+golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
+golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
-golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
+golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY=
+golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
+golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
+golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM=
+golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo=
+golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -325,70 +237,63 @@ golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
-golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
+golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3-0.20190829152558-3d0f7978add9/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
-golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
+golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
-google.golang.org/api v0.153.0/go.mod h1:3qNJX5eOmhiWYc67jRA/3GsDw97UFb5ivv7Y2PrriAY=
+google.golang.org/api v0.182.0 h1:if5fPvudRQ78GeRx3RayIoiuV7modtErPIZC/T2bIvE=
+google.golang.org/api v0.182.0/go.mod h1:cGhjy4caqA5yXRzEhkHI8Y9mfyC2VLTlER2l08xaqtM=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY=
-google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e h1:Elxv5MwEkCI9f5SkoL6afed6NTdxaGoAo39eANBwHL8=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
-google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
+google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
+google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.1-0.20181123051433-bcbf6e613274+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
-gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-k8s.io/api v0.25.3/go.mod h1:o42gKscFrEVjHdQnyRenACrMtbuJsVdP+WVjqejfzmI=
-k8s.io/apimachinery v0.25.3/go.mod h1:jaF9C/iPNM1FuLl7Zuy5b9v+n35HGSh6AQ4HYRkCqwo=
-k8s.io/client-go v0.25.3/go.mod h1:t39LPczAIMwycjcXkVc+CB+PZV69jQuNx4um5ORDjQA=
-k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
-k8s.io/klog/v2 v2.80.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
-k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU=
-k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
-sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
-sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
diff --git a/integration/databricks/api.go b/integration/databricks/api.go
deleted file mode 100644
index 9efa7fe..0000000
--- a/integration/databricks/api.go
+++ /dev/null
@@ -1,253 +0,0 @@
-package databricks
-
-import (
- "encoding/json"
- "errors"
- log "github.com/sirupsen/logrus"
- "newrelic/multienv/integration/utils"
- "newrelic/multienv/pkg/config"
- "newrelic/multienv/pkg/connect"
- "newrelic/multienv/pkg/model"
- "reflect"
- "time"
-)
-
-func GetDatabricksConnectors(pipeConfig *config.PipelineConfig) ([]connect.Connector, error) {
-
- licenseKey := ""
- databricksEndpoint := ""
- databricksCloudProvider := ""
-
- if databricksLicenseKey, ok := pipeConfig.GetString("db_access_token"); ok {
- licenseKey = databricksLicenseKey
- } else {
- return nil, errors.New("config key 'db_access_token' doesn't exist")
- }
-
- if databricksEndpointUrl, ok := pipeConfig.GetString("databricks_endpoint"); ok {
- databricksEndpoint = databricksEndpointUrl
- } else {
- return nil, errors.New("config key 'databricks_endpoint' doesn't exist")
- }
-
- if databricksCloud, ok := pipeConfig.GetString("db_cloud"); ok {
- databricksCloudProvider = databricksCloud
- } else {
- return nil, errors.New("config key 'db_cloud' doesn't exist")
- }
-
- var headers = make(map[string]string)
- headers["Authorization"] = "Bearer " + licenseKey
- headers["Content-Type"] = "application/json"
- headers["Accept"] = "application/json"
-
- var connectors []connect.Connector
-
- queryHistoryConnector := connect.MakeHttpGetConnector(databricksEndpoint+"/api/2.0/sql/history/queries?include_metrics=true", headers)
-
- if databricksCloudProvider == "GCP" {
- queryHistoryConnector.SetConnectorModelName("GCPDatabricksQueryList")
- } else if databricksCloudProvider == "AWS" {
- queryHistoryConnector.SetConnectorModelName("AWSDatabricksQueryList")
- } else if databricksCloudProvider == "AZURE" {
- queryHistoryConnector.SetConnectorModelName("AzureDatabricksQueryList")
- }
-
- jobRunsListConnector := connect.MakeHttpGetConnector(databricksEndpoint+"/api/2.1/jobs/runs/list", headers)
- if databricksCloudProvider == "GCP" {
- jobRunsListConnector.SetConnectorModelName("GCPDatabricksJobsRunsList")
- } else if databricksCloudProvider == "AWS" {
- jobRunsListConnector.SetConnectorModelName("AWSDatabricksJobsRunsList")
- } else if databricksCloudProvider == "AZURE" {
- jobRunsListConnector.SetConnectorModelName("AzureDatabricksJobsRunsList")
- }
-
- pipelinesListConnector := connect.MakeHttpGetConnector(databricksEndpoint+"/api/2.0/pipelines?max_results=100", headers)
- if databricksCloudProvider == "GCP" {
- pipelinesListConnector.SetConnectorModelName("GCPDatabricksPipelinesList")
- } else if databricksCloudProvider == "AWS" {
- pipelinesListConnector.SetConnectorModelName("AWSDatabricksPipelinesList")
- } else if databricksCloudProvider == "AZURE" {
- pipelinesListConnector.SetConnectorModelName("AzureDatabricksPipelinesList")
- }
-
- connectors = append(connectors, &jobRunsListConnector, &queryHistoryConnector)
-
- return connectors, nil
-}
-
-func InitDatabricksProc(pipeConfig *config.PipelineConfig) (config.ProcConfig, error) {
-
- recv_interval = int(pipeConfig.Interval)
- if recv_interval == 0 {
- log.Warn("Interval not set, using 5 seconds")
- recv_interval = 5
- }
-
- return config.ProcConfig{}, nil
-}
-
-// DatabricksProc processes a Databricks API response into MELT models.
-func DatabricksProc(data any) []model.MeltModel {
-
- responseModel := data.(map[string]any)["model"]
- responseData := data.(map[string]any)["response"]
-
- out := make([]model.MeltModel, 0)
-
- switch responseModel {
-
- case "AWSDatabricksQueryList":
-
- var databricksJobsListModel = AWSQueriesList{}
- modelJson, _ := json.Marshal(responseData)
- err := json.Unmarshal(modelJson, &databricksJobsListModel)
- if err != nil {
- return nil
- }
-
- for i := 0; i < len(databricksJobsListModel.Res); i++ {
- e := reflect.ValueOf(&databricksJobsListModel.Res[i]).Elem()
- tags := make(map[string]interface{})
- tags["QueryId"] = databricksJobsListModel.Res[i].QueryId
- metricModels := utils.CreateMetricModels("AWSDatabricksQuery", e, tags)
- out = append(out, metricModels...)
- }
-
- case "AWSDatabricksJobsRunsList":
- var databricksJobsListModel = AWSJobRuns{}
- modelJson, _ := json.Marshal(responseData)
- err := json.Unmarshal(modelJson, &databricksJobsListModel)
- if err != nil {
- return nil
- }
-
- for i := 0; i < len(databricksJobsListModel.Runs); i++ {
-
- metricModel := createEventModels("AWS", databricksJobsListModel.Runs[i])
- out = append(out, metricModel)
- }
-
- case "AWSDatabricksPipelinesList":
- var databricksJobsListModel = AWSPipelinesList{}
- modelJson, _ := json.Marshal(responseData)
- err := json.Unmarshal(modelJson, &databricksJobsListModel)
- if err != nil {
- return nil
- }
-
- for i := 0; i < len(databricksJobsListModel.Statuses); i++ {
-
- metricModel := createEventModels("AWS", databricksJobsListModel.Statuses[i])
- out = append(out, metricModel)
- }
-
- case "GCPDatabricksQueryList":
-
- var databricksJobsListModel = GCPQueriesList{}
- modelJson, _ := json.Marshal(responseData)
- err := json.Unmarshal(modelJson, &databricksJobsListModel)
- if err != nil {
- return nil
- }
-
- for i := 0; i < len(databricksJobsListModel.Res); i++ {
- e := reflect.ValueOf(&databricksJobsListModel.Res[i]).Elem()
- tags := make(map[string]interface{})
- tags["QueryId"] = databricksJobsListModel.Res[i].QueryId
- metricModels := utils.CreateMetricModels("GCPDatabricksQuery", e, tags)
- out = append(out, metricModels...)
- }
-
- case "GCPDatabricksJobsRunsList":
- var databricksJobsListModel = GCPJobRuns{}
- modelJson, _ := json.Marshal(responseData)
- err := json.Unmarshal(modelJson, &databricksJobsListModel)
- if err != nil {
- return nil
- }
-
- for i := 0; i < len(databricksJobsListModel.Runs); i++ {
-
- metricModel := createEventModels("GCP", databricksJobsListModel.Runs[i])
- out = append(out, metricModel)
- }
-
- case "GCPDatabricksPipelinesList":
- var databricksJobsListModel = GCPPipelinesList{}
- modelJson, _ := json.Marshal(responseData)
- err := json.Unmarshal(modelJson, &databricksJobsListModel)
- if err != nil {
- return nil
- }
-
- for i := 0; i < len(databricksJobsListModel.Statuses); i++ {
-
- metricModel := createEventModels("GCP", databricksJobsListModel.Statuses[i])
- out = append(out, metricModel)
- }
- case "AzureDatabricksQueryList":
-
- var databricksJobsListModel = AzureQueriesList{}
- modelJson, _ := json.Marshal(responseData)
- err := json.Unmarshal(modelJson, &databricksJobsListModel)
- if err != nil {
- return nil
- }
-
- for i := 0; i < len(databricksJobsListModel.Res); i++ {
- e := reflect.ValueOf(&databricksJobsListModel.Res[i]).Elem()
- tags := make(map[string]interface{})
- tags["QueryId"] = databricksJobsListModel.Res[i].QueryId
- metricModels := utils.CreateMetricModels("AzureDatabricksQuery", e, tags)
- out = append(out, metricModels...)
- }
-
- case "AzureDatabricksJobsRunsList":
- var databricksJobsListModel = AzureJobRuns{}
- modelJson, _ := json.Marshal(responseData)
- err := json.Unmarshal(modelJson, &databricksJobsListModel)
- if err != nil {
- return nil
- }
-
- for i := 0; i < len(databricksJobsListModel.Runs); i++ {
-
- metricModel := createEventModels("Azure", databricksJobsListModel.Runs[i])
- out = append(out, metricModel)
- }
-
- case "AzureDatabricksPipelinesList":
- var databricksJobsListModel = AzurePipelinesList{}
- modelJson, _ := json.Marshal(responseData)
- err := json.Unmarshal(modelJson, &databricksJobsListModel)
- if err != nil {
- return nil
- }
-
- for i := 0; i < len(databricksJobsListModel.Statuses); i++ {
-
- metricModel := createEventModels("Azure", databricksJobsListModel.Statuses[i])
- out = append(out, metricModel)
- }
- default:
- log.Println("Unknown response model in Databricks integration")
- }
-
- return out
-}
-
-func createEventModels(cloud string, data any) model.MeltModel {
- jsonBytes, err := json.Marshal(data)
- if err != nil {
- log.Println(err.Error())
- }
- var jsonString map[string]interface{}
-
- err = json.Unmarshal(jsonBytes, &jsonString)
- if err != nil {
- log.Println(err.Error())
- }
-
- return model.MakeEvent(cloud+"DatabricksJobsRuns", jsonString, time.Now())
-}
diff --git a/integration/databricks/databricks.go b/integration/databricks/databricks.go
deleted file mode 100644
index 5a313a7..0000000
--- a/integration/databricks/databricks.go
+++ /dev/null
@@ -1,1005 +0,0 @@
-package databricks
-
-var recv_interval = 0
-
-type AWSJobRuns struct {
- Runs []struct {
- JobId int `json:"job_id"`
- RunId int `json:"run_id"`
- NumberInJob int `json:"number_in_job"`
- CreatorUserName string `json:"creator_user_name"`
- OriginalAttemptRunId int `json:"original_attempt_run_id"`
- State struct {
- LifeCycleState string `json:"life_cycle_state"`
- QueueReason string `json:"queue_reason"`
- ResultState string `json:"result_state"`
- UserCancelledOrTimedout bool `json:"user_cancelled_or_timedout"`
- StateMessage string `json:"state_message"`
- } `json:"state"`
- Schedule struct {
- QuartzCronExpression string `json:"quartz_cron_expression"`
- TimezoneId string `json:"timezone_id"`
- PauseStatus string `json:"pause_status"`
- } `json:"schedule"`
- Tasks []struct {
- SetupDuration int `json:"setup_duration"`
- StartTime int64 `json:"start_time"`
- TaskKey string `json:"task_key"`
- State struct {
- LifeCycleState string `json:"life_cycle_state"`
- ResultState string `json:"result_state,omitempty"`
- StateMessage string `json:"state_message"`
- UserCancelledOrTimedout bool `json:"user_cancelled_or_timedout"`
- } `json:"state"`
- Description string `json:"description"`
- JobClusterKey string `json:"job_cluster_key,omitempty"`
- EndTime int64 `json:"end_time"`
- RunPageUrl string `json:"run_page_url"`
- RunId int `json:"run_id"`
- ClusterInstance struct {
- ClusterId string `json:"cluster_id"`
- SparkContextId string `json:"spark_context_id,omitempty"`
- } `json:"cluster_instance"`
- SparkJarTask struct {
- MainClassName string `json:"main_class_name"`
- } `json:"spark_jar_task,omitempty"`
- Libraries []struct {
- Jar string `json:"jar"`
- } `json:"libraries,omitempty"`
- AttemptNumber int `json:"attempt_number"`
- CleanupDuration int `json:"cleanup_duration"`
- ExecutionDuration int `json:"execution_duration"`
- RunIf string `json:"run_if"`
- NotebookTask struct {
- NotebookPath string `json:"notebook_path"`
- Source string `json:"source"`
- } `json:"notebook_task,omitempty"`
- DependsOn []struct {
- TaskKey string `json:"task_key"`
- } `json:"depends_on,omitempty"`
- NewCluster struct {
- SparkVersion string `json:"spark_version"`
- NodeTypeId interface{} `json:"node_type_id"`
- SparkConf struct {
- SparkSpeculation bool `json:"spark.speculation"`
- } `json:"spark_conf"`
- Autoscale struct {
- MinWorkers int `json:"min_workers"`
- MaxWorkers int `json:"max_workers"`
- } `json:"autoscale"`
- } `json:"new_cluster,omitempty"`
- ExistingClusterId string `json:"existing_cluster_id,omitempty"`
- } `json:"tasks"`
- JobClusters []struct {
- JobClusterKey string `json:"job_cluster_key"`
- NewCluster struct {
- SparkVersion string `json:"spark_version"`
- NodeTypeId interface{} `json:"node_type_id"`
- SparkConf struct {
- SparkSpeculation bool `json:"spark.speculation"`
- } `json:"spark_conf"`
- Autoscale struct {
- MinWorkers int `json:"min_workers"`
- MaxWorkers int `json:"max_workers"`
- } `json:"autoscale"`
- } `json:"new_cluster"`
- } `json:"job_clusters"`
- ClusterSpec struct {
- ExistingClusterId string `json:"existing_cluster_id"`
- NewCluster struct {
- NumWorkers int `json:"num_workers"`
- Autoscale struct {
- MinWorkers int `json:"min_workers"`
- MaxWorkers int `json:"max_workers"`
- } `json:"autoscale"`
- ClusterName string `json:"cluster_name"`
- SparkVersion string `json:"spark_version"`
- SparkConf struct {
- Property1 string `json:"property1"`
- Property2 string `json:"property2"`
- } `json:"spark_conf"`
- AwsAttributes struct {
- FirstOnDemand string `json:"first_on_demand"`
- Availability string `json:"availability"`
- ZoneId string `json:"zone_id"`
- InstanceProfileArn string `json:"instance_profile_arn"`
- SpotBidPricePercent string `json:"spot_bid_price_percent"`
- EbsVolumeType string `json:"ebs_volume_type"`
- EbsVolumeCount string `json:"ebs_volume_count"`
- EbsVolumeSize int `json:"ebs_volume_size"`
- EbsVolumeIops int `json:"ebs_volume_iops"`
- EbsVolumeThroughput int `json:"ebs_volume_throughput"`
- } `json:"aws_attributes"`
- NodeTypeId string `json:"node_type_id"`
- DriverNodeTypeId string `json:"driver_node_type_id"`
- SshPublicKeys []string `json:"ssh_public_keys"`
- CustomTags struct {
- Property1 string `json:"property1"`
- Property2 string `json:"property2"`
- } `json:"custom_tags"`
- ClusterLogConf struct {
- Dbfs struct {
- Destination string `json:"destination"`
- } `json:"dbfs"`
- S3 struct {
- Destination string `json:"destination"`
- Region string `json:"region"`
- Endpoint string `json:"endpoint"`
- EnableEncryption bool `json:"enable_encryption"`
- EncryptionType string `json:"encryption_type"`
- KmsKey string `json:"kms_key"`
- CannedAcl string `json:"canned_acl"`
- } `json:"s3"`
- } `json:"cluster_log_conf"`
- InitScripts []struct {
- Workspace struct {
- Destination string `json:"destination"`
- } `json:"workspace"`
- Volumes struct {
- Destination string `json:"destination"`
- } `json:"volumes"`
- S3 struct {
- Destination string `json:"destination"`
- Region string `json:"region"`
- Endpoint string `json:"endpoint"`
- EnableEncryption bool `json:"enable_encryption"`
- EncryptionType string `json:"encryption_type"`
- KmsKey string `json:"kms_key"`
- CannedAcl string `json:"canned_acl"`
- } `json:"s3"`
- File struct {
- Destination string `json:"destination"`
- } `json:"file"`
- Dbfs struct {
- Destination string `json:"destination"`
- } `json:"dbfs"`
- Abfss struct {
- Destination string `json:"destination"`
- } `json:"abfss"`
- Gcs struct {
- Destination string `json:"destination"`
- } `json:"gcs"`
- } `json:"init_scripts"`
- SparkEnvVars struct {
- Property1 string `json:"property1"`
- Property2 string `json:"property2"`
- } `json:"spark_env_vars"`
- AutoterminationMinutes int `json:"autotermination_minutes"`
- EnableElasticDisk bool `json:"enable_elastic_disk"`
- ClusterSource string `json:"cluster_source"`
- InstancePoolId string `json:"instance_pool_id"`
- PolicyId string `json:"policy_id"`
- EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption"`
- DriverInstancePoolId string `json:"driver_instance_pool_id"`
- WorkloadType struct {
- Clients struct {
- Notebooks string `json:"notebooks"`
- Jobs string `json:"jobs"`
- } `json:"clients"`
- } `json:"workload_type"`
- RuntimeEngine string `json:"runtime_engine"`
- DockerImage struct {
- Url string `json:"url"`
- BasicAuth struct {
- Username string `json:"username"`
- Password string `json:"password"`
- } `json:"basic_auth"`
- } `json:"docker_image"`
- DataSecurityMode string `json:"data_security_mode"`
- SingleUserName string `json:"single_user_name"`
- ApplyPolicyDefaultValues string `json:"apply_policy_default_values"`
- } `json:"new_cluster"`
- Libraries []struct {
- Jar string `json:"jar"`
- Egg string `json:"egg"`
- Pypi struct {
- Package string `json:"package"`
- Repo string `json:"repo"`
- } `json:"pypi"`
- Maven struct {
- Coordinates string `json:"coordinates"`
- Repo string `json:"repo"`
- Exclusions []string `json:"exclusions"`
- } `json:"maven"`
- Cran struct {
- Package string `json:"package"`
- Repo string `json:"repo"`
- } `json:"cran"`
- Whl string `json:"whl"`
- } `json:"libraries"`
- } `json:"cluster_spec"`
- ClusterInstance struct {
- ClusterId string `json:"cluster_id"`
- SparkContextId string `json:"spark_context_id"`
- } `json:"cluster_instance"`
- GitSource struct {
- GitUrl string `json:"git_url"`
- GitBranch string `json:"git_branch"`
- GitProvider string `json:"git_provider"`
- } `json:"git_source"`
- OverridingParameters struct {
- JarParams []string `json:"jar_params"`
- NotebookParams struct {
- Name string `json:"name"`
- Age string `json:"age"`
- } `json:"notebook_params"`
- PythonParams []string `json:"python_params"`
- SparkSubmitParams []string `json:"spark_submit_params"`
- PythonNamedParams struct {
- Name string `json:"name"`
- Data string `json:"data"`
- } `json:"python_named_params"`
- PipelineParams struct {
- FullRefresh bool `json:"full_refresh"`
- } `json:"pipeline_params"`
- SqlParams struct {
- Name string `json:"name"`
- Age string `json:"age"`
- } `json:"sql_params"`
- DbtCommands []string `json:"dbt_commands"`
- JobParameters struct {
- Name string `json:"name"`
- Age string `json:"age"`
- } `json:"job_parameters"`
- } `json:"overriding_parameters"`
- StartTime int64 `json:"start_time"`
- SetupDuration int `json:"setup_duration"`
- ExecutionDuration int `json:"execution_duration"`
- CleanupDuration int `json:"cleanup_duration"`
- EndTime int64 `json:"end_time"`
- TriggerInfo struct {
- RunId int `json:"run_id"`
- } `json:"trigger_info"`
- RunDuration int `json:"run_duration"`
- Trigger string `json:"trigger"`
- RunName string `json:"run_name"`
- RunPageUrl string `json:"run_page_url"`
- RunType string `json:"run_type"`
- AttemptNumber int `json:"attempt_number"`
- JobParameters []struct {
- Name string `json:"name"`
- Default string `json:"default"`
- Value string `json:"value"`
- } `json:"job_parameters"`
- } `json:"runs"`
- HasMore bool `json:"has_more"`
- NextPageToken string `json:"next_page_token"`
- PrevPageToken string `json:"prev_page_token"`
-}
-
-type AWSQueriesList struct {
- NextPageToken string `json:"next_page_token"`
- HasNextPage bool `json:"has_next_page"`
- Res []struct {
- QueryId string `json:"query_id"`
- Status string `json:"status"`
- QueryText string `json:"query_text"`
- QueryStartTimeMs int64 `json:"query_start_time_ms"`
- ExecutionEndTimeMs int64 `json:"execution_end_time_ms"`
- QueryEndTimeMs int64 `json:"query_end_time_ms"`
- UserId int64 `json:"user_id"`
- UserName string `json:"user_name"`
- SparkUiUrl string `json:"spark_ui_url"`
- EndpointId string `json:"endpoint_id"`
- WarehouseId string `json:"warehouse_id"`
- LookupKey string `json:"lookup_key"`
- ErrorMessage string `json:"error_message"`
- RowsProduced int `json:"rows_produced"`
- CanSubscribeToLiveQuery bool `json:"canSubscribeToLiveQuery"`
- Metrics struct {
- TotalTimeMs int `json:"total_time_ms"`
- ReadBytes int `json:"read_bytes"`
- RowsProducedCount int `json:"rows_produced_count"`
- CompilationTimeMs int `json:"compilation_time_ms"`
- ExecutionTimeMs int `json:"execution_time_ms"`
- ReadRemoteBytes int `json:"read_remote_bytes"`
- WriteRemoteBytes int `json:"write_remote_bytes"`
- ReadCacheBytes int `json:"read_cache_bytes"`
- SpillToDiskBytes int `json:"spill_to_disk_bytes"`
- TaskTotalTimeMs int `json:"task_total_time_ms"`
- ReadFilesCount int `json:"read_files_count"`
- ReadPartitionsCount int `json:"read_partitions_count"`
- PhotonTotalTimeMs int `json:"photon_total_time_ms"`
- RowsReadCount int `json:"rows_read_count"`
- ResultFetchTimeMs int `json:"result_fetch_time_ms"`
- NetworkSentBytes int `json:"network_sent_bytes"`
- ResultFromCache bool `json:"result_from_cache"`
- PrunedBytes int `json:"pruned_bytes"`
- PrunedFilesCount int `json:"pruned_files_count"`
- ProvisioningQueueStartTimestamp int64 `json:"provisioning_queue_start_timestamp"`
- OverloadingQueueStartTimestamp int64 `json:"overloading_queue_start_timestamp"`
- QueryCompilationStartTimestamp int64 `json:"query_compilation_start_timestamp"`
- MetadataTimeMs int `json:"metadata_time_ms"`
- PlanningTimeMs int `json:"planning_time_ms"`
- QueryExecutionTimeMs int `json:"query_execution_time_ms"`
- } `json:"metrics"`
- IsFinal bool `json:"is_final"`
- ChannelUsed struct {
- Name string `json:"name"`
- DbsqlVersion string `json:"dbsql_version"`
- } `json:"channel_used"`
- Duration int `json:"duration"`
- ExecutedAsUserId int64 `json:"executed_as_user_id"`
- ExecutedAsUserName string `json:"executed_as_user_name"`
- PlansState string `json:"plans_state"`
- StatementType string `json:"statement_type"`
- } `json:"res"`
-}
-
-type GCPQueriesList struct {
- NextPageToken string `json:"next_page_token"`
- HasNextPage bool `json:"has_next_page"`
- Res []struct {
- QueryId string `json:"query_id"`
- Status string `json:"status"`
- QueryText string `json:"query_text"`
- QueryStartTimeMs int64 `json:"query_start_time_ms"`
- ExecutionEndTimeMs int64 `json:"execution_end_time_ms"`
- QueryEndTimeMs int64 `json:"query_end_time_ms"`
- UserId int64 `json:"user_id"`
- UserName string `json:"user_name"`
- SparkUiUrl string `json:"spark_ui_url"`
- EndpointId string `json:"endpoint_id"`
- WarehouseId string `json:"warehouse_id"`
- LookupKey string `json:"lookup_key"`
- ErrorMessage string `json:"error_message"`
- RowsProduced int `json:"rows_produced"`
- CanSubscribeToLiveQuery bool `json:"canSubscribeToLiveQuery"`
- Metrics struct {
- TotalTimeMs int `json:"total_time_ms"`
- ReadBytes int `json:"read_bytes"`
- RowsProducedCount int `json:"rows_produced_count"`
- CompilationTimeMs int `json:"compilation_time_ms"`
- ExecutionTimeMs int `json:"execution_time_ms"`
- ReadRemoteBytes int `json:"read_remote_bytes"`
- WriteRemoteBytes int `json:"write_remote_bytes"`
- ReadCacheBytes int `json:"read_cache_bytes"`
- SpillToDiskBytes int `json:"spill_to_disk_bytes"`
- TaskTotalTimeMs int `json:"task_total_time_ms"`
- ReadFilesCount int `json:"read_files_count"`
- ReadPartitionsCount int `json:"read_partitions_count"`
- PhotonTotalTimeMs int `json:"photon_total_time_ms"`
- RowsReadCount int `json:"rows_read_count"`
- ResultFetchTimeMs int `json:"result_fetch_time_ms"`
- NetworkSentBytes int `json:"network_sent_bytes"`
- ResultFromCache bool `json:"result_from_cache"`
- PrunedBytes int `json:"pruned_bytes"`
- PrunedFilesCount int `json:"pruned_files_count"`
- ProvisioningQueueStartTimestamp int64 `json:"provisioning_queue_start_timestamp"`
- OverloadingQueueStartTimestamp int64 `json:"overloading_queue_start_timestamp"`
- QueryCompilationStartTimestamp int64 `json:"query_compilation_start_timestamp"`
- MetadataTimeMs int `json:"metadata_time_ms"`
- PlanningTimeMs int `json:"planning_time_ms"`
- QueryExecutionTimeMs int `json:"query_execution_time_ms"`
- } `json:"metrics"`
- IsFinal bool `json:"is_final"`
- ChannelUsed struct {
- Name string `json:"name"`
- DbsqlVersion string `json:"dbsql_version"`
- } `json:"channel_used"`
- Duration int `json:"duration"`
- ExecutedAsUserId int64 `json:"executed_as_user_id"`
- ExecutedAsUserName string `json:"executed_as_user_name"`
- PlansState string `json:"plans_state"`
- StatementType string `json:"statement_type"`
- } `json:"res"`
-}
-
-type AzureQueriesList struct {
- NextPageToken string `json:"next_page_token"`
- HasNextPage bool `json:"has_next_page"`
- Res []struct {
- QueryId string `json:"query_id"`
- Status string `json:"status"`
- QueryText string `json:"query_text"`
- QueryStartTimeMs int64 `json:"query_start_time_ms"`
- ExecutionEndTimeMs int64 `json:"execution_end_time_ms"`
- QueryEndTimeMs int64 `json:"query_end_time_ms"`
- UserId int64 `json:"user_id"`
- UserName string `json:"user_name"`
- SparkUiUrl string `json:"spark_ui_url"`
- EndpointId string `json:"endpoint_id"`
- WarehouseId string `json:"warehouse_id"`
- LookupKey string `json:"lookup_key"`
- ErrorMessage string `json:"error_message"`
- RowsProduced int `json:"rows_produced"`
- CanSubscribeToLiveQuery bool `json:"canSubscribeToLiveQuery"`
- Metrics struct {
- TotalTimeMs int `json:"total_time_ms"`
- ReadBytes int `json:"read_bytes"`
- RowsProducedCount int `json:"rows_produced_count"`
- CompilationTimeMs int `json:"compilation_time_ms"`
- ExecutionTimeMs int `json:"execution_time_ms"`
- ReadRemoteBytes int `json:"read_remote_bytes"`
- WriteRemoteBytes int `json:"write_remote_bytes"`
- ReadCacheBytes int `json:"read_cache_bytes"`
- SpillToDiskBytes int `json:"spill_to_disk_bytes"`
- TaskTotalTimeMs int `json:"task_total_time_ms"`
- ReadFilesCount int `json:"read_files_count"`
- ReadPartitionsCount int `json:"read_partitions_count"`
- PhotonTotalTimeMs int `json:"photon_total_time_ms"`
- RowsReadCount int `json:"rows_read_count"`
- ResultFetchTimeMs int `json:"result_fetch_time_ms"`
- NetworkSentBytes int `json:"network_sent_bytes"`
- ResultFromCache bool `json:"result_from_cache"`
- PrunedBytes int `json:"pruned_bytes"`
- PrunedFilesCount int `json:"pruned_files_count"`
- ProvisioningQueueStartTimestamp int64 `json:"provisioning_queue_start_timestamp"`
- OverloadingQueueStartTimestamp int64 `json:"overloading_queue_start_timestamp"`
- QueryCompilationStartTimestamp int64 `json:"query_compilation_start_timestamp"`
- MetadataTimeMs int `json:"metadata_time_ms"`
- PlanningTimeMs int `json:"planning_time_ms"`
- QueryExecutionTimeMs int `json:"query_execution_time_ms"`
- } `json:"metrics"`
- IsFinal bool `json:"is_final"`
- ChannelUsed struct {
- Name string `json:"name"`
- DbsqlVersion string `json:"dbsql_version"`
- } `json:"channel_used"`
- Duration int `json:"duration"`
- ExecutedAsUserId int64 `json:"executed_as_user_id"`
- ExecutedAsUserName string `json:"executed_as_user_name"`
- PlansState string `json:"plans_state"`
- StatementType string `json:"statement_type"`
- } `json:"res"`
-}
-
-type GCPJobRuns struct {
- Runs []struct {
- JobId int `json:"job_id"`
- RunId int `json:"run_id"`
- CreatorUserName string `json:"creator_user_name"`
- NumberInJob int `json:"number_in_job"`
- OriginalAttemptRunId int `json:"original_attempt_run_id"`
- State struct {
- LifeCycleState string `json:"life_cycle_state"`
- ResultState string `json:"result_state"`
- StateMessage string `json:"state_message"`
- UserCancelledOrTimedout bool `json:"user_cancelled_or_timedout"`
- QueueReason string `json:"queue_reason"`
- } `json:"state"`
- Schedule struct {
- QuartzCronExpression string `json:"quartz_cron_expression"`
- TimezoneId string `json:"timezone_id"`
- PauseStatus string `json:"pause_status"`
- } `json:"schedule"`
- ClusterSpec struct {
- ExistingClusterId string `json:"existing_cluster_id"`
- NewCluster struct {
- NumWorkers int `json:"num_workers"`
- Autoscale struct {
- MinWorkers int `json:"min_workers"`
- MaxWorkers int `json:"max_workers"`
- } `json:"autoscale"`
- ClusterName string `json:"cluster_name"`
- SparkVersion string `json:"spark_version"`
- SparkConf struct {
- Property1 string `json:"property1"`
- Property2 string `json:"property2"`
- } `json:"spark_conf"`
- GcpAttributes struct {
- GoogleServiceAccount string `json:"google_service_account"`
- BootDiskSize int `json:"boot_disk_size"`
- Availability string `json:"availability"`
- LocalSsdCount int `json:"local_ssd_count"`
- UsePreemptibleExecutors bool `json:"use_preemptible_executors"`
- ZoneId string `json:"zone_id"`
- } `json:"gcp_attributes"`
- NodeTypeId string `json:"node_type_id"`
- DriverNodeTypeId string `json:"driver_node_type_id"`
- SshPublicKeys []string `json:"ssh_public_keys"`
- CustomTags struct {
- Property1 string `json:"property1"`
- Property2 string `json:"property2"`
- } `json:"custom_tags"`
- ClusterLogConf struct {
- Dbfs struct {
- Destination string `json:"destination"`
- } `json:"dbfs"`
- } `json:"cluster_log_conf"`
- InitScripts []struct {
- Workspace struct {
- Destination string `json:"destination"`
- } `json:"workspace"`
- Volumes struct {
- Destination string `json:"destination"`
- } `json:"volumes"`
- File struct {
- Destination string `json:"destination"`
- } `json:"file"`
- Dbfs struct {
- Destination string `json:"destination"`
- } `json:"dbfs"`
- Abfss struct {
- Destination string `json:"destination"`
- } `json:"abfss"`
- Gcs struct {
- Destination string `json:"destination"`
- } `json:"gcs"`
- } `json:"init_scripts"`
- SparkEnvVars struct {
- Property1 string `json:"property1"`
- Property2 string `json:"property2"`
- } `json:"spark_env_vars"`
- AutoterminationMinutes int `json:"autotermination_minutes"`
- EnableElasticDisk bool `json:"enable_elastic_disk"`
- ClusterSource string `json:"cluster_source"`
- InstancePoolId string `json:"instance_pool_id"`
- PolicyId string `json:"policy_id"`
- EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption"`
- DriverInstancePoolId string `json:"driver_instance_pool_id"`
- WorkloadType struct {
- Clients struct {
- Notebooks string `json:"notebooks"`
- Jobs string `json:"jobs"`
- } `json:"clients"`
- } `json:"workload_type"`
- RuntimeEngine string `json:"runtime_engine"`
- DockerImage struct {
- Url string `json:"url"`
- BasicAuth struct {
- Username string `json:"username"`
- Password string `json:"password"`
- } `json:"basic_auth"`
- } `json:"docker_image"`
- DataSecurityMode string `json:"data_security_mode"`
- SingleUserName string `json:"single_user_name"`
- ApplyPolicyDefaultValues string `json:"apply_policy_default_values"`
- } `json:"new_cluster"`
- JobClusterKey string `json:"job_cluster_key"`
- Libraries []struct {
- Jar string `json:"jar"`
- Egg string `json:"egg"`
- Pypi struct {
- Package string `json:"package"`
- Repo string `json:"repo"`
- } `json:"pypi"`
- Maven struct {
- Coordinates string `json:"coordinates"`
- Repo string `json:"repo"`
- Exclusions []string `json:"exclusions"`
- } `json:"maven"`
- Cran struct {
- Package string `json:"package"`
- Repo string `json:"repo"`
- } `json:"cran"`
- Whl string `json:"whl"`
- } `json:"libraries"`
- } `json:"cluster_spec"`
- ClusterInstance struct {
- ClusterId string `json:"cluster_id"`
- SparkContextId string `json:"spark_context_id"`
- } `json:"cluster_instance"`
- OverridingParameters struct {
- JarParams []string `json:"jar_params"`
- NotebookParams struct {
- Age string `json:"age"`
- Name string `json:"name"`
- } `json:"notebook_params"`
- PythonParams []string `json:"python_params"`
- SparkSubmitParams []string `json:"spark_submit_params"`
- PythonNamedParams struct {
- Data string `json:"data"`
- Name string `json:"name"`
- } `json:"python_named_params"`
- DbtCommands []string `json:"dbt_commands"`
- PipelineParams struct {
- FullRefresh bool `json:"full_refresh"`
- } `json:"pipeline_params"`
- } `json:"overriding_parameters"`
- StartTime int64 `json:"start_time"`
- SetupDuration int `json:"setup_duration"`
- ExecutionDuration int `json:"execution_duration"`
- CleanupDuration int `json:"cleanup_duration"`
- EndTime int64 `json:"end_time"`
- RunDuration int `json:"run_duration"`
- QueueDuration int64 `json:"queue_duration"`
- Trigger string `json:"trigger"`
- TriggerInfo struct {
- RunId int `json:"run_id"`
- } `json:"trigger_info"`
- RunName string `json:"run_name"`
- RunPageUrl string `json:"run_page_url"`
- RunType string `json:"run_type"`
- Tasks []struct {
- SetupDuration int `json:"setup_duration"`
- StartTime int64 `json:"start_time"`
- TaskKey string `json:"task_key"`
- State struct {
- LifeCycleState string `json:"life_cycle_state"`
- ResultState string `json:"result_state,omitempty"`
- StateMessage string `json:"state_message"`
- UserCancelledOrTimedout bool `json:"user_cancelled_or_timedout"`
- } `json:"state"`
- Description string `json:"description"`
- JobClusterKey string `json:"job_cluster_key,omitempty"`
- EndTime int64 `json:"end_time"`
- RunPageUrl string `json:"run_page_url"`
- RunId int `json:"run_id"`
- ClusterInstance struct {
- ClusterId string `json:"cluster_id"`
- SparkContextId string `json:"spark_context_id,omitempty"`
- } `json:"cluster_instance"`
- SparkJarTask struct {
- MainClassName string `json:"main_class_name"`
- } `json:"spark_jar_task,omitempty"`
- Libraries []struct {
- Jar string `json:"jar"`
- } `json:"libraries,omitempty"`
- AttemptNumber int `json:"attempt_number"`
- CleanupDuration int `json:"cleanup_duration"`
- ExecutionDuration int `json:"execution_duration"`
- RunIf string `json:"run_if"`
- NotebookTask struct {
- NotebookPath string `json:"notebook_path"`
- Source string `json:"source"`
- } `json:"notebook_task,omitempty"`
- DependsOn []struct {
- TaskKey string `json:"task_key"`
- } `json:"depends_on,omitempty"`
- NewCluster struct {
- Autoscale struct {
- MaxWorkers int `json:"max_workers"`
- MinWorkers int `json:"min_workers"`
- } `json:"autoscale"`
- NodeTypeId interface{} `json:"node_type_id"`
- SparkConf struct {
- SparkSpeculation bool `json:"spark.speculation"`
- } `json:"spark_conf"`
- SparkVersion string `json:"spark_version"`
- } `json:"new_cluster,omitempty"`
- ExistingClusterId string `json:"existing_cluster_id,omitempty"`
- } `json:"tasks"`
- Description string `json:"description"`
- AttemptNumber int `json:"attempt_number"`
- JobClusters []struct {
- JobClusterKey string `json:"job_cluster_key"`
- NewCluster struct {
- Autoscale struct {
- MaxWorkers int `json:"max_workers"`
- MinWorkers int `json:"min_workers"`
- } `json:"autoscale"`
- NodeTypeId interface{} `json:"node_type_id"`
- SparkConf struct {
- SparkSpeculation bool `json:"spark.speculation"`
- } `json:"spark_conf"`
- SparkVersion string `json:"spark_version"`
- } `json:"new_cluster"`
- } `json:"job_clusters"`
- GitSource struct {
- GitBranch string `json:"git_branch"`
- GitProvider string `json:"git_provider"`
- GitUrl string `json:"git_url"`
- } `json:"git_source"`
- RepairHistory []struct {
- Type string `json:"type"`
- StartTime int64 `json:"start_time"`
- EndTime int64 `json:"end_time"`
- State struct {
- LifeCycleState string `json:"life_cycle_state"`
- ResultState string `json:"result_state"`
- StateMessage string `json:"state_message"`
- UserCancelledOrTimedout bool `json:"user_cancelled_or_timedout"`
- QueueReason string `json:"queue_reason"`
- } `json:"state"`
- Id int64 `json:"id"`
- TaskRunIds []int64 `json:"task_run_ids"`
- } `json:"repair_history"`
- JobParameters []struct {
- Default string `json:"default"`
- Name string `json:"name"`
- Value string `json:"value"`
- } `json:"job_parameters"`
- } `json:"runs"`
- HasMore bool `json:"has_more"`
- NextPageToken string `json:"next_page_token"`
- PrevPageToken string `json:"prev_page_token"`
-}
-
-type AzureJobRuns struct {
- Runs []struct {
- JobId int `json:"job_id"`
- RunId int `json:"run_id"`
- CreatorUserName string `json:"creator_user_name"`
- NumberInJob int `json:"number_in_job"`
- OriginalAttemptRunId int `json:"original_attempt_run_id"`
- State struct {
- LifeCycleState string `json:"life_cycle_state"`
- ResultState string `json:"result_state"`
- StateMessage string `json:"state_message"`
- UserCancelledOrTimedout bool `json:"user_cancelled_or_timedout"`
- QueueReason string `json:"queue_reason"`
- } `json:"state"`
- Schedule struct {
- QuartzCronExpression string `json:"quartz_cron_expression"`
- TimezoneId string `json:"timezone_id"`
- PauseStatus string `json:"pause_status"`
- } `json:"schedule"`
- ClusterSpec struct {
- ExistingClusterId string `json:"existing_cluster_id"`
- NewCluster struct {
- NumWorkers int `json:"num_workers"`
- Autoscale struct {
- MinWorkers int `json:"min_workers"`
- MaxWorkers int `json:"max_workers"`
- } `json:"autoscale"`
- ClusterName string `json:"cluster_name"`
- SparkVersion string `json:"spark_version"`
- SparkConf struct {
- Property1 string `json:"property1"`
- Property2 string `json:"property2"`
- } `json:"spark_conf"`
- AzureAttributes struct {
- LogAnalyticsInfo struct {
- LogAnalyticsWorkspaceId string `json:"log_analytics_workspace_id"`
- LogAnalyticsPrimaryKey string `json:"log_analytics_primary_key"`
- } `json:"log_analytics_info"`
- FirstOnDemand string `json:"first_on_demand"`
- Availability string `json:"availability"`
- SpotBidMaxPrice string `json:"spot_bid_max_price"`
- } `json:"azure_attributes"`
- NodeTypeId string `json:"node_type_id"`
- DriverNodeTypeId string `json:"driver_node_type_id"`
- SshPublicKeys []string `json:"ssh_public_keys"`
- CustomTags struct {
- Property1 string `json:"property1"`
- Property2 string `json:"property2"`
- } `json:"custom_tags"`
- ClusterLogConf struct {
- Dbfs struct {
- Destination string `json:"destination"`
- } `json:"dbfs"`
- } `json:"cluster_log_conf"`
- InitScripts []struct {
- Workspace struct {
- Destination string `json:"destination"`
- } `json:"workspace"`
- Volumes struct {
- Destination string `json:"destination"`
- } `json:"volumes"`
- File struct {
- Destination string `json:"destination"`
- } `json:"file"`
- Dbfs struct {
- Destination string `json:"destination"`
- } `json:"dbfs"`
- Abfss struct {
- Destination string `json:"destination"`
- } `json:"abfss"`
- Gcs struct {
- Destination string `json:"destination"`
- } `json:"gcs"`
- } `json:"init_scripts"`
- SparkEnvVars struct {
- Property1 string `json:"property1"`
- Property2 string `json:"property2"`
- } `json:"spark_env_vars"`
- AutoterminationMinutes int `json:"autotermination_minutes"`
- EnableElasticDisk bool `json:"enable_elastic_disk"`
- ClusterSource string `json:"cluster_source"`
- InstancePoolId string `json:"instance_pool_id"`
- PolicyId string `json:"policy_id"`
- EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption"`
- DriverInstancePoolId string `json:"driver_instance_pool_id"`
- WorkloadType struct {
- Clients struct {
- Notebooks string `json:"notebooks"`
- Jobs string `json:"jobs"`
- } `json:"clients"`
- } `json:"workload_type"`
- RuntimeEngine string `json:"runtime_engine"`
- DockerImage struct {
- Url string `json:"url"`
- BasicAuth struct {
- Username string `json:"username"`
- Password string `json:"password"`
- } `json:"basic_auth"`
- } `json:"docker_image"`
- DataSecurityMode string `json:"data_security_mode"`
- SingleUserName string `json:"single_user_name"`
- ApplyPolicyDefaultValues string `json:"apply_policy_default_values"`
- } `json:"new_cluster"`
- JobClusterKey string `json:"job_cluster_key"`
- Libraries []struct {
- Jar string `json:"jar"`
- Egg string `json:"egg"`
- Pypi struct {
- Package string `json:"package"`
- Repo string `json:"repo"`
- } `json:"pypi"`
- Maven struct {
- Coordinates string `json:"coordinates"`
- Repo string `json:"repo"`
- Exclusions []string `json:"exclusions"`
- } `json:"maven"`
- Cran struct {
- Package string `json:"package"`
- Repo string `json:"repo"`
- } `json:"cran"`
- Whl string `json:"whl"`
- } `json:"libraries"`
- } `json:"cluster_spec"`
- ClusterInstance struct {
- ClusterId string `json:"cluster_id"`
- SparkContextId string `json:"spark_context_id"`
- } `json:"cluster_instance"`
- OverridingParameters struct {
- JarParams []string `json:"jar_params"`
- NotebookParams struct {
- Age string `json:"age"`
- Name string `json:"name"`
- } `json:"notebook_params"`
- PythonParams []string `json:"python_params"`
- SparkSubmitParams []string `json:"spark_submit_params"`
- PythonNamedParams struct {
- Data string `json:"data"`
- Name string `json:"name"`
- } `json:"python_named_params"`
- SqlParams struct {
- Age string `json:"age"`
- Name string `json:"name"`
- } `json:"sql_params"`
- DbtCommands []string `json:"dbt_commands"`
- PipelineParams struct {
- FullRefresh bool `json:"full_refresh"`
- } `json:"pipeline_params"`
- } `json:"overriding_parameters"`
- StartTime int64 `json:"start_time"`
- SetupDuration int `json:"setup_duration"`
- ExecutionDuration int `json:"execution_duration"`
- CleanupDuration int `json:"cleanup_duration"`
- EndTime int64 `json:"end_time"`
- RunDuration int `json:"run_duration"`
- QueueDuration int64 `json:"queue_duration"`
- Trigger string `json:"trigger"`
- TriggerInfo struct {
- RunId int `json:"run_id"`
- } `json:"trigger_info"`
- RunName string `json:"run_name"`
- RunPageUrl string `json:"run_page_url"`
- RunType string `json:"run_type"`
- Tasks []struct {
- SetupDuration int `json:"setup_duration"`
- StartTime int64 `json:"start_time"`
- TaskKey string `json:"task_key"`
- State struct {
- LifeCycleState string `json:"life_cycle_state"`
- ResultState string `json:"result_state,omitempty"`
- StateMessage string `json:"state_message"`
- UserCancelledOrTimedout bool `json:"user_cancelled_or_timedout"`
- } `json:"state"`
- Description string `json:"description"`
- JobClusterKey string `json:"job_cluster_key,omitempty"`
- EndTime int64 `json:"end_time"`
- RunPageUrl string `json:"run_page_url"`
- RunId int `json:"run_id"`
- ClusterInstance struct {
- ClusterId string `json:"cluster_id"`
- SparkContextId string `json:"spark_context_id,omitempty"`
- } `json:"cluster_instance"`
- SparkJarTask struct {
- MainClassName string `json:"main_class_name"`
- } `json:"spark_jar_task,omitempty"`
- Libraries []struct {
- Jar string `json:"jar"`
- } `json:"libraries,omitempty"`
- AttemptNumber int `json:"attempt_number"`
- CleanupDuration int `json:"cleanup_duration"`
- ExecutionDuration int `json:"execution_duration"`
- RunIf string `json:"run_if"`
- NotebookTask struct {
- NotebookPath string `json:"notebook_path"`
- Source string `json:"source"`
- } `json:"notebook_task,omitempty"`
- DependsOn []struct {
- TaskKey string `json:"task_key"`
- } `json:"depends_on,omitempty"`
- NewCluster struct {
- Autoscale struct {
- MaxWorkers int `json:"max_workers"`
- MinWorkers int `json:"min_workers"`
- } `json:"autoscale"`
- NodeTypeId interface{} `json:"node_type_id"`
- SparkConf struct {
- SparkSpeculation bool `json:"spark.speculation"`
- } `json:"spark_conf"`
- SparkVersion string `json:"spark_version"`
- } `json:"new_cluster,omitempty"`
- ExistingClusterId string `json:"existing_cluster_id,omitempty"`
- } `json:"tasks"`
- Description string `json:"description"`
- AttemptNumber int `json:"attempt_number"`
- JobClusters []struct {
- JobClusterKey string `json:"job_cluster_key"`
- NewCluster struct {
- Autoscale struct {
- MaxWorkers int `json:"max_workers"`
- MinWorkers int `json:"min_workers"`
- } `json:"autoscale"`
- NodeTypeId interface{} `json:"node_type_id"`
- SparkConf struct {
- SparkSpeculation bool `json:"spark.speculation"`
- } `json:"spark_conf"`
- SparkVersion string `json:"spark_version"`
- } `json:"new_cluster"`
- } `json:"job_clusters"`
- GitSource struct {
- GitBranch string `json:"git_branch"`
- GitProvider string `json:"git_provider"`
- GitUrl string `json:"git_url"`
- } `json:"git_source"`
- RepairHistory []struct {
- Type string `json:"type"`
- StartTime int64 `json:"start_time"`
- EndTime int64 `json:"end_time"`
- State struct {
- LifeCycleState string `json:"life_cycle_state"`
- ResultState string `json:"result_state"`
- StateMessage string `json:"state_message"`
- UserCancelledOrTimedout bool `json:"user_cancelled_or_timedout"`
- QueueReason string `json:"queue_reason"`
- } `json:"state"`
- Id int64 `json:"id"`
- TaskRunIds []int64 `json:"task_run_ids"`
- } `json:"repair_history"`
- JobParameters []struct {
- Default string `json:"default"`
- Name string `json:"name"`
- Value string `json:"value"`
- } `json:"job_parameters"`
- } `json:"runs"`
- HasMore bool `json:"has_more"`
- NextPageToken string `json:"next_page_token"`
- PrevPageToken string `json:"prev_page_token"`
-}
-
-type AWSPipelinesList struct {
- Statuses []struct {
- PipelineId string `json:"pipeline_id"`
- State string `json:"state"`
- ClusterId string `json:"cluster_id"`
- Name string `json:"name"`
- LatestUpdates []struct {
- UpdateId string `json:"update_id"`
- State string `json:"state"`
- CreationTime string `json:"creation_time"`
- } `json:"latest_updates"`
- CreatorUserName string `json:"creator_user_name"`
- RunAsUserName string `json:"run_as_user_name"`
- } `json:"statuses"`
- NextPageToken string `json:"next_page_token"`
-}
-
-type GCPPipelinesList struct {
- Statuses []struct {
- PipelineId string `json:"pipeline_id"`
- State string `json:"state"`
- ClusterId string `json:"cluster_id"`
- Name string `json:"name"`
- LatestUpdates []struct {
- UpdateId string `json:"update_id"`
- State string `json:"state"`
- CreationTime string `json:"creation_time"`
- } `json:"latest_updates"`
- CreatorUserName string `json:"creator_user_name"`
- RunAsUserName string `json:"run_as_user_name"`
- } `json:"statuses"`
- NextPageToken string `json:"next_page_token"`
-}
-
-type AzurePipelinesList struct {
- Statuses []struct {
- PipelineId string `json:"pipeline_id"`
- State string `json:"state"`
- ClusterId string `json:"cluster_id"`
- Name string `json:"name"`
- LatestUpdates []struct {
- UpdateId string `json:"update_id"`
- State string `json:"state"`
- CreationTime string `json:"creation_time"`
- } `json:"latest_updates"`
- CreatorUserName string `json:"creator_user_name"`
- RunAsUserName string `json:"run_as_user_name"`
- } `json:"statuses"`
- NextPageToken string `json:"next_page_token"`
-}
diff --git a/integration/integration.go b/integration/integration.go
deleted file mode 100644
index 32195b8..0000000
--- a/integration/integration.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package integration
-
-import (
- log "github.com/sirupsen/logrus"
- "newrelic/multienv/integration/databricks"
- "newrelic/multienv/integration/spark"
- "newrelic/multienv/pkg/config"
- "newrelic/multienv/pkg/connect"
- "newrelic/multienv/pkg/deser"
- "newrelic/multienv/pkg/model"
-)
-
-var recv_interval = 60
-
-// InitRecv Integration Receiver Initializer
-func InitRecv(pipeConfig *config.PipelineConfig) (config.RecvConfig, error) {
-
- recv_interval = int(pipeConfig.Interval)
- if recv_interval == 0 {
- log.Warn("Interval not set, using 60 seconds")
- recv_interval = 60
- }
-
- var connectors []connect.Connector
-
- sparkConnectors, _ := spark.GetSparkConnectors(pipeConfig)
- databricksConnectors, _ := databricks.GetDatabricksConnectors(pipeConfig)
-
- connectors = append(connectors, sparkConnectors...)
- connectors = append(connectors, databricksConnectors...)
-
- return config.RecvConfig{
- Connectors: connectors,
- Deser: deser.DeserJson,
- }, nil
-}
-
-// InitProc Integration Processor Initializer
-func InitProc(pipeConfig *config.PipelineConfig) (config.ProcConfig, error) {
-
- recv_interval = int(pipeConfig.Interval)
- if recv_interval == 0 {
- log.Warn("Interval not set, using 60 seconds")
- recv_interval = 60
- }
-
- _, sparkError := spark.InitSparkProc(pipeConfig)
- if sparkError != nil {
- return config.ProcConfig{}, sparkError
- }
-
- _, databricksError := databricks.InitDatabricksProc(pipeConfig)
- if databricksError != nil {
- return config.ProcConfig{}, databricksError
- }
- return config.ProcConfig{
- Model: map[string]any{},
- }, nil
-}
-
-// Integration Processor
-func Proc(data any) []model.MeltModel {
-
- modelName := data.(map[string]any)["model"].(string)
-
- switch modelName {
- case "SparkJob", "SparkExecutor", "SparkStage":
- return spark.SparkProc(data)
-
- case "AWSDatabricksQueryList",
- "AWSDatabricksJobsRunsList",
- "GCPDatabricksQueryList",
- "GCPDatabricksJobsRunsList",
- "AzureDatabricksQueryList",
- "AzureDatabricksJobsRunsList",
- "AWSPipelinesList",
- "AzurePipelinesList",
- "GCPPipelinesList":
- return databricks.DatabricksProc(data)
-
- default:
- log.Println("Unknown response model " + modelName)
- }
-
- return nil
-}
diff --git a/integration/spark/api.go b/integration/spark/api.go
deleted file mode 100644
index 8a3a1e1..0000000
--- a/integration/spark/api.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package spark
-
-import (
- "encoding/json"
- "errors"
- "newrelic/multienv/integration/utils"
- "newrelic/multienv/pkg/config"
- "newrelic/multienv/pkg/connect"
- "newrelic/multienv/pkg/model"
- "reflect"
-
- log "github.com/sirupsen/logrus"
-)
-
-func GetSparkConnectors(pipeConfig *config.PipelineConfig) ([]connect.Connector, error) {
- recv_interval = int(pipeConfig.Interval)
- if recv_interval == 0 {
- log.Warn("Interval not set, using 60 seconds")
- recv_interval = 60
- }
-
- licenseKey := ""
- sparkEndpoint := ""
-
- if databricksLicenseKey, ok := pipeConfig.GetString("db_access_token"); ok {
- licenseKey = databricksLicenseKey
- } else {
- return nil, errors.New("config key 'db_access_token' doesn't exist")
- }
-
- if sparkEndpointUrl, ok := pipeConfig.GetString("spark_endpoint"); ok {
- sparkEndpoint = sparkEndpointUrl
- } else {
- return nil, errors.New("config key 'spark_endpoint' doesn't exist")
- }
-
- var headers = make(map[string]string)
- headers["Authorization"] = "Bearer " + licenseKey
- headers["Content-Type"] = "application/json"
- headers["Accept"] = "application/json"
-
- apps, _ := GetSparkApplications(sparkEndpoint, headers)
-
- var connectors []connect.Connector
-
- for i := 0; i < len(apps); i++ {
-
- var appData map[string]interface{}
- appData = make(map[string]interface{})
- appData["sparkAppId"] = apps[i].ID
- appData["sparkAppName"] = apps[i].Name
-
- jobsConnector := connect.MakeHttpGetConnector(sparkEndpoint+"/api/v1/applications/"+apps[i].ID+"/jobs", headers)
- jobsConnector.SetConnectorModelName("SparkJob")
- jobsConnector.SetCustomData(appData)
-
- executorsConnector := connect.MakeHttpGetConnector(sparkEndpoint+"/api/v1/applications/"+apps[i].ID+"/executors", headers)
- executorsConnector.SetConnectorModelName("SparkExecutor")
- executorsConnector.SetCustomData(appData)
-
- stagesConnector := connect.MakeHttpGetConnector(sparkEndpoint+"/api/v1/applications/"+apps[i].ID+"/stages", headers)
- stagesConnector.SetConnectorModelName("SparkStage")
- stagesConnector.SetCustomData(appData)
-
- connectors = append(connectors, &jobsConnector, &executorsConnector, &stagesConnector)
- }
-
- return connectors, nil
-}
-
-func InitSparkProc(pipeConfig *config.PipelineConfig) (config.ProcConfig, error) {
- recv_interval = int(pipeConfig.Interval)
- if recv_interval == 0 {
- log.Warn("Interval not set, using 60 seconds")
- recv_interval = 60
- }
-
- return config.ProcConfig{}, nil
-}
-
-// SparkProc Proc Generate all kinds of data.
-func SparkProc(data any) []model.MeltModel {
-
- responseModel := data.(map[string]any)["model"]
- responseData := data.(map[string]any)["response"]
- customData := data.(map[string]any)["customData"].(map[string]interface{})
-
- switch responseModel {
- case "SparkStage":
- var sparkStagesModel = SparkStage{}
- tagPrefix := "sparkStage"
- metricPrefix := "spark.stage."
- modelJson, _ := json.Marshal(responseData)
- err := json.Unmarshal(modelJson, &sparkStagesModel)
- if err != nil {
- return nil
- }
-
- e := reflect.ValueOf(&sparkStagesModel).Elem()
- tags := make(map[string]interface{})
- tags["sparkAppId"] = customData["sparkAppId"]
- tags["sparkAppName"] = customData["sparkAppName"]
- stagesTags := make(map[string]interface{})
- utils.SetTags(tagPrefix, e, tags, stagesTags)
- return utils.CreateMetricModels(metricPrefix, e, stagesTags)
-
- case "SparkJob":
-
- var sparkJobsModel = SparkJob{}
- tagPrefix := "sparkJob"
- metricPrefix := "spark.job."
- modelJson, _ := json.Marshal(responseData)
- err := json.Unmarshal(modelJson, &sparkJobsModel)
- if err != nil {
- return nil
- }
-
- e := reflect.ValueOf(&sparkJobsModel).Elem()
- tags := make(map[string]interface{})
- tags["sparkAppId"] = customData["sparkAppId"]
- tags["sparkAppName"] = customData["sparkAppName"]
- sparkJobTags := make(map[string]interface{})
- utils.SetTags(tagPrefix, e, tags, sparkJobTags)
- return utils.CreateMetricModels(metricPrefix, e, sparkJobTags)
-
- case "SparkExecutor":
-
- var sparkExecutorsModel = SparkExecutor{}
- tagPrefix := "sparkExecutor"
- metricPrefix := "spark.executor."
- modelJson, _ := json.Marshal(responseData)
- err := json.Unmarshal(modelJson, &sparkExecutorsModel)
- if err != nil {
- return nil
- }
-
- e := reflect.ValueOf(&sparkExecutorsModel).Elem()
- tags := make(map[string]interface{})
- tags["sparkAppId"] = customData["sparkAppId"]
- tags["sparkAppName"] = customData["sparkAppName"]
- sparkExecutorTags := make(map[string]interface{})
- utils.SetTags(tagPrefix, e, tags, sparkExecutorTags)
- return utils.CreateMetricModels(metricPrefix, e, sparkExecutorTags)
-
- default:
- log.Println("Unknown response model in Spark integration")
- }
- return nil
-}
diff --git a/integration/spark/spark.go b/integration/spark/spark.go
deleted file mode 100644
index 9bd52b4..0000000
--- a/integration/spark/spark.go
+++ /dev/null
@@ -1,236 +0,0 @@
-package spark
-
-import (
- "encoding/json"
- log "github.com/sirupsen/logrus"
- "io"
- "net/http"
-)
-
-var recv_interval = 0
-var licenseKey = ""
-var sparkEndpoint = ""
-
-type ApplicationsResponse []struct {
- ID string `json:"id"`
- Name string `json:"name"`
- Attempts []struct {
- StartTime string `json:"startTime"`
- EndTime string `json:"endTime"`
- LastUpdated string `json:"lastUpdated"`
- Duration int64 `json:"duration"`
- SparkUser string `json:"sparkUser"`
- Completed bool `json:"completed"`
- AppSparkVersion string `json:"appSparkVersion"`
- EndTimeEpoch int64 `json:"endTimeEpoch"`
- StartTimeEpoch int64 `json:"startTimeEpoch"`
- LastUpdatedEpoch int64 `json:"lastUpdatedEpoch"`
- } `json:"attempts"`
-}
-
-type SparkJob struct {
- JobId int `json:"jobId"`
- Name string `json:"name"`
- Description string `json:"description"`
- SubmissionTime string `json:"submissionTime"`
- CompletionTime string `json:"completionTime"`
- StageIds []int `json:"stageIds"`
- JobGroup string `json:"jobGroup"`
- JobTags []interface{} `json:"jobTags"`
- Status string `json:"status"`
- NumTasks int `json:"numTasks"`
- NumActiveTasks int `json:"numActiveTasks"`
- NumCompletedTasks int `json:"numCompletedTasks"`
- NumSkippedTasks int `json:"numSkippedTasks"`
- NumFailedTasks int `json:"numFailedTasks"`
- NumKilledTasks int `json:"numKilledTasks"`
- NumCompletedIndices int `json:"numCompletedIndices"`
- NumActiveStages int `json:"numActiveStages"`
- NumCompletedStages int `json:"numCompletedStages"`
- NumSkippedStages int `json:"numSkippedStages"`
- NumFailedStages int `json:"numFailedStages"`
- KilledTasksSummary struct {
- } `json:"killedTasksSummary"`
-}
-
-type SparkExecutor struct {
- Id string `json:"id"`
- HostPort string `json:"hostPort"`
- IsActive bool `json:"isActive"`
- RddBlocks int `json:"rddBlocks"`
- MemoryUsed int `json:"memoryUsed"`
- DiskUsed int `json:"diskUsed"`
- TotalCores int `json:"totalCores"`
- MaxTasks int `json:"maxTasks"`
- ActiveTasks int `json:"activeTasks"`
- FailedTasks int `json:"failedTasks"`
- CompletedTasks int `json:"completedTasks"`
- TotalTasks int `json:"totalTasks"`
- TotalDuration int `json:"totalDuration"`
- TotalGCTime int `json:"totalGCTime"`
- TotalInputBytes int `json:"totalInputBytes"`
- TotalShuffleRead int `json:"totalShuffleRead"`
- TotalShuffleWrite int `json:"totalShuffleWrite"`
- IsBlacklisted bool `json:"isBlacklisted"`
- MaxMemory int `json:"maxMemory"`
- AddTime string `json:"addTime"`
- ExecutorLogs struct {
- Stdout string `json:"stdout,omitempty"`
- Stderr string `json:"stderr,omitempty"`
- } `json:"executorLogs"`
- MemoryMetrics struct {
- UsedOnHeapStorageMemory int `json:"usedOnHeapStorageMemory"`
- UsedOffHeapStorageMemory int `json:"usedOffHeapStorageMemory"`
- TotalOnHeapStorageMemory int `json:"totalOnHeapStorageMemory"`
- TotalOffHeapStorageMemory int `json:"totalOffHeapStorageMemory"`
- } `json:"memoryMetrics"`
- BlacklistedInStages []interface{} `json:"blacklistedInStages"`
- PeakMemoryMetrics struct {
- JVMHeapMemory int `json:"JVMHeapMemory"`
- JVMOffHeapMemory int `json:"JVMOffHeapMemory"`
- OnHeapExecutionMemory int `json:"OnHeapExecutionMemory"`
- OffHeapExecutionMemory int `json:"OffHeapExecutionMemory"`
- OnHeapStorageMemory int `json:"OnHeapStorageMemory"`
- OffHeapStorageMemory int `json:"OffHeapStorageMemory"`
- OnHeapUnifiedMemory int `json:"OnHeapUnifiedMemory"`
- OffHeapUnifiedMemory int `json:"OffHeapUnifiedMemory"`
- DirectPoolMemory int `json:"DirectPoolMemory"`
- MappedPoolMemory int `json:"MappedPoolMemory"`
- NettyDirectMemory int `json:"NettyDirectMemory"`
- JvmDirectMemory int `json:"JvmDirectMemory"`
- SparkDirectMemoryOverLimit int `json:"SparkDirectMemoryOverLimit"`
- TotalOffHeapMemory int `json:"TotalOffHeapMemory"`
- ProcessTreeJVMVMemory int `json:"ProcessTreeJVMVMemory"`
- ProcessTreeJVMRSSMemory int `json:"ProcessTreeJVMRSSMemory"`
- ProcessTreePythonVMemory int `json:"ProcessTreePythonVMemory"`
- ProcessTreePythonRSSMemory int `json:"ProcessTreePythonRSSMemory"`
- ProcessTreeOtherVMemory int `json:"ProcessTreeOtherVMemory"`
- ProcessTreeOtherRSSMemory int `json:"ProcessTreeOtherRSSMemory"`
- MinorGCCount int `json:"MinorGCCount"`
- MinorGCTime int `json:"MinorGCTime"`
- MajorGCCount int `json:"MajorGCCount"`
- MajorGCTime int `json:"MajorGCTime"`
- TotalGCTime int `json:"TotalGCTime"`
- } `json:"peakMemoryMetrics"`
- Attributes struct {
- } `json:"attributes"`
- Resources struct {
- } `json:"resources"`
- ResourceProfileId int `json:"resourceProfileId"`
- IsExcluded bool `json:"isExcluded"`
- ExcludedInStages []interface{} `json:"excludedInStages"`
-}
-
-type SparkStage struct {
- Status string `json:"status"`
- StageId int `json:"stageId"`
- AttemptId int `json:"attemptId"`
- NumTasks int `json:"numTasks"`
- NumActiveTasks int `json:"numActiveTasks"`
- NumCompleteTasks int `json:"numCompleteTasks"`
- NumFailedTasks int `json:"numFailedTasks"`
- NumKilledTasks int `json:"numKilledTasks"`
- NumCompletedIndices int `json:"numCompletedIndices"`
- PeakNettyDirectMemory int `json:"peakNettyDirectMemory"`
- PeakJvmDirectMemory int `json:"peakJvmDirectMemory"`
- PeakSparkDirectMemoryOverLimit int `json:"peakSparkDirectMemoryOverLimit"`
- PeakTotalOffHeapMemory int `json:"peakTotalOffHeapMemory"`
- SubmissionTime string `json:"submissionTime"`
- FirstTaskLaunchedTime string `json:"firstTaskLaunchedTime"`
- CompletionTime string `json:"completionTime"`
- ExecutorDeserializeTime int `json:"executorDeserializeTime"`
- ExecutorDeserializeCpuTime int `json:"executorDeserializeCpuTime"`
- ExecutorRunTime int `json:"executorRunTime"`
- ExecutorCpuTime int `json:"executorCpuTime"`
- ResultSize int `json:"resultSize"`
- JvmGcTime int `json:"jvmGcTime"`
- ResultSerializationTime int `json:"resultSerializationTime"`
- MemoryBytesSpilled int `json:"memoryBytesSpilled"`
- DiskBytesSpilled int `json:"diskBytesSpilled"`
- PeakExecutionMemory int `json:"peakExecutionMemory"`
- InputBytes int `json:"inputBytes"`
- InputRecords int `json:"inputRecords"`
- OutputBytes int `json:"outputBytes"`
- OutputRecords int `json:"outputRecords"`
- ShuffleRemoteBlocksFetched int `json:"shuffleRemoteBlocksFetched"`
- ShuffleLocalBlocksFetched int `json:"shuffleLocalBlocksFetched"`
- ShuffleFetchWaitTime int `json:"shuffleFetchWaitTime"`
- ShuffleRemoteBytesRead int `json:"shuffleRemoteBytesRead"`
- ShuffleRemoteBytesReadToDisk int `json:"shuffleRemoteBytesReadToDisk"`
- ShuffleLocalBytesRead int `json:"shuffleLocalBytesRead"`
- ShuffleReadBytes int `json:"shuffleReadBytes"`
- ShuffleReadRecords int `json:"shuffleReadRecords"`
- ShuffleCorruptMergedBlockChunks int `json:"shuffleCorruptMergedBlockChunks"`
- ShuffleMergedFetchFallbackCount int `json:"shuffleMergedFetchFallbackCount"`
- ShuffleMergedRemoteBlocksFetched int `json:"shuffleMergedRemoteBlocksFetched"`
- ShuffleMergedLocalBlocksFetched int `json:"shuffleMergedLocalBlocksFetched"`
- ShuffleMergedRemoteChunksFetched int `json:"shuffleMergedRemoteChunksFetched"`
- ShuffleMergedLocalChunksFetched int `json:"shuffleMergedLocalChunksFetched"`
- ShuffleMergedRemoteBytesRead int `json:"shuffleMergedRemoteBytesRead"`
- ShuffleMergedLocalBytesRead int `json:"shuffleMergedLocalBytesRead"`
- ShuffleRemoteReqsDuration int `json:"shuffleRemoteReqsDuration"`
- ShuffleMergedRemoteReqsDuration int `json:"shuffleMergedRemoteReqsDuration"`
- ShuffleWriteBytes int `json:"shuffleWriteBytes"`
- ShuffleWriteTime int `json:"shuffleWriteTime"`
- ShuffleWriteRecords int `json:"shuffleWriteRecords"`
- Name string `json:"name"`
- Description string `json:"description"`
- Details string `json:"details"`
- SchedulingPool string `json:"schedulingPool"`
- RddIds []int `json:"rddIds"`
- AccumulatorUpdates []interface{} `json:"accumulatorUpdates"`
- KilledTasksSummary struct {
- } `json:"killedTasksSummary"`
- ResourceProfileId int `json:"resourceProfileId"`
- PeakExecutorMetrics struct {
- JVMHeapMemory int `json:"JVMHeapMemory"`
- JVMOffHeapMemory int `json:"JVMOffHeapMemory"`
- OnHeapExecutionMemory int `json:"OnHeapExecutionMemory"`
- OffHeapExecutionMemory int `json:"OffHeapExecutionMemory"`
- OnHeapStorageMemory int `json:"OnHeapStorageMemory"`
- OffHeapStorageMemory int `json:"OffHeapStorageMemory"`
- OnHeapUnifiedMemory int `json:"OnHeapUnifiedMemory"`
- OffHeapUnifiedMemory int `json:"OffHeapUnifiedMemory"`
- DirectPoolMemory int `json:"DirectPoolMemory"`
- MappedPoolMemory int `json:"MappedPoolMemory"`
- NettyDirectMemory int `json:"NettyDirectMemory"`
- JvmDirectMemory int `json:"JvmDirectMemory"`
- SparkDirectMemoryOverLimit int `json:"SparkDirectMemoryOverLimit"`
- TotalOffHeapMemory int `json:"TotalOffHeapMemory"`
- ProcessTreeJVMVMemory int `json:"ProcessTreeJVMVMemory"`
- ProcessTreeJVMRSSMemory int `json:"ProcessTreeJVMRSSMemory"`
- ProcessTreePythonVMemory int `json:"ProcessTreePythonVMemory"`
- ProcessTreePythonRSSMemory int `json:"ProcessTreePythonRSSMemory"`
- ProcessTreeOtherVMemory int `json:"ProcessTreeOtherVMemory"`
- ProcessTreeOtherRSSMemory int `json:"ProcessTreeOtherRSSMemory"`
- MinorGCCount int `json:"MinorGCCount"`
- MinorGCTime int `json:"MinorGCTime"`
- MajorGCCount int `json:"MajorGCCount"`
- MajorGCTime int `json:"MajorGCTime"`
- TotalGCTime int `json:"TotalGCTime"`
- } `json:"peakExecutorMetrics"`
- IsShufflePushEnabled bool `json:"isShufflePushEnabled"`
- ShuffleMergersCount int `json:"shuffleMergersCount"`
-}
-
-func GetSparkApplications(url string, headers map[string]string) (ApplicationsResponse, error) {
- req, _ := http.NewRequest("GET", url+"/api/v1/applications", nil)
-
- for key, value := range headers {
- req.Header.Add(key, value)
- }
-
- client := &http.Client{}
- resp, err := client.Do(req)
- if err != nil {
- log.Println("Error on response.\n[ERROR] -", err)
- }
-
- body, _ := io.ReadAll(resp.Body)
-
- var apps ApplicationsResponse
- if err := json.Unmarshal(body, &apps); err != nil {
- log.Printf("Error: %s", err)
- }
- return apps, nil
-}
diff --git a/integration/utils/utils.go b/integration/utils/utils.go
deleted file mode 100644
index c8171fd..0000000
--- a/integration/utils/utils.go
+++ /dev/null
@@ -1,154 +0,0 @@
-package utils
-
-import (
- "encoding/json"
- log "github.com/sirupsen/logrus"
- "newrelic/multienv/pkg/model"
- "reflect"
- "strconv"
- "strings"
- "time"
-)
-
-func StringInSlice(str string, list []string) bool {
- for _, v := range list {
- if v == str {
- return true
- }
- }
- return false
-}
-
-func Flatten(input map[string]interface{}, currentKey string, result map[string]interface{}) map[string]interface{} {
- for key, value := range input {
- newKey := key
- if currentKey != "" {
- newKey = currentKey + "." + key
- }
-
- // check if the value is another nested map
- if nestedMap, ok := value.(map[string]interface{}); ok {
- result = Flatten(nestedMap, newKey, result)
- } else {
- result[newKey] = value
- }
- }
- return result
-}
-
-func ConvertJsonToMap(jsonData []byte) (map[string]interface{}, error) {
- var JSONObject map[string]interface{}
- err := json.Unmarshal(jsonData, &JSONObject)
- if err != nil {
- return nil, err
- }
- return JSONObject, nil
-}
-
-func CreateMetricModels(prefix string, e reflect.Value, tags map[string]interface{}) []model.MeltModel {
-
- out := make([]model.MeltModel, 0)
-
- for i := 0; i < e.NumField(); i++ {
-
- var metricValue float64
- var metricName string
- mtime := time.Now()
-
- if e.Field(i).Kind() == reflect.Struct {
- log.Trace("encountered nested structure ")
- out = append(out, CreateMetricModels(prefix, e.Field(i), tags)...)
- continue
- }
-
- metricName = prefix + e.Type().Field(i).Name
-
- switch n := e.Field(i).Interface().(type) {
- case int:
- metricValue = float64(n)
- case int64:
- metricValue = float64(n)
- case uint64:
- metricValue = float64(n)
- case float64:
- metricValue = n
- case bool:
- metricValue = float64(0)
- if n {
- metricValue = float64(1)
- }
- default:
- log.Trace("setMetrics :skipping metric: ", n, metricName, metricValue)
- }
-
- meltMetric := model.MakeGaugeMetric(
- metricName, model.Numeric{FltVal: metricValue}, mtime)
-
- tags["instrumentation.name"] = "newrelic-databricks-integration"
- meltMetric.Attributes = tags
-
- out = append(out, meltMetric)
- }
-
- return out
-
-}
-
-func SetTags(prefix string, e reflect.Value, tags map[string]interface{}, metricTags map[string]interface{}) {
-
- for k, v := range tags {
- metricTags[k] = v
-
- }
-
- if e.Kind() == reflect.Interface || e.Kind() == reflect.Ptr {
- e = e.Elem()
- }
-
- log.Println(e)
- switch e.Kind() {
- case reflect.Struct:
- for i := 0; i < e.NumField(); i++ {
- var mname string
- mname = prefix + e.Type().Field(i).Name
- switch n := e.Field(i).Interface().(type) {
- case string:
- log.Trace("setTags : adding tags ", mname, "=", n)
- metricTags[mname] = n
- case []int:
- metricTags[mname] = SplitToString(n, ",")
- default:
- // Handle other cases if needed
- }
- }
- case reflect.Map:
- for _, key := range e.MapKeys() {
- var mname string
- mname = prefix + key.String()
- val := e.MapIndex(key)
- switch n := val.Interface().(type) {
- case string:
- log.Trace("setTags : adding tags ", mname, "=", n)
- metricTags[mname] = n
- case []int:
- metricTags[mname] = SplitToString(n, ",")
- default:
- // Handle other cases if needed
- }
- }
- default:
- log.Println("Unsupported kind:", e.Kind())
- }
-}
-
-func SplitToString(a []int, sep string) string {
- if len(a) == 0 {
- return ""
- }
-
- b := make([]string, len(a))
- for i, v := range a {
- b[i] = strconv.Itoa(v)
- }
- return strings.Join(b, sep)
-}
diff --git a/internal/databricks/databricks.go b/internal/databricks/databricks.go
new file mode 100644
index 0000000..1aa30e7
--- /dev/null
+++ b/internal/databricks/databricks.go
@@ -0,0 +1,222 @@
+package databricks
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "strings"
+
+ databricksSdk "github.com/databricks/databricks-sdk-go"
+ databricksSdkConfig "github.com/databricks/databricks-sdk-go/config"
+ databricksSdkCompute "github.com/databricks/databricks-sdk-go/service/compute"
+ "github.com/newrelic-experimental/newrelic-databricks-integration/internal/spark"
+
+ "github.com/newrelic/newrelic-labs-sdk/pkg/integration"
+ "github.com/newrelic/newrelic-labs-sdk/pkg/integration/connectors"
+ "github.com/newrelic/newrelic-labs-sdk/pkg/integration/log"
+ "github.com/spf13/viper"
+)
+
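+// DatabricksAuthenticator is a connectors.HttpAuthenticator that adds a
+// Bearer Authorization header carrying the configured Databricks personal
+// access token to each outgoing request.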
+type DatabricksAuthenticator struct {
+ accessToken string
+}
+
+func NewDatabricksAuthenticator(accessToken string) (
+ *DatabricksAuthenticator,
+) {
+ return &DatabricksAuthenticator{ accessToken }
+}
+
+func (b *DatabricksAuthenticator) Authenticate(
+ connector *connectors.HttpConnector,
+ req *http.Request,
+) error {
+ req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", b.accessToken))
+
+ return nil
+}
+
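+// InitPipelines builds the Databricks workspace client and, when Spark metric
+// collection is enabled, initializes a Spark pipeline for every running
+// UI- or API-created cluster in the workspace.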
+func InitPipelines(
+ ctx context.Context,
+ i *integration.LabsIntegration,
+) error {
+ // Databricks config
+ databricksConfig := &databricksSdk.Config{}
+
+ // Should we collect Spark metrics?
+ sparkMetrics := true
+ if viper.IsSet("databricks.sparkMetrics") {
+ sparkMetrics = viper.GetBool("databricks.sparkMetrics")
+ }
+
+ // Databricks credentials
+ //
+ // If we are not collecting Spark metrics, we support PAT authentication
+	// using our config file or environment variables, or any other
+	// authentication method supported by the SDK itself, e.g. via the
+	// environment, .databrickscfg, or cloud-specific methods.
+ //
+ // If we are collecting Spark metrics, we only support PAT authentication
+ // via our config file or environment variables for host and token because
+ // I'm not sure how we would use other forms to get through the driver to
+ // the Spark UI. HTTP Basic might be a possibility but that is a @TODO.
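+	//
+	// As an illustrative sketch only (the key names come from the viper
+	// lookups in this function; the values are placeholders):
+	//
+	//   databricks:
+	//     workspaceHost: <workspace-instance-host>
+	//     accessToken: <personal-access-token>
+	//     sparkMetrics: true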
+
+ databricksAccessToken := viper.GetString("databricks.accessToken")
+ if databricksAccessToken != "" {
+ databricksConfig.Token = databricksAccessToken
+ databricksConfig.Credentials = databricksSdkConfig.PatCredentials{}
+ } else if sparkMetrics {
+ return fmt.Errorf("missing databricks personal access token")
+ }
+
+ // Workspace Host
+ databricksWorkspaceHost := viper.GetString("databricks.workspaceHost")
+ if databricksWorkspaceHost != "" {
+ databricksConfig.Host = databricksWorkspaceHost
+ } else if sparkMetrics {
+ return fmt.Errorf("missing databricks workspace host")
+ }
+
+ w, err := databricksSdk.NewWorkspaceClient(databricksConfig)
+ if err != nil {
+ return err
+ }
+
+ if sparkMetrics {
+ // Initialize the Spark pipelines
+ authenticator := NewDatabricksAuthenticator(databricksAccessToken)
+
+ all, err := w.Clusters.ListAll(
+ ctx,
+ databricksSdkCompute.ListClustersRequest{},
+ )
+ if err != nil {
+ return fmt.Errorf("failed to list clusters: %w", err)
+ }
+
+ for _, c := range all {
+ if c.State == databricksSdkCompute.StateRunning {
+ if c.ClusterSource == databricksSdkCompute.ClusterSourceUi ||
+ c.ClusterSource == databricksSdkCompute.ClusterSourceApi {
+
+				// Resolve the Spark context UI URL for the cluster
+ log.Debugf(
+ "resolving Spark context UI URL for cluster %s",
+ c.ClusterName,
+ )
+ sparkContextUiUrl, err := getSparkContextUiUrlForCluster(
+ ctx,
+ w,
+ &c,
+ databricksWorkspaceHost,
+ )
+ if err != nil {
+ return err
+ }
+
+ // Initialize spark pipelines
+ log.Debugf(
+ "initializing Spark pipeline for cluster %s with spark context UI URL %s",
+ c.ClusterName,
+ sparkContextUiUrl,
+ )
+ err = spark.InitPipelinesForContext(
+ i,
+ sparkContextUiUrl,
+ authenticator,
+ map[string] string {
+ "clusterProvider": "databricks",
+ "databricksClusterId": c.ClusterId,
+ "databricksClusterName": c.ClusterName,
+ },
+ )
+ if err != nil {
+ return err
+ }
+ }
+ }
+ }
+ }
+
+ // @TODO: initialize databricks pipelines here
+
+ return nil
+}
+
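+// getSparkContextUiUrlForCluster resolves the driver-proxy URL for a running
+// cluster's Spark context UI by executing a small Python command on the
+// cluster to read the workspace org ID and the Spark UI port.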
+func getSparkContextUiUrlForCluster(
+ ctx context.Context,
+ w *databricksSdk.WorkspaceClient,
+ c *databricksSdkCompute.ClusterDetails,
+ databricksWorkspaceHost string,
+) (string, error) {
+ // @see https://databrickslabs.github.io/overwatch/assets/_index/realtime_helpers.html
+
+ clusterId := c.ClusterId
+
+ waitContextStatus, err := w.CommandExecution.Create(
+ ctx,
+ databricksSdkCompute.CreateContext{
+ ClusterId: clusterId,
+ Language: databricksSdkCompute.LanguagePython,
+ },
+ )
+ if err != nil {
+ return "", err
+ }
+
+ execContext, err := waitContextStatus.Get()
+ if err != nil {
+ return "", err
+ }
+
+ cmd := databricksSdkCompute.Command{
+ ClusterId: clusterId,
+ Command: `
+ print(f'{spark.conf.get("spark.databricks.clusterUsageTags.clusterOwnerOrgId")}')
+ print(f'{spark.conf.get("spark.ui.port")}')
+ `,
+ ContextId: execContext.Id,
+ Language: databricksSdkCompute.LanguagePython,
+ }
+
+ waitCommandStatus, err := w.CommandExecution.Execute(
+ ctx,
+ cmd,
+ )
+ if err != nil {
+ return "", err
+ }
+
+ resp, err := waitCommandStatus.Get()
+ if err != nil {
+ return "", err
+ }
+
+	data, ok := resp.Results.Data.(string)
+ if !ok {
+ return "", fmt.Errorf("command result is not a string value")
+ }
+
+ vals := strings.Split(data, "\n")
+ if len(vals) != 2 {
+ return "", fmt.Errorf("invalid command result")
+ }
+
+ if vals[0] == "" || vals[1] == "" {
+ return "", fmt.Errorf("empty command results")
+ }
+
+ // @TODO: I think this URL pattern only works for multi-tenant accounts.
+ // We may need a flag for single tenant accounts and use the o/0 form
+ // shown on the overwatch site.
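+	//
+	// For reference, with the values resolved above the URL takes the form
+	// (placeholders only):
+	//
+	//   https://<workspaceHost>/driver-proxy-api/o/<orgId>/<clusterId>/<sparkUiPort>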
+
+ url := fmt.Sprintf(
+ "https://%s/driver-proxy-api/o/%s/%s/%s",
+ databricksWorkspaceHost,
+ vals[0],
+ clusterId,
+ vals[1],
+ )
+
+ return url, nil
+}
diff --git a/internal/databricks/receiver.go b/internal/databricks/receiver.go
new file mode 100644
index 0000000..108c196
--- /dev/null
+++ b/internal/databricks/receiver.go
@@ -0,0 +1,32 @@
+package databricks
+
+import (
+ "context"
+
+ databricksSdk "github.com/databricks/databricks-sdk-go"
+ "github.com/newrelic/newrelic-labs-sdk/pkg/integration/model"
+)
+
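+// DatabricksSdkReceiver is a metrics receiver backed by a Databricks SDK
+// workspace client. PollMetrics is currently a no-op placeholder (see the
+// @TODO for Databricks pipelines in databricks.go).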
+type DatabricksSdkReceiver struct {
+ w *databricksSdk.WorkspaceClient
+}
+
+func NewDatabricksSdkReceiver() (*DatabricksSdkReceiver, error) {
+ w, err := databricksSdk.NewWorkspaceClient()
+ if err != nil {
+ return nil, err
+ }
+
+ return &DatabricksSdkReceiver{ w }, nil
+}
+
+func (d *DatabricksSdkReceiver) GetId() string {
+ return "databricks-sdk-receiver"
+}
+
+func (d *DatabricksSdkReceiver) PollMetrics(
+ context context.Context,
+ writer chan <- model.Metric,
+) error {
+ return nil
+}
diff --git a/internal/spark/receiver.go b/internal/spark/receiver.go
new file mode 100644
index 0000000..ac2018f
--- /dev/null
+++ b/internal/spark/receiver.go
@@ -0,0 +1,1728 @@
+package spark
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "maps"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/newrelic/newrelic-labs-sdk/pkg/integration"
+ "github.com/newrelic/newrelic-labs-sdk/pkg/integration/connectors"
+ "github.com/newrelic/newrelic-labs-sdk/pkg/integration/log"
+ "github.com/newrelic/newrelic-labs-sdk/pkg/integration/model"
+ "github.com/newrelic/newrelic-labs-sdk/pkg/integration/pipeline"
+)
+
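+// SparkMetricsReceiver is a pipeline.MetricsReceiver that polls the Spark
+// context UI REST API and emits gauge metrics for each application's
+// executors, jobs, stages, and RDDs.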
+type SparkMetricsReceiver struct {
+ i *integration.LabsIntegration
+ sparkContextUiUrl string
+ authenticator connectors.HttpAuthenticator
+ metricPrefix string
+ tags map[string]string
+}
+
+func NewSparkMetricsReceiver(
+ i *integration.LabsIntegration,
+ sparkContextUiUrl string,
+ authenticator connectors.HttpAuthenticator,
+ metricPrefix string,
+ tags map[string]string,
+) pipeline.MetricsReceiver {
+ r := &SparkMetricsReceiver{
+ i,
+ sparkContextUiUrl,
+ authenticator,
+ metricPrefix,
+ tags,
+ }
+
+ return r
+}
+
+func (s *SparkMetricsReceiver) GetId() string {
+ return "spark-metrics-receiver"
+}
+
+func (s *SparkMetricsReceiver) PollMetrics(
+ ctx context.Context,
+ writer chan <- model.Metric,
+) error {
+ sparkApps, err := getSparkApplications(s.sparkContextUiUrl, s.authenticator)
+ if err != nil {
+ return err
+ }
+
+	wg := sync.WaitGroup{}
+	// The goroutines below append to errs concurrently, so guard the shared
+	// slice with a mutex to avoid a data race.
+	mu := sync.Mutex{}
+	errs := []error{}
+
+	for _, sparkApp := range sparkApps {
+		// Copy the loop variable so each goroutine captures its own
+		// application value (required on Go versions prior to 1.22).
+		sparkApp := sparkApp
+
+		wg.Add(1)
+ go func(app *SparkApplication) {
+ defer wg.Done()
+
+ err := collectSparkAppExecutorMetrics(
+ s.sparkContextUiUrl,
+ app,
+ s.authenticator,
+ s.metricPrefix,
+ s.tags,
+ writer,
+ )
+ if err != nil {
+				mu.Lock()
+				errs = append(errs, err)
+				mu.Unlock()
+ }
+
+ err = collectSparkAppJobMetrics(
+ s.sparkContextUiUrl,
+ app,
+ s.authenticator,
+ s.metricPrefix,
+ s.tags,
+ writer,
+ )
+ if err != nil {
+				mu.Lock()
+				errs = append(errs, err)
+				mu.Unlock()
+ }
+
+ err = collectSparkAppStageMetrics(
+ s.sparkContextUiUrl,
+ app,
+ s.authenticator,
+ s.metricPrefix,
+ s.tags,
+ writer,
+ )
+ if err != nil {
+				mu.Lock()
+				errs = append(errs, err)
+				mu.Unlock()
+ }
+
+ err = collectSparkAppRDDMetrics(
+ s.sparkContextUiUrl,
+ app,
+ s.authenticator,
+ s.metricPrefix,
+ s.tags,
+ writer,
+ )
+ if err != nil {
+				mu.Lock()
+				errs = append(errs, err)
+				mu.Unlock()
+ }
+ }(&sparkApp)
+ }
+
+ wg.Wait()
+
+ return errors.Join(errs...)
+}
+
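+// getSparkApplications fetches the list of applications known to the Spark
+// context UI via GET /api/v1/applications.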
+func getSparkApplications(
+ sparkContextUiUrl string,
+ authenticator connectors.HttpAuthenticator,
+) ([]SparkApplication, error) {
+ sparkApps := []SparkApplication{}
+
+ err := makeRequest(
+ sparkContextUiUrl + "/api/v1/applications",
+ authenticator,
+ &sparkApps,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ return sparkApps, nil
+}
+
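+// collectSparkAppExecutorMetrics queries
+// /api/v1/applications/{appId}/executors and writes one set of executor
+// gauges, including storage memory and peak memory metrics, per executor.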
+func collectSparkAppExecutorMetrics(
+ sparkContextUiUrl string,
+ sparkApp *SparkApplication,
+ authenticator connectors.HttpAuthenticator,
+ metricPrefix string,
+ tags map[string]string,
+ writer chan <- model.Metric,
+) error {
+ executors := []SparkExecutor{}
+
+ err := makeRequest(
+ sparkContextUiUrl + "/api/v1/applications/" + sparkApp.Id + "/executors",
+ authenticator,
+ &executors,
+ )
+ if err != nil {
+ return err
+ }
+
+ for _, executor := range executors {
+ log.Debugf("processing executor %s", executor.Id)
+
+ attrs := makeAppAttributesMap(
+ sparkContextUiUrl,
+ sparkApp,
+ tags,
+ )
+
+ attrs["sparkAppExecutorId"] = executor.Id
+
+ writeGauge(
+ metricPrefix,
+ "app.executor.rddBlocks",
+ executor.RddBlocks,
+ attrs,
+ writer,
+ )
+
+ writeGauge(
+ metricPrefix,
+ "app.executor.memoryUsed",
+ executor.MemoryUsed,
+ attrs,
+ writer,
+ )
+
+ writeGauge(
+ metricPrefix,
+ "app.executor.diskUsed",
+ executor.DiskUsed,
+ attrs,
+ writer,
+ )
+
+ writeGauge(
+ metricPrefix,
+ "app.executor.totalCores",
+ executor.TotalCores,
+ attrs,
+ writer,
+ )
+
+ writeGauge(
+ metricPrefix,
+ "app.executor.maxTasks",
+ executor.MaxTasks,
+ attrs,
+ writer,
+ )
+
+ writeGauge(
+ metricPrefix,
+ "app.executor.activeTasks",
+ executor.ActiveTasks,
+ attrs,
+ writer,
+ )
+
+ writeGauge(
+ metricPrefix,
+ "app.executor.failedTasks",
+ executor.FailedTasks,
+ attrs,
+ writer,
+ )
+
+ writeGauge(
+ metricPrefix,
+ "app.executor.completedTasks",
+ executor.CompletedTasks,
+ attrs,
+ writer,
+ )
+
+ writeGauge(
+ metricPrefix,
+ "app.executor.totalTasks",
+ executor.TotalTasks,
+ attrs,
+ writer,
+ )
+
+ writeGauge(
+ metricPrefix,
+ "app.executor.totalDuration",
+ executor.TotalDuration,
+ attrs,
+ writer,
+ )
+
+ writeGauge(
+ metricPrefix,
+ "app.executor.totalGCTime",
+ executor.TotalGCTime,
+ attrs,
+ writer,
+ )
+
+ writeGauge(
+ metricPrefix,
+ "app.executor.totalInputBytes",
+ executor.TotalInputBytes,
+ attrs,
+ writer,
+ )
+
+ writeGauge(
+ metricPrefix,
+ "app.executor.totalShuffleRead",
+ executor.TotalShuffleRead,
+ attrs,
+ writer,
+ )
+
+ writeGauge(
+ metricPrefix,
+ "app.executor.totalShuffleWrite",
+ executor.TotalShuffleWrite,
+ attrs,
+ writer,
+ )
+
+ writeGauge(
+ metricPrefix,
+ "app.executor.maxMemory",
+ executor.MaxMemory,
+ attrs,
+ writer,
+ )
+
+ writeMemoryMetrics(
+ metricPrefix + "app.executor.memory.",
+ &executor.MemoryMetrics,
+ attrs,
+ writer,
+ )
+
+ writePeakMemoryMetrics(
+ metricPrefix + "app.executor.memory.peak.",
+ &executor.PeakMemoryMetrics,
+ attrs,
+ writer,
+ )
+ }
+
+ return nil
+}
+
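+// collectSparkAppJobMetrics queries /api/v1/applications/{appId}/jobs, writes
+// per-job stage and task gauges, and then writes application-level job counts
+// aggregated by status (running, lost, succeeded, failed).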
+func collectSparkAppJobMetrics(
+ sparkContextUiUrl string,
+ sparkApp *SparkApplication,
+ authenticator connectors.HttpAuthenticator,
+ metricPrefix string,
+ tags map[string]string,
+ writer chan <- model.Metric,
+) error {
+ jobs := []SparkJob{}
+
+ err := makeRequest(
+ sparkContextUiUrl + "/api/v1/applications/" + sparkApp.Id + "/jobs",
+ authenticator,
+ &jobs,
+ )
+ if err != nil {
+ return err
+ }
+
+ jobsRunning := 0
+ jobsLost := 0
+ jobsSucceeded := 0
+ jobsFailed := 0
+
+ for _, job := range jobs {
+ log.Debugf("processing job %d (%s)", job.JobId, job.Name)
+
+ attrs := makeAppAttributesMap(
+ sparkContextUiUrl,
+ sparkApp,
+ tags,
+ )
+
+ attrs["sparkAppJobId"] = job.JobId
+ // The job name and job group cause very high cardinality for Databricks
+ // Notebook runs.
+ //attrs["sparkAppJobName"] = job.Name
+ //attrs["sparkAppJobGroup"] = job.JobGroup
+ attrs["sparkAppJobStatus"] = job.Status
+
+ jobStatus := strings.ToLower(job.Status)
+
+ if jobStatus == "running" {
+ jobsRunning += 1
+ } else if jobStatus == "unknown" {
+ jobsLost += 1
+ } else if jobStatus == "succeeded" {
+ jobsSucceeded += 1
+ } else if jobStatus == "failed" {
+ jobsFailed += 1
+ }
+
+ // Write all the things.
+
+ writeGauge(
+ metricPrefix,
+ "app.job.indices.completed",
+ job.NumCompletedIndices,
+ attrs,
+ writer,
+ )
+
+ attrs["sparkAppStageStatus"] = "active"
+
+ writeGauge(
+ metricPrefix,
+ "app.job.stages",
+ job.NumActiveStages,
+ attrs,
+ writer,
+ )
+
+ attrs["sparkAppStageStatus"] = "completed"
+
+ writeGauge(
+ metricPrefix,
+ "app.job.stages",
+ job.NumCompletedStages,
+ attrs,
+ writer,
+ )
+
+ attrs["sparkAppStageStatus"] = "skipped"
+
+ writeGauge(
+ metricPrefix,
+ "app.job.stages",
+ job.NumSkippedStages,
+ attrs,
+ writer,
+ )
+
+ attrs["sparkAppStageStatus"] = "failed"
+
+ writeGauge(
+ metricPrefix,
+ "app.job.stages",
+ job.NumFailedStages,
+ attrs,
+ writer,
+ )
+
+ delete(attrs, "sparkAppStageStatus")
+
+ attrs["sparkAppTaskStatus"] = "active"
+
+ writeGauge(
+ metricPrefix,
+ "app.job.tasks",
+ job.NumActiveTasks,
+ attrs,
+ writer,
+ )
+
+ attrs["sparkAppTaskStatus"] = "completed"
+
+ writeGauge(
+ metricPrefix,
+ "app.job.tasks",
+ job.NumCompletedTasks,
+ attrs,
+ writer,
+ )
+
+ attrs["sparkAppTaskStatus"] = "skipped"
+
+ writeGauge(
+ metricPrefix,
+ "app.job.tasks",
+ job.NumSkippedTasks,
+ attrs,
+ writer,
+ )
+
+ attrs["sparkAppTaskStatus"] = "failed"
+
+ writeGauge(
+ metricPrefix,
+ "app.job.tasks",
+ job.NumFailedTasks,
+ attrs,
+ writer,
+ )
+
+ attrs["sparkAppTaskStatus"] = "killed"
+
+ writeGauge(
+ metricPrefix,
+ "app.job.tasks",
+ job.NumKilledTasks,
+ attrs,
+ writer,
+ )
+
+ delete(attrs, "sparkAppTaskStatus")
+ }
+
+ attrs := makeAppAttributesMap(
+ sparkContextUiUrl,
+ sparkApp,
+ tags,
+ )
+
+ attrs["sparkAppJobStatus"] = "running"
+
+ writeGauge(
+ metricPrefix,
+ "app.jobs",
+ jobsRunning,
+ attrs,
+ writer,
+ )
+
+ attrs["sparkAppJobStatus"] = "lost"
+
+ writeGauge(
+ metricPrefix,
+ "app.jobs",
+ jobsLost,
+ attrs,
+ writer,
+ )
+
+ attrs["sparkAppJobStatus"] = "succeeded"
+
+ writeGauge(
+ metricPrefix,
+ "app.jobs",
+ jobsSucceeded,
+ attrs,
+ writer,
+ )
+
+ attrs["sparkAppJobStatus"] = "failed"
+
+ writeGauge(
+ metricPrefix,
+ "app.jobs",
+ jobsFailed,
+ attrs,
+ writer,
+ )
+
+ return nil
+}
+
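+// collectSparkAppStageMetrics queries
+// /api/v1/applications/{appId}/stages?details=true, writes per-stage gauges
+// (including per-task metrics and peak memory metrics), and then writes
+// application-level stage counts aggregated by status.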
+func collectSparkAppStageMetrics(
+ sparkContextUiUrl string,
+ sparkApp *SparkApplication,
+ authenticator connectors.HttpAuthenticator,
+ metricPrefix string,
+ tags map[string]string,
+ writer chan <- model.Metric,
+) error {
+ stages := []SparkStage{}
+
+ err := makeRequest(
+ sparkContextUiUrl + "/api/v1/applications/" + sparkApp.Id + "/stages?details=true",
+ authenticator,
+ &stages,
+ )
+ if err != nil {
+ return err
+ }
+
+ stagesActive := 0
+ stagesPending := 0
+ stagesComplete := 0
+ stagesFailed := 0
+ stagesSkipped := 0
+
+ for _, stage := range stages {
+ log.Debugf("processing stage %d (%s)", stage.StageId, stage.Name)
+
+ stageStatus := strings.ToLower(stage.Status)
+
+ attrs := makeAppAttributesMap(
+ sparkContextUiUrl,
+ sparkApp,
+ tags,
+ )
+
+ attrs["sparkAppStageName"] = stage.Name
+ attrs["sparkAppStageStatus"] = stageStatus
+ // @TODO: The attributes below may cause high cardinality. Further
+ // investigation is needed.
+ //attrs["sparkAppStageId"] = stage.StageId
+ //attrs["sparkAppStageAttemptId"] = stage.AttemptId
+ //attrs["sparkAppStageSchedulingPool"] = stage.SchedulingPool
+ //attrs["sparkAppStageResourceProfileId"] = stage.ResourceProfileId
+
+ if stageStatus == "active" {
+ stagesActive += 1
+ } else if stageStatus == "pending" {
+ stagesPending += 1
+ } else if stageStatus == "complete" {
+ stagesComplete += 1
+ } else if stageStatus == "failed" {
+ stagesFailed += 1
+ } else if stageStatus == "skipped" {
+ stagesSkipped += 1
+ }
+
+ // Write all the things.
+
+ writeGauge(
+ metricPrefix,
+ "app.stage.peakNettyDirectMemory",
+ stage.PeakNettyDirectMemory,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.peakJvmDirectMemory",
+ stage.PeakJvmDirectMemory,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.peakSparkDirectMemoryOverLimit",
+ stage.PeakSparkDirectMemoryOverLimit,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.peakTotalOffHeapMemory",
+ stage.PeakTotalOffHeapMemory,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.executor.deserializeTime",
+ stage.ExecutorDeserializeTime,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.executor.deserializeCpuTime",
+ stage.ExecutorDeserializeCpuTime,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.executor.runTime",
+ stage.ExecutorRunTime,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.executor.cpuTime",
+ stage.ExecutorCpuTime,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.resultSize",
+ stage.ResultSize,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.jvmGcTime",
+ stage.JvmGcTime,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.resultSerializationTime",
+ stage.ResultSerializationTime,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.memoryBytesSpilled",
+ stage.MemoryBytesSpilled,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.diskBytesSpilled",
+ stage.DiskBytesSpilled,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.peakExecutionMemory",
+ stage.PeakExecutionMemory,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.inputBytes",
+ stage.InputBytes,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.inputRecords",
+ stage.InputRecords,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.outputBytes",
+ stage.OutputBytes,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.outputRecords",
+ stage.OutputRecords,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.shuffle.remoteBlocksFetched",
+ stage.ShuffleRemoteBlocksFetched,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.shuffle.localBlocksFetched",
+ stage.ShuffleLocalBlocksFetched,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.shuffle.fetchWaitTime",
+ stage.ShuffleFetchWaitTime,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.shuffle.remoteBytesRead",
+ stage.ShuffleRemoteBytesRead,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.shuffle.remoteBytesReadToDisk",
+ stage.ShuffleRemoteBytesReadToDisk,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.shuffle.localBytesRead",
+ stage.ShuffleLocalBytesRead,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.shuffle.readBytes",
+ stage.ShuffleReadBytes,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.shuffle.readRecords",
+ stage.ShuffleReadRecords,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+			"app.stage.shuffle.corruptMergedBlockChunks",
+ stage.ShuffleCorruptMergedBlockChunks,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.shuffle.mergedFetchFallbackCount",
+ stage.ShuffleMergedFetchFallbackCount,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.shuffle.mergedRemoteBlocksFetched",
+ stage.ShuffleMergedRemoteBlocksFetched,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.shuffle.mergedLocalBlocksFetched",
+ stage.ShuffleMergedLocalBlocksFetched,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.shuffle.mergedRemoteChunksFetched",
+ stage.ShuffleMergedRemoteChunksFetched,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.shuffle.mergedLocalChunksFetched",
+ stage.ShuffleMergedLocalChunksFetched,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.shuffle.mergedRemoteBytesRead",
+ stage.ShuffleMergedRemoteBytesRead,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.shuffle.mergedLocalBytesRead",
+ stage.ShuffleMergedLocalBytesRead,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.shuffle.remoteReqsDuration",
+ stage.ShuffleRemoteReqsDuration,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.shuffle.mergedRemoteReqsDuration",
+ stage.ShuffleMergedRemoteReqsDuration,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.shuffle.writeBytes",
+ stage.ShuffleWriteBytes,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.shuffle.writeTime",
+ stage.ShuffleWriteTime,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.shuffle.writeRecords",
+ stage.ShuffleWriteRecords,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.stage.shuffle.mergersCount",
+ stage.ShuffleMergersCount,
+ attrs,
+ writer,
+ )
+
+ for _, task := range stage.Tasks {
+ writeStageTaskMetrics(
+ metricPrefix + "app.stage.task.",
+ &task,
+ attrs,
+ writer,
+ )
+ }
+
+ writePeakMemoryMetrics(
+ metricPrefix + "app.stage.memory.peak.",
+ &stage.PeakMemoryMetrics,
+ attrs,
+ writer,
+ )
+
+ writeGauge(
+ metricPrefix,
+ "app.stage.tasks.total",
+ stage.NumTasks,
+ attrs,
+ writer,
+ )
+
+ attrs["sparkAppTaskStatus"] = "active"
+
+ writeGauge(
+ metricPrefix,
+ "app.stage.tasks",
+ stage.NumActiveTasks,
+ attrs,
+ writer,
+ )
+
+ attrs["sparkAppTaskStatus"] = "complete"
+
+ writeGauge(
+ metricPrefix,
+ "app.stage.tasks",
+ stage.NumCompleteTasks,
+ attrs,
+ writer,
+ )
+
+ attrs["sparkAppTaskStatus"] = "failed"
+
+ writeGauge(
+ metricPrefix,
+ "app.stage.tasks",
+ stage.NumFailedTasks,
+ attrs,
+ writer,
+ )
+
+ attrs["sparkAppTaskStatus"] = "killed"
+
+ writeGauge(
+ metricPrefix,
+ "app.stage.tasks",
+ stage.NumKilledTasks,
+ attrs,
+ writer,
+ )
+
+ delete(attrs, "sparkAppTaskStatus")
+
+ writeGauge(
+ metricPrefix,
+ "app.stage.indices.completed",
+ stage.NumCompletedIndices,
+ attrs,
+ writer,
+ )
+ }
+
+ attrs := makeAppAttributesMap(
+ sparkContextUiUrl,
+ sparkApp,
+ tags,
+ )
+
+ attrs["sparkAppStageStatus"] = "active"
+
+ writeGauge(
+ metricPrefix,
+ "app.stages",
+ stagesActive,
+ attrs,
+ writer,
+ )
+
+ attrs["sparkAppStageStatus"] = "pending"
+
+ writeGauge(
+ metricPrefix,
+ "app.stages",
+ stagesPending,
+ attrs,
+ writer,
+ )
+
+ attrs["sparkAppStageStatus"] = "complete"
+
+ writeGauge(
+ metricPrefix,
+ "app.stages",
+ stagesComplete,
+ attrs,
+ writer,
+ )
+
+ attrs["sparkAppStageStatus"] = "failed"
+
+ writeGauge(
+ metricPrefix,
+ "app.stages",
+ stagesFailed,
+ attrs,
+ writer,
+ )
+
+ attrs["sparkAppStageStatus"] = "skipped"
+
+ writeGauge(
+ metricPrefix,
+ "app.stages",
+ stagesSkipped,
+ attrs,
+ writer,
+ )
+
+ return nil
+}
+
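+// writeStageTaskMetrics writes the gauges for a single task (duration, task
+// metrics, input/output, shuffle read/write, and Photon memory metrics) using
+// a copy of the stage attributes extended with task-level attributes.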
+func writeStageTaskMetrics(
+ metricPrefix string,
+ task *SparkTask,
+ attrs map[string]interface{},
+ writer chan <- model.Metric,
+) {
+ log.Debugf("processing task %d", task.TaskId)
+
+ taskStatus := strings.ToLower(task.Status)
+
+ taskMetricAttrs := maps.Clone(attrs)
+
+ taskMetricAttrs["sparkAppTaskExecutorId"] = task.ExecutorId
+ taskMetricAttrs["sparkAppTaskStatus"] = taskStatus
+ taskMetricAttrs["sparkAppTaskLocality"] = task.TaskLocality
+ taskMetricAttrs["sparkAppTaskSpeculative"] = task.Speculative
+ // @TODO: The attributes below may cause high cardinality. Further
+ // investigation is needed.
+ //attrs["sparkAppTaskId"] = task.TaskId
+ //attrs["sparkAppTaskAttempt"] = task.Attempt
+ //attrs["sparkAppTaskPartitionId"] = task.PartitionId
+
+ writeGauge(
+ metricPrefix,
+ "duration",
+ task.Duration,
+ taskMetricAttrs,
+ writer,
+ )
+
+ taskMetrics := task.TaskMetrics
+
+ writeGauge(
+ metricPrefix,
+ "executorDeserializeTime",
+ taskMetrics.ExecutorDeserializeTime,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "executorDeserializeCpuTime",
+ taskMetrics.ExecutorDeserializeCpuTime,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "executorRunTime",
+ taskMetrics.ExecutorRunTime,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "executorCpuTime",
+ taskMetrics.ExecutorCpuTime,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "resultSize",
+ taskMetrics.ResultSize,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "jvmGcTime",
+ taskMetrics.JvmGcTime,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "resultSerializationTime",
+ taskMetrics.ResultSerializationTime,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "memoryBytesSpilled",
+ taskMetrics.MemoryBytesSpilled,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "diskBytesSpilled",
+ taskMetrics.DiskBytesSpilled,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "peakExecutionMemory",
+ taskMetrics.PeakExecutionMemory,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "input.bytesRead",
+ taskMetrics.InputMetrics.BytesRead,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "input.recordsRead",
+ taskMetrics.InputMetrics.RecordsRead,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "output.bytesWritten",
+ taskMetrics.OutputMetrics.BytesWritten,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "output.recordsWritten",
+ taskMetrics.OutputMetrics.RecordsWritten,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "shuffle.read.remoteBlocksFetched",
+ taskMetrics.ShuffleReadMetrics.RemoteBlocksFetched,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "shuffle.read.localBlocksFetched",
+ taskMetrics.ShuffleReadMetrics.LocalBlocksFetched,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "shuffle.read.fetchWaitTime",
+ taskMetrics.ShuffleReadMetrics.FetchWaitTime,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "shuffle.read.remoteBytesRead",
+ taskMetrics.ShuffleReadMetrics.RemoteBytesRead,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "shuffle.read.remoteBytesReadToDisk",
+ taskMetrics.ShuffleReadMetrics.RemoteBytesReadToDisk,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "shuffle.read.localBytesRead",
+ taskMetrics.ShuffleReadMetrics.LocalBytesRead,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "shuffle.read.recordsRead",
+ taskMetrics.ShuffleReadMetrics.RecordsRead,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "shuffle.read.remoteReqsDuration",
+ taskMetrics.ShuffleReadMetrics.RemoteReqsDuration,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "shuffle.read.push.corruptMergedBlockChunks",
+ taskMetrics.ShuffleReadMetrics.SufflePushReadMetrics.CorruptMergedBlockChunks,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "shuffle.read.push.mergedFetchFallbackCount",
+ taskMetrics.ShuffleReadMetrics.SufflePushReadMetrics.MergedFetchFallbackCount,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "shuffle.read.push.remoteMergedBlocksFetched",
+ taskMetrics.ShuffleReadMetrics.SufflePushReadMetrics.RemoteMergedBlocksFetched,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "shuffle.read.push.localMergedBlocksFetched",
+ taskMetrics.ShuffleReadMetrics.SufflePushReadMetrics.LocalMergedBlocksFetched,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "shuffle.read.push.remoteMergedChunksFetched",
+ taskMetrics.ShuffleReadMetrics.SufflePushReadMetrics.RemoteMergedChunksFetched,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "shuffle.read.push.localMergedChunksFetched",
+ taskMetrics.ShuffleReadMetrics.SufflePushReadMetrics.LocalMergedChunksFetched,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "shuffle.read.push.remoteMergedBytesRead",
+ taskMetrics.ShuffleReadMetrics.SufflePushReadMetrics.RemoteMergedBytesRead,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "shuffle.read.push.localMergedBytesRead",
+ taskMetrics.ShuffleReadMetrics.SufflePushReadMetrics.LocalMergedBytesRead,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "shuffle.read.push.remoteMergedReqsDuration",
+ taskMetrics.ShuffleReadMetrics.SufflePushReadMetrics.RemoteMergedReqsDuration,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "shuffle.write.bytesWritten",
+ taskMetrics.ShuffleWriteMetrics.BytesWritten,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "shuffle.write.writeTime",
+ taskMetrics.ShuffleWriteMetrics.WriteTime,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "shuffle.write.recordsWritten",
+ taskMetrics.ShuffleWriteMetrics.RecordsWritten,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "photon.offHeapMinMemorySize",
+ taskMetrics.PhotonMemoryMetrics.OffHeapMinMemorySize,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "photon.offHeapMaxMemorySize",
+ taskMetrics.PhotonMemoryMetrics.OffHeapMaxMemorySize,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "photon.photonBufferPoolMinMemorySize",
+ taskMetrics.PhotonMemoryMetrics.PhotonBufferPoolMinMemorySize,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "photon.photonBufferPoolMaxMemorySize",
+ taskMetrics.PhotonMemoryMetrics.PhotonBufferPoolMaxMemorySize,
+ taskMetricAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "photon.photonizedTaskTimeNs",
+ taskMetrics.PhotonizedTaskTimeNs,
+ taskMetricAttrs,
+ writer,
+ )
+}
+
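+// collectSparkAppRDDMetrics queries
+// /api/v1/applications/{appId}/storage/rdd and writes per-RDD gauges along
+// with per-distribution and per-partition memory and disk usage.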
+func collectSparkAppRDDMetrics(
+ sparkContextUiUrl string,
+ sparkApp *SparkApplication,
+ authenticator connectors.HttpAuthenticator,
+ metricPrefix string,
+ tags map[string]string,
+ writer chan <- model.Metric,
+) error {
+ rdds := []SparkRDD{}
+
+ err := makeRequest(
+ sparkContextUiUrl + "/api/v1/applications/" + sparkApp.Id + "/storage/rdd",
+ authenticator,
+ &rdds,
+ )
+ if err != nil {
+ return err
+ }
+
+ for _, rdd := range rdds {
+ log.Debugf("processing rdd %d", rdd.Id)
+
+ attrs := makeAppAttributesMap(
+ sparkContextUiUrl,
+ sparkApp,
+ tags,
+ )
+
+ attrs["sparkAppRDDId"] = rdd.Id
+ attrs["sparkAppRDDName"] = rdd.Name
+
+ writeGauge(
+ metricPrefix,
+ "app.storage.rdd.partitions",
+ rdd.NumPartitions,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.storage.rdd.cachedPartitions",
+ rdd.NumCachedPartitions,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.storage.rdd.memory.used",
+ rdd.MemoryUsed,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "app.storage.rdd.disk.used",
+ rdd.DiskUsed,
+ attrs,
+ writer,
+ )
+
+ for index, distribution := range rdd.DataDistribution {
+ rddDistributionAttrs := maps.Clone(attrs)
+
+ rddDistributionAttrs["sparkAppRddDistributionIndex"] = index
+
+ writeGauge(
+ metricPrefix,
+				"app.storage.rdd.distribution.memory.used",
+ distribution.MemoryUsed,
+ rddDistributionAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+				"app.storage.rdd.distribution.memory.remaining",
+ distribution.MemoryRemaining,
+ rddDistributionAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+				"app.storage.rdd.distribution.disk.used",
+ distribution.DiskUsed,
+ rddDistributionAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+				"app.storage.rdd.distribution.memory.usedOnHeap",
+ distribution.OnHeapMemoryUsed,
+ rddDistributionAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+				"app.storage.rdd.distribution.memory.usedOffHeap",
+ distribution.OffHeapMemoryUsed,
+ rddDistributionAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+				"app.storage.rdd.distribution.memory.remainingOnHeap",
+ distribution.OnHeapMemoryRemaining,
+ rddDistributionAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+				"app.storage.rdd.distribution.memory.remainingOffHeap",
+ distribution.OffHeapMemoryRemaining,
+ rddDistributionAttrs,
+ writer,
+ )
+ }
+
+ for _, partition := range rdd.Partitions {
+ rddPartitionAttrs := maps.Clone(attrs)
+
+ rddPartitionAttrs["sparkAppRddPartitionBlockName"] =
+ partition.BlockName
+
+ writeGauge(
+ metricPrefix,
+				"app.storage.rdd.partition.memory.used",
+ partition.MemoryUsed,
+ rddPartitionAttrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+				"app.storage.rdd.partition.disk.used",
+ partition.DiskUsed,
+ rddPartitionAttrs,
+ writer,
+ )
+ }
+ }
+
+ return nil
+}
+
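+// writeMemoryMetrics writes the executor storage memory gauges (used and
+// total, on-heap and off-heap).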
+func writeMemoryMetrics(
+ metricPrefix string,
+ memoryMetrics *SparkExecutorMemoryMetrics,
+ attrs map[string]interface{},
+ writer chan <- model.Metric,
+) {
+ writeGauge(
+ metricPrefix,
+ "usedOnHeapStorage",
+ memoryMetrics.UsedOnHeapStorageMemory,
+ attrs,
+ writer,
+ )
+
+ writeGauge(
+ metricPrefix,
+ "usedOffHeapStorage",
+ memoryMetrics.UsedOffHeapStorageMemory,
+ attrs,
+ writer,
+ )
+
+ writeGauge(
+ metricPrefix,
+ "totalOnHeapStorage",
+ memoryMetrics.TotalOnHeapStorageMemory,
+ attrs,
+ writer,
+ )
+
+ writeGauge(
+ metricPrefix,
+ "totalOffHeapStorage",
+ memoryMetrics.TotalOffHeapStorageMemory,
+ attrs,
+ writer,
+ )
+}
+
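+// writePeakMemoryMetrics writes the peak executor memory gauges and GC
+// counters.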
+func writePeakMemoryMetrics(
+ metricPrefix string,
+ peakMemoryMetrics *SparkExecutorPeakMemoryMetrics,
+ attrs map[string]interface{},
+ writer chan <- model.Metric,
+) {
+ writeGauge(
+ metricPrefix,
+ "jvmHeap",
+ peakMemoryMetrics.JVMHeapMemory,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "jvmOffHeap",
+ peakMemoryMetrics.JVMOffHeapMemory,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "onHeapExecution",
+ peakMemoryMetrics.OnHeapExecutionMemory,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "offHeapExecution",
+ peakMemoryMetrics.OffHeapExecutionMemory,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "onHeapStorage",
+ peakMemoryMetrics.OnHeapStorageMemory,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "offHeapStorage",
+ peakMemoryMetrics.OffHeapStorageMemory,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "onHeapUnified",
+ peakMemoryMetrics.OnHeapUnifiedMemory,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "offHeapUnified",
+ peakMemoryMetrics.OffHeapUnifiedMemory,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "directPool",
+ peakMemoryMetrics.DirectPoolMemory,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "mappedPool",
+ peakMemoryMetrics.MappedPoolMemory,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "nettyDirect",
+ peakMemoryMetrics.NettyDirectMemory,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "jvmDirect",
+ peakMemoryMetrics.JvmDirectMemory,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "sparkDirectMemoryOverLimit",
+ peakMemoryMetrics.SparkDirectMemoryOverLimit,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "totalOffHeap",
+ peakMemoryMetrics.TotalOffHeapMemory,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "processTreeJvmVirtual",
+ peakMemoryMetrics.ProcessTreeJVMVMemory,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "processTreeJvmRSS",
+ peakMemoryMetrics.ProcessTreeJVMRSSMemory,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "processTreePythonVirtual",
+ peakMemoryMetrics.ProcessTreePythonVMemory,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "processTreePythonRSS",
+ peakMemoryMetrics.ProcessTreePythonRSSMemory,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "processTreeOtherVirtual",
+ peakMemoryMetrics.ProcessTreeOtherVMemory,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "processTreeOtherRSS",
+ peakMemoryMetrics.ProcessTreeOtherRSSMemory,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "minorGCCount",
+ peakMemoryMetrics.MinorGCCount,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "minorGCTime",
+ peakMemoryMetrics.MinorGCTime,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "majorGCCount",
+ peakMemoryMetrics.MajorGCCount,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "majorGCTime",
+ peakMemoryMetrics.MajorGCTime,
+ attrs,
+ writer,
+ )
+ writeGauge(
+ metricPrefix,
+ "totalGCTime",
+ peakMemoryMetrics.TotalGCTime,
+ attrs,
+ writer,
+ )
+}
+
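+// writeGauge builds a gauge metric from the prefixed name and value, copies
+// the supplied attributes onto it, and sends it on the writer channel.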
+func writeGauge(
+ prefix string,
+ metricName string,
+ metricValue any,
+ attrs map[string]interface{},
+ writer chan<- model.Metric,
+) {
+ metric := model.NewGaugeMetric(
+ prefix + metricName,
+ model.MakeNumeric(metricValue),
+ time.Now(),
+ )
+
+ for k, v := range attrs {
+ metric.Attributes[k] = v
+ }
+
+ writer <- metric
+}
+
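+// makeAppAttributesMap builds a fresh attribute map from the configured tags
+// plus the Spark context UI URL and the application ID and name.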
+func makeAppAttributesMap(
+ sparkContextUiUrl string,
+ sparkApp *SparkApplication,
+ tags map[string]string,
+) map[string]interface{} {
+ attrs := make(map[string]interface{})
+
+ for k, v := range tags {
+ attrs[k] = v
+ }
+
+ attrs["sparkContextUiUrl"] = sparkContextUiUrl
+ attrs["sparkAppId"] = sparkApp.Id
+ attrs["sparkAppName"] = sparkApp.Name
+
+ return attrs
+}
+
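+// makeRequest performs an authenticated HTTP GET against the given URL and
+// decodes the JSON response into the response argument.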
+func makeRequest(
+ url string,
+ authenticator connectors.HttpAuthenticator,
+ response interface{},
+) error {
+ connector := connectors.NewHttpGetConnector(url)
+
+ connector.SetAuthenticator(authenticator)
+ connector.SetHeaders(map[string]string{
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+ })
+
+ in, err := connector.Request()
+ if err != nil {
+ return err
+ }
+
+ log.Debugf("decoding spark JSON response for URL %s", url)
+
+ dec := json.NewDecoder(in)
+
+ err = dec.Decode(response)
+ if err != nil {
+ return err
+ }
+
+ if log.IsDebugEnabled() {
+ log.PrettyPrintJson(response)
+ }
+
+ return nil
+}
diff --git a/internal/spark/spark.go b/internal/spark/spark.go
new file mode 100644
index 0000000..6ffc98d
--- /dev/null
+++ b/internal/spark/spark.go
@@ -0,0 +1,352 @@
+package spark
+
+import (
+ "github.com/newrelic/newrelic-labs-sdk/pkg/integration"
+ "github.com/newrelic/newrelic-labs-sdk/pkg/integration/connectors"
+ "github.com/newrelic/newrelic-labs-sdk/pkg/integration/exporters"
+ "github.com/newrelic/newrelic-labs-sdk/pkg/integration/pipeline"
+ "github.com/spf13/viper"
+)
+
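+// SparkApplication is an application entry returned by the Spark REST API.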
+type SparkApplication struct {
+ Id string `json:"id"`
+ Name string `json:"name"`
+}
+
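+// SparkExecutorMemoryMetrics holds the current on-heap and off-heap storage
+// memory usage reported for an executor.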
+type SparkExecutorMemoryMetrics struct {
+ UsedOnHeapStorageMemory int `json:"usedOnHeapStorageMemory"`
+ UsedOffHeapStorageMemory int `json:"usedOffHeapStorageMemory"`
+ TotalOnHeapStorageMemory int `json:"totalOnHeapStorageMemory"`
+ TotalOffHeapStorageMemory int `json:"totalOffHeapStorageMemory"`
+}
+
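+// SparkExecutorPeakMemoryMetrics holds the peak memory and GC metrics
+// reported for an executor or stage.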
+type SparkExecutorPeakMemoryMetrics struct {
+ JVMHeapMemory int `json:"JVMHeapMemory"`
+ JVMOffHeapMemory int `json:"JVMOffHeapMemory"`
+ OnHeapExecutionMemory int `json:"OnHeapExecutionMemory"`
+ OffHeapExecutionMemory int `json:"OffHeapExecutionMemory"`
+ OnHeapStorageMemory int `json:"OnHeapStorageMemory"`
+ OffHeapStorageMemory int `json:"OffHeapStorageMemory"`
+ OnHeapUnifiedMemory int `json:"OnHeapUnifiedMemory"`
+ OffHeapUnifiedMemory int `json:"OffHeapUnifiedMemory"`
+ DirectPoolMemory int `json:"DirectPoolMemory"`
+ MappedPoolMemory int `json:"MappedPoolMemory"`
+ NettyDirectMemory int `json:"NettyDirectMemory"`
+ JvmDirectMemory int `json:"JvmDirectMemory"`
+ SparkDirectMemoryOverLimit int `json:"SparkDirectMemoryOverLimit"`
+ TotalOffHeapMemory int `json:"TotalOffHeapMemory"`
+ ProcessTreeJVMVMemory int `json:"ProcessTreeJVMVMemory"`
+ ProcessTreeJVMRSSMemory int `json:"ProcessTreeJVMRSSMemory"`
+ ProcessTreePythonVMemory int `json:"ProcessTreePythonVMemory"`
+ ProcessTreePythonRSSMemory int `json:"ProcessTreePythonRSSMemory"`
+ ProcessTreeOtherVMemory int `json:"ProcessTreeOtherVMemory"`
+ ProcessTreeOtherRSSMemory int `json:"ProcessTreeOtherRSSMemory"`
+ MinorGCCount int `json:"MinorGCCount"`
+ MinorGCTime int `json:"MinorGCTime"`
+ MajorGCCount int `json:"MajorGCCount"`
+ MajorGCTime int `json:"MajorGCTime"`
+ TotalGCTime int `json:"TotalGCTime"`
+}
+
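+// SparkExecutor models executor-level task, I/O, and memory metrics exposed
+// by the Spark REST API.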
+type SparkExecutor struct {
+ Id string `json:"id"`
+ RddBlocks int `json:"rddBlocks"`
+ MemoryUsed int `json:"memoryUsed"`
+ DiskUsed int `json:"diskUsed"`
+ TotalCores int `json:"totalCores"`
+ MaxTasks int `json:"maxTasks"`
+ ActiveTasks int `json:"activeTasks"`
+ FailedTasks int `json:"failedTasks"`
+ CompletedTasks int `json:"completedTasks"`
+ TotalTasks int `json:"totalTasks"`
+ TotalDuration int `json:"totalDuration"`
+ TotalGCTime int `json:"totalGCTime"`
+ TotalInputBytes int `json:"totalInputBytes"`
+ TotalShuffleRead int `json:"totalShuffleRead"`
+ TotalShuffleWrite int `json:"totalShuffleWrite"`
+ MaxMemory int `json:"maxMemory"`
+ MemoryMetrics SparkExecutorMemoryMetrics `json:"memoryMetrics"`
+ PeakMemoryMetrics SparkExecutorPeakMemoryMetrics `json:"peakMemoryMetrics"`
+}
+
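+// SparkExecutorSummary summarizes task, I/O, and shuffle activity for a
+// single executor within a stage.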
+type SparkExecutorSummary struct {
+ TaskTime int `json:"taskTime"`
+ FailedTasks int `json:"failedTasks"`
+ SucceededTasks int `json:"succeededTasks"`
+ KilledTasks int `json:"killedTasks"`
+ InputBytes int `json:"inputBytes"`
+ InputRecords int `json:"inputRecords"`
+ OutputBytes int `json:"outputBytes"`
+ OutputRecords int `json:"outputRecords"`
+ ShuffleRead int `json:"shuffleRead"`
+ ShuffleReadRecords int `json:"shuffleReadRecords"`
+ ShuffleWrite int `json:"shuffleWrite"`
+ ShuffleWriteRecords int `json:"shuffleWriteRecords"`
+ MemoryBytesSpilled int `json:"memoryBytesSpilled"`
+ DiskBytesSpilled int `json:"diskBytesSpilled"`
+ PeakMemoryMetrics SparkExecutorPeakMemoryMetrics `json:"peakMemoryMetrics"`
+}
+
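+// SparkJob models a Spark job and its task and stage counters as reported by
+// the Spark REST API.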
+type SparkJob struct {
+ JobId int `json:"jobId"`
+ Name string `json:"name"`
+ SubmissionTime string `json:"submissionTime"`
+ CompletionTime string `json:"completionTime"`
+ JobGroup string `json:"jobGroup"`
+ /* status=[running|succeeded|failed|unknown] */
+ Status string `json:"status"`
+ NumTasks int `json:"numTasks"`
+ NumActiveTasks int `json:"numActiveTasks"`
+ NumCompletedTasks int `json:"numCompletedTasks"`
+ NumSkippedTasks int `json:"numSkippedTasks"`
+ NumFailedTasks int `json:"numFailedTasks"`
+ NumKilledTasks int `json:"numKilledTasks"`
+ NumCompletedIndices int `json:"numCompletedIndices"`
+ NumActiveStages int `json:"numActiveStages"`
+ NumCompletedStages int `json:"numCompletedStages"`
+ NumSkippedStages int `json:"numSkippedStages"`
+ NumFailedStages int `json:"numFailedStages"`
+}
+
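+// SparkTaskMetrics holds the detailed execution, I/O, and shuffle metrics
+// reported for a single task.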
+type SparkTaskMetrics struct {
+ ExecutorDeserializeTime int `json:"executorDeserializeTime"`
+ ExecutorDeserializeCpuTime int `json:"executorDeserializeCpuTime"`
+ ExecutorRunTime int `json:"executorRunTime"`
+ ExecutorCpuTime int `json:"executorCpuTime"`
+ ResultSize int `json:"resultSize"`
+ JvmGcTime int `json:"jvmGcTime"`
+ ResultSerializationTime int `json:"resultSerializationTime"`
+ MemoryBytesSpilled int `json:"memoryBytesSpilled"`
+ DiskBytesSpilled int `json:"diskBytesSpilled"`
+ PeakExecutionMemory int `json:"peakExecutionMemory"`
+ InputMetrics struct {
+ BytesRead int `json:"bytesRead"`
+ RecordsRead int `json:"recordsRead"`
+ } `json:"inputMetrics"`
+ OutputMetrics struct {
+ BytesWritten int `json:"bytesWritten"`
+ RecordsWritten int `json:"recordsWritten"`
+ } `json:"outputMetrics"`
+ ShuffleReadMetrics struct {
+ RemoteBlocksFetched int `json:"remoteBlocksFetched"`
+ LocalBlocksFetched int `json:"localBlocksFetched"`
+ FetchWaitTime int `json:"fetchWaitTime"`
+ RemoteBytesRead int `json:"remoteBytesRead"`
+ RemoteBytesReadToDisk int `json:"remoteBytesReadToDisk"`
+ LocalBytesRead int `json:"localBytesRead"`
+ RecordsRead int `json:"recordsRead"`
+ RemoteReqsDuration int `json:"remoteReqsDuration"`
+ SufflePushReadMetrics struct {
+ CorruptMergedBlockChunks int `json:"corruptMergedBlockChunks"`
+ MergedFetchFallbackCount int `json:"mergedFetchFallbackCount"`
+ RemoteMergedBlocksFetched int `json:"remoteMergedBlocksFetched"`
+ LocalMergedBlocksFetched int `json:"localMergedBlocksFetched"`
+ RemoteMergedChunksFetched int `json:"remoteMergedChunksFetched"`
+ LocalMergedChunksFetched int `json:"localMergedChunksFetched"`
+ RemoteMergedBytesRead int `json:"remoteMergedBytesRead"`
+ LocalMergedBytesRead int `json:"localMergedBytesRead"`
+ RemoteMergedReqsDuration int `json:"remoteMergedReqsDuration"`
+ } `json:"shufflePushReadMetrics"`
+ } `json:"shuffleReadMetrics"`
+ ShuffleWriteMetrics struct {
+ BytesWritten int `json:"bytesWritten"`
+ WriteTime int `json:"writeTime"`
+ RecordsWritten int `json:"recordsWritten"`
+ } `json:"shuffleWriteMetrics"`
+ PhotonMemoryMetrics struct {
+ OffHeapMinMemorySize int `json:"offHeapMinMemorySize"`
+ OffHeapMaxMemorySize int `json:"offHeapMaxMemorySize"`
+ PhotonBufferPoolMinMemorySize int `json:"photonBufferPoolMinMemorySize"`
+ PhotonBufferPoolMaxMemorySize int `json:"photonBufferPoolMaxMemorySize"`
+ } `json:"photonMemoryMetrics"`
+ PhotonizedTaskTimeNs int `json:"photonizedTaskTimeNs"`
+}
+
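+// SparkTask models a single task attempt within a stage.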
+type SparkTask struct {
+ TaskId int `json:"taskId"`
+ Index int `json:"index"`
+ Attempt int `json:"attempt"`
+ PartitionId int `json:"partitionId"`
+ LaunchTime string `json:"launchTime"`
+ Duration int `json:"duration"`
+ ExecutorId string `json:"executorId"`
+ /*
+ taskStatus=[RUNNING|SUCCESS|FAILED|KILLED|PENDING]
+ */
+ Status string `json:"status"`
+ TaskLocality string `json:"taskLocality"`
+ Speculative bool `json:"speculative"`
+ TaskMetrics SparkTaskMetrics `json:"taskMetrics"`
+ SchedulerDelay int `json:"schedulerDelay"`
+ GettingResultTime int `json:"gettingResultTime"`
+}
+
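+// SparkStage models a Spark stage, including its aggregate metrics, tasks,
+// and per-executor summaries.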
+type SparkStage struct {
+ /*
+ enum StageStatus {
+ STAGE_STATUS_UNSPECIFIED = 0;
+ STAGE_STATUS_ACTIVE = 1;
+ STAGE_STATUS_COMPLETE = 2;
+ STAGE_STATUS_FAILED = 3;
+ STAGE_STATUS_PENDING = 4;
+ STAGE_STATUS_SKIPPED = 5;
+ }
+ */
+ Status string `json:"status"`
+ StageId int `json:"stageId"`
+ AttemptId int `json:"attemptId"`
+ NumTasks int `json:"numTasks"`
+ NumActiveTasks int `json:"numActiveTasks"`
+ NumCompleteTasks int `json:"numCompleteTasks"`
+ NumFailedTasks int `json:"numFailedTasks"`
+ NumKilledTasks int `json:"numKilledTasks"`
+ NumCompletedIndices int `json:"numCompletedIndices"`
+ PeakNettyDirectMemory int `json:"peakNettyDirectMemory"`
+ PeakJvmDirectMemory int `json:"peakJvmDirectMemory"`
+ PeakSparkDirectMemoryOverLimit int `json:"peakSparkDirectMemoryOverLimit"`
+ PeakTotalOffHeapMemory int `json:"peakTotalOffHeapMemory"`
+ SubmissionTime string `json:"submissionTime"`
+ FirstTaskLaunchedTime string `json:"firstTaskLaunchedTime"`
+ CompletionTime string `json:"completionTime"`
+ ExecutorDeserializeTime int `json:"executorDeserializeTime"`
+ ExecutorDeserializeCpuTime int `json:"executorDeserializeCpuTime"`
+ ExecutorRunTime int `json:"executorRunTime"`
+ ExecutorCpuTime int `json:"executorCpuTime"`
+ ResultSize int `json:"resultSize"`
+ JvmGcTime int `json:"jvmGcTime"`
+ ResultSerializationTime int `json:"resultSerializationTime"`
+ MemoryBytesSpilled int `json:"memoryBytesSpilled"`
+ DiskBytesSpilled int `json:"diskBytesSpilled"`
+ PeakExecutionMemory int `json:"peakExecutionMemory"`
+ InputBytes int `json:"inputBytes"`
+ InputRecords int `json:"inputRecords"`
+ OutputBytes int `json:"outputBytes"`
+ OutputRecords int `json:"outputRecords"`
+ ShuffleRemoteBlocksFetched int `json:"shuffleRemoteBlocksFetched"`
+ ShuffleLocalBlocksFetched int `json:"shuffleLocalBlocksFetched"`
+ ShuffleFetchWaitTime int `json:"shuffleFetchWaitTime"`
+ ShuffleRemoteBytesRead int `json:"shuffleRemoteBytesRead"`
+ ShuffleRemoteBytesReadToDisk int `json:"shuffleRemoteBytesReadToDisk"`
+ ShuffleLocalBytesRead int `json:"shuffleLocalBytesRead"`
+ ShuffleReadBytes int `json:"shuffleReadBytes"`
+ ShuffleReadRecords int `json:"shuffleReadRecords"`
+ ShuffleCorruptMergedBlockChunks int `json:"shuffleCorruptMergedBlockChunks"`
+ ShuffleMergedFetchFallbackCount int `json:"shuffleMergedFetchFallbackCount"`
+ ShuffleMergedRemoteBlocksFetched int `json:"shuffleMergedRemoteBlocksFetched"`
+ ShuffleMergedLocalBlocksFetched int `json:"shuffleMergedLocalBlocksFetched"`
+ ShuffleMergedRemoteChunksFetched int `json:"shuffleMergedRemoteChunksFetched"`
+ ShuffleMergedLocalChunksFetched int `json:"shuffleMergedLocalChunksFetched"`
+ ShuffleMergedRemoteBytesRead int `json:"shuffleMergedRemoteBytesRead"`
+ ShuffleMergedLocalBytesRead int `json:"shuffleMergedLocalBytesRead"`
+ ShuffleRemoteReqsDuration int `json:"shuffleRemoteReqsDuration"`
+ ShuffleMergedRemoteReqsDuration int `json:"shuffleMergedRemoteReqsDuration"`
+ ShuffleWriteBytes int `json:"shuffleWriteBytes"`
+ ShuffleWriteTime int `json:"shuffleWriteTime"`
+ ShuffleWriteRecords int `json:"shuffleWriteRecords"`
+ Name string `json:"name"`
+ SchedulingPool string `json:"schedulingPool"`
+ Tasks map[string]SparkTask `json:"tasks"`
+ ExecutorSummary map[string]SparkExecutorSummary `json:"executorSummary"`
+ ResourceProfileId int `json:"resourceProfileId"`
+ PeakMemoryMetrics SparkExecutorPeakMemoryMetrics `json:"peakMemoryMetrics"`
+ ShuffleMergersCount int `json:"shuffleMergersCount"`
+}
+
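+// SparkRDD models a cached RDD as returned by the storage/rdd endpoint.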
+type SparkRDD struct {
+ Id int `json:"id"`
+ Name string `json:"name"`
+ NumPartitions int `json:"numPartitions"`
+ NumCachedPartitions int `json:"numCachedPartitions"`
+ MemoryUsed int `json:"memoryUsed"`
+ DiskUsed int `json:"diskUsed"`
+ DataDistribution []struct {
+ MemoryUsed int `json:"memoryUsed"`
+ MemoryRemaining int `json:"memoryRemaining"`
+ DiskUsed int `json:"diskUsed"`
+ OnHeapMemoryUsed int `json:"onHeapMemoryUsed"`
+ OffHeapMemoryUsed int `json:"offHeapMemoryUsed"`
+ OnHeapMemoryRemaining int `json:"onHeapMemoryRemaining"`
+ OffHeapMemoryRemaining int `json:"offHeapMemoryRemaining"`
+ } `json:"dataDistribution"`
+ Partitions []struct {
+ BlockName string `json:"blockName"`
+ MemoryUsed int `json:"memoryUsed"`
+ DiskUsed int `json:"diskUsed"`
+ Executors []string `json:"executors"`
+ } `json:"partitions"`
+}
+
+/*
+ @TODO: Support non-Databricks Spark deployments
+func InitPipelines(i *integration.LabsIntegration) error {
+ sparkContextUrls := viper.GetStringSlice("spark.contexts")
+ if len(sparkContextUrls) == 0 {
+ return fmt.Errorf("no spark context urls specified")
+ }
+
+ for _, sparkContextUrl := range sparkContextUrls {
+ err := InitPipelinesForContext(
+ i,
+ sparkContextUrl,
+ nil,
+ nil,
+ )
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+*/
+
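+// InitPipelinesForContext sets up a metrics pipeline for a single Spark
+// context UI: it creates a New Relic exporter, registers the Spark metrics
+// receiver, and adds the pipeline to the integration.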
+func InitPipelinesForContext(
+ i *integration.LabsIntegration,
+ sparkContextUiUrl string,
+ authenticator connectors.HttpAuthenticator,
+ tags map[string]string,
+) error {
+ // Create the New Relic exporter
+ newRelicExporter := exporters.NewNewRelicExporter(
+ "newrelic-api",
+ i,
+ )
+
+ // Create a metrics pipeline
+ mp := pipeline.NewMetricsPipeline()
+ mp.AddExporter(newRelicExporter)
+
+ err := setupReceivers(
+ i,
+ mp,
+ sparkContextUiUrl,
+ authenticator,
+ tags,
+ )
+ if err != nil {
+ return err
+ }
+
+ i.AddPipeline(mp)
+
+ return nil
+}
+
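+// setupReceivers creates a Spark metrics receiver for the given Spark context
+// UI and registers it on the metrics pipeline.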
+func setupReceivers(
+ i *integration.LabsIntegration,
+ mp *pipeline.MetricsPipeline,
+ sparkContextUiUrl string,
+ authenticator connectors.HttpAuthenticator,
+ tags map[string]string,
+) error {
+ sparkReceiver := NewSparkMetricsReceiver(
+ i,
+ sparkContextUiUrl,
+ authenticator,
+ viper.GetString("spark.metricPrefix"),
+ tags,
+ )
+
+ mp.AddReceiver(sparkReceiver)
+
+ return nil
+}
diff --git a/opentelemetry/README.md b/opentelemetry/README.md
deleted file mode 100644
index 95c1a4c..0000000
--- a/opentelemetry/README.md
+++ /dev/null
@@ -1,47 +0,0 @@
-## OpenTelemetry Collector Integration with Databricks
-
-This Readme provides step-by-step instructions for setting up the OpenTelemetry Collector using `ApacheSpark` receiver for monitoring Databricks.
-
-### Adding Initialization Scripts to Databricks
-
-Databricks Initialization Scripts are shell scripts that run when a cluster is starting. They are useful for setting up custom configurations or third-party integrations such as setting up monitoring agents.
-
-### Step 1: Download and Extract OpenTelemetry Collector
-
-1. **Add script to Databricks:** Create new file in workspace as otel-installation.sh and add the below script to download and extract the OpenTelemetry Collector Contrib archive.
-
-```bash
-curl --proto '=https' --tlsv1.2 -fOL https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.95.0/otelcol-contrib_0.95.0_linux_386.tar.gz
-tar -xvf otelcol-contrib_0.95.0_linux_386.tar.gz
-```
-
-### Step 2: Create the Configuration File
-Create a config.yaml file for the OpenTelemetry Collector with your desired configuration. Below is an example configuration, replace $DRIVER_HOST and $SPARKUIPORT with specific values related to your environment.
-
-```bash
-receivers:
- apachespark:
- collection_interval: 5s
- endpoint: http://$DRIVER_HOST:$SPARKUIPORT
-
-exporters:
- otlp:
- endpoint:
- headers:
- api-key:
-
-service:
- pipelines:
- metrics:
- receivers: [apachespark]
- exporters: [otlp]
-```
-
-### Step 3: Configure Initialization Script
-* Add the script to your Databricks cluster:** To add the initialization script to your cluster in Databricks, follow these steps.
-
- - Navigate to your Databricks workspace and go to the `Clusters` page.
- - Choose the cluster you want to add the script to and click `Edit`.
- - In the `Advanced Options` section, find the `Init Scripts` field.
- - Click on `Add`, then in the Script Path input, select workspace or cloud storage path where your script is stored.
- - Click `Confirm` and then `Update`.
diff --git a/pkg/config/config.go b/pkg/config/config.go
deleted file mode 100644
index 7a7f53c..0000000
--- a/pkg/config/config.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package config
-
-import (
- "strconv"
-
- "newrelic/multienv/pkg/connect"
- "newrelic/multienv/pkg/deser"
-)
-
-// Recever configuration
-type RecvConfig struct {
- Connectors []connect.Connector
- Deser deser.DeserFunc
-}
-
-// Processor configuration
-type ProcConfig struct {
- Model any
-}
-
-type ExporterType string
-
-func (expor ExporterType) Check() bool {
- switch expor {
- case NrApi:
- case NrMetrics:
- case NrEvents:
- case NrLogs:
- case NrTraces:
- case NrInfra:
- case Prometheus:
- case Otel:
- case Dummy:
- default:
- return false
- }
- return true
-}
-
-const (
- NrInfra ExporterType = "nrinfra"
- NrApi ExporterType = "nrapi"
- NrMetrics ExporterType = "nrmetrics"
- NrEvents ExporterType = "nrevents"
- NrLogs ExporterType = "nrlogs"
- NrTraces ExporterType = "nrtraces"
- Otel ExporterType = "otel"
- Prometheus ExporterType = "prom"
- Dummy ExporterType = "dummy"
-)
-
-// Data pipeline configuration.
-type PipelineConfig struct {
- Interval uint
- Exporter ExporterType
- Custom map[string]any
-}
-
-func (conf PipelineConfig) GetString(key string) (string, bool) {
- val, ok := conf.Custom[key]
- if ok {
- val_str, ok := val.(string)
- if ok {
- return val_str, true
- }
-
- val_int, ok := val.(int)
- if ok {
- return strconv.Itoa(val_int), true
- }
-
- return "", false
- } else {
- return "", false
- }
-}
-
-func (conf PipelineConfig) GetInt(key string) (int, bool) {
- val, ok := conf.Custom[key]
- if ok {
- val_int, ok := val.(int)
- if ok {
- return val_int, true
- } else {
- return 0, false
- }
- } else {
- return 0, false
- }
-}
-
-func (conf PipelineConfig) GetMap(key string) (map[string]any, bool) {
- val, ok := conf.Custom[key]
- if ok {
- val_map, ok := val.(map[string]any)
- if ok {
- return val_map, true
- } else {
- return nil, false
- }
- } else {
- return nil, false
- }
-}
diff --git a/pkg/connect/http.go b/pkg/connect/http.go
deleted file mode 100644
index 05f89ad..0000000
--- a/pkg/connect/http.go
+++ /dev/null
@@ -1,243 +0,0 @@
-package connect
-
-import (
- "bytes"
- "errors"
- "io"
- "io/ioutil"
- "net/http"
- "strings"
- "time"
-)
-
-type HttpMethod int
-
-const (
- Undefined HttpMethod = iota
- Get
- Post
- Head
- Put
- Delete
- Connect
- Options
- Trace
- Patch
-)
-
-const defaultTimeout = 5 * time.Second
-
-type BuilderFunc = func(*HttpConfig) (*http.Request, error)
-
-type HttpConfig struct {
- Method HttpMethod
- Url string
- Body any
- Headers map[string]string
- Builder BuilderFunc
- Timeout time.Duration
- Model string
- Data map[string]interface{}
-}
-
-type HttpConnector struct {
- Config HttpConfig
-}
-
-func (c *HttpConnector) SetConfig(config any) {
- conf, ok := config.(HttpConfig)
- if !ok {
- panic("Expected an HttpConfig struct.")
- }
- c.Config = conf
-}
-
-func (c *HttpConnector) Request() ([]byte, ConnecError) {
- if c.Config.Builder != nil {
- req, err := c.Config.Builder(&c.Config)
- if err != nil {
- return nil, MakeConnectErr(err, 0)
- }
- res, reqErr := httpRequest(req, c.Config.Timeout)
- return []byte(res), reqErr
- } else {
- switch c.Config.Method {
- case Get:
- res, err := httpGet(c.Config.Url, c.Config.Headers, c.Config.Timeout)
- return []byte(res), err
- case Post:
- switch body := c.Config.Body.(type) {
- case io.Reader:
- res, err := httpPost(c.Config.Url, body, c.Config.Headers, c.Config.Timeout)
- return []byte(res), err
- case string:
- res, err := httpPost(c.Config.Url, strings.NewReader(body), c.Config.Headers, c.Config.Timeout)
- return []byte(res), err
- case []byte:
- res, err := httpPost(c.Config.Url, bytes.NewReader(body), c.Config.Headers, c.Config.Timeout)
- return []byte(res), err
- default:
- return nil, MakeConnectErr(errors.New("Unsupported type for body"), 0)
- }
- default:
- return nil, MakeConnectErr(errors.New("Unsupported HTTP method"), 0)
- }
- }
-}
-
-func (c *HttpConnector) SetConnectorModelName(model string) {
- c.Config.Model = model
-}
-
-func (c *HttpConnector) ConnectorModel() string {
- return c.Config.Model
-}
-
-func (c *HttpConnector) SetCustomData(data map[string]interface{}) {
- c.Config.Data = data
-}
-
-func (c *HttpConnector) ConnectorCustomData() map[string]interface{} {
- return c.Config.Data
-}
-
-func (c *HttpConnector) ConnectorID() string {
- return "HTTP"
-}
-
-func (c *HttpConnector) SetReqBuilder(builder BuilderFunc) {
- c.Config.Builder = builder
-}
-
-func (c *HttpConnector) SetMethod(method HttpMethod) {
- c.Config.Method = method
-}
-
-func (c *HttpConnector) SetUrl(url string) {
- c.Config.Url = url
-}
-
-func (c *HttpConnector) SetBody(body any) {
- c.Config.Body = body
-}
-
-func (c *HttpConnector) SetHeaders(headers map[string]string) {
- c.Config.Headers = headers
-}
-
-func (c *HttpConnector) SetTimeout(timeout time.Duration) {
- c.Config.Timeout = timeout
-}
-
-// Build and Connector for HTTP GET requests.
-func MakeHttpGetConnector(url string, headers map[string]string) HttpConnector {
- return HttpConnector{
- Config: HttpConfig{
- Method: Get,
- Url: url,
- Headers: headers,
- Timeout: defaultTimeout,
- },
- }
-}
-
-// Build and Connector for HTTP POST requests.
-func MakeHttpPostConnector(url string, body any, headers map[string]string) HttpConnector {
- return HttpConnector{
- Config: HttpConfig{
- Method: Post,
- Url: url,
- Body: body,
- Headers: headers,
- Timeout: defaultTimeout,
- },
- }
-}
-
-// Build and Connector for HTTP GET requests.
-func MakeHttpConnectorWithBuilder(builder BuilderFunc) HttpConnector {
- return HttpConnector{
- Config: HttpConfig{
- Builder: builder,
- Timeout: defaultTimeout,
- },
- }
-}
-
-func httpGet(url string, headers map[string]string, timeout time.Duration) (string, ConnecError) {
- req, err := http.NewRequest("GET", url, nil)
-
- for key, val := range headers {
- req.Header.Add(key, val)
- }
-
- client := http.DefaultClient
- client.Timeout = timeout
- resp, err := client.Do(req)
-
- if err != nil {
- if resp != nil {
- return "", MakeConnectErr(err, resp.StatusCode)
- } else {
- return "", MakeConnectErr(err, 0)
- }
- }
- defer resp.Body.Close()
-
- body, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return "", MakeConnectErr(err, resp.StatusCode)
- }
-
- return string(body), ConnecError{}
-}
-
-func httpPost(url string, reqBody io.Reader, headers map[string]string, timeout time.Duration) (string, ConnecError) {
- req, err := http.NewRequest("POST", url, reqBody)
-
- for key, val := range headers {
- req.Header.Add(key, val)
- }
-
- client := http.DefaultClient
- client.Timeout = timeout
- resp, err := client.Do(req)
-
- if err != nil {
- if resp != nil {
- return "", MakeConnectErr(err, resp.StatusCode)
- } else {
- return "", MakeConnectErr(err, 0)
- }
- }
- defer resp.Body.Close()
-
- body, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return "", MakeConnectErr(err, resp.StatusCode)
- }
-
- return string(body), ConnecError{}
-}
-
-func httpRequest(req *http.Request, timeout time.Duration) (string, ConnecError) {
- client := http.DefaultClient
- client.Timeout = timeout
- resp, err := client.Do(req)
-
- if err != nil {
- if resp != nil {
- return "", MakeConnectErr(err, resp.StatusCode)
- } else {
- return "", MakeConnectErr(err, 0)
- }
- }
- defer resp.Body.Close()
-
- body, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return "", MakeConnectErr(err, resp.StatusCode)
- }
-
- return string(body), ConnecError{}
-}
diff --git a/pkg/connect/interface.go b/pkg/connect/interface.go
deleted file mode 100644
index 2970678..0000000
--- a/pkg/connect/interface.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package connect
-
-type ConnecError struct {
- Err error
- ErrCode int
-}
-
-func MakeConnectErr(err error, errCode int) ConnecError {
- return ConnecError{
- Err: err,
- ErrCode: errCode,
- }
-}
-
-type Connector interface {
- SetConfig(any)
- Request() ([]byte, ConnecError)
- ConnectorID() string
- ConnectorModel() string
- ConnectorCustomData() map[string]interface{}
-}
diff --git a/pkg/deser/interface.go b/pkg/deser/interface.go
deleted file mode 100644
index d32e235..0000000
--- a/pkg/deser/interface.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package deser
-
-type DeserFunc = func([]byte) (interface{}, string, error)
diff --git a/pkg/deser/json.go b/pkg/deser/json.go
deleted file mode 100644
index 8391789..0000000
--- a/pkg/deser/json.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package deser
-
-import (
- "encoding/json"
- "fmt"
-)
-
-// JSON deserializer
-//func DeserJson(data []byte, v *map[string]any) error {
-// err := json.Unmarshal(data, v)
-// if err != nil {
-// log.Error("Deser error = ", err.Error())
-// }
-// return err
-//}
-
-func DeserJson(data []byte) (any, string, error) {
- var v map[string]interface{}
- if err := json.Unmarshal(data, &v); err == nil {
- return v, "object", nil
- }
-
- var a []map[string]interface{}
- if err := json.Unmarshal(data, &a); err == nil {
- return a, "array", nil
- }
-
- return nil, "", fmt.Errorf("unknown type")
-
-}
diff --git a/pkg/env/infra/config.go b/pkg/env/infra/config.go
deleted file mode 100644
index 8503378..0000000
--- a/pkg/env/infra/config.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package infra
-
-import (
- "errors"
- "newrelic/multienv/pkg/config"
- "os"
-
- "gopkg.in/yaml.v3"
-)
-
-func LoadConfig() (config.PipelineConfig, error) {
- confPath := os.Getenv("CONFIG_PATH")
- if confPath == "" {
- return config.PipelineConfig{}, errors.New("CONFIG_PATH environment variable is empty")
- }
-
- yamlFile, err := os.ReadFile(confPath)
- if err != nil {
- return config.PipelineConfig{}, err
- }
-
- var pipeConfigMap map[string]any
- err = yaml.Unmarshal(yamlFile, &pipeConfigMap)
- if err != nil {
- return config.PipelineConfig{}, err
- }
-
- var pipeConfig = config.PipelineConfig{
- Interval: 0,
- Exporter: config.ExporterType(config.NrInfra),
- Custom: pipeConfigMap,
- }
-
- return pipeConfig, nil
-}
diff --git a/pkg/env/lambda/config.go b/pkg/env/lambda/config.go
deleted file mode 100644
index 91393d3..0000000
--- a/pkg/env/lambda/config.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package lambda
-
-import (
- "os"
- "strconv"
- "strings"
-
- "newrelic/multienv/pkg/config"
-)
-
-func LoadConfig() config.PipelineConfig {
- pipeConf := config.PipelineConfig{
- Custom: map[string]any{},
- }
-
- for _, e := range os.Environ() {
- pair := strings.SplitN(e, "=", 2)
- key := pair[0]
- val := pair[1]
- switch key {
- case "interval":
- interval, err := strconv.ParseUint(val, 10, 0)
- if err != nil {
- interval = 60
- }
- pipeConf.Interval = uint(interval)
- case "exporter":
- exporter := config.ExporterType(val)
- if exporter.Check() {
- pipeConf.Exporter = exporter
- }
- default:
- pipeConf.Custom[key] = val
- }
- }
-
- return pipeConf
-}
diff --git a/pkg/env/standalone/buffer.go b/pkg/env/standalone/buffer.go
deleted file mode 100644
index 3d10fd4..0000000
--- a/pkg/env/standalone/buffer.go
+++ /dev/null
@@ -1,8 +0,0 @@
-package standalone
-
-type Buffer[T any] interface {
- Put(T) bool
- Capacity() int
- Size() int
- Clear() *[]T
-}
diff --git a/pkg/env/standalone/config.go b/pkg/env/standalone/config.go
deleted file mode 100644
index ed1481a..0000000
--- a/pkg/env/standalone/config.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package standalone
-
-import (
- "errors"
- "os"
- "sync"
- "sync/atomic"
-
- "newrelic/multienv/pkg/config"
-
- "gopkg.in/yaml.v3"
-)
-
-type SharedConfig[T any] struct {
- workerConfig T
- workerConfigMu sync.Mutex
- workerIsRunning atomic.Bool
-}
-
-func (w *SharedConfig[T]) SetIsRunning() bool {
- return w.workerIsRunning.Swap(true)
-}
-
-func (w *SharedConfig[T]) SetConfig(config T) {
- w.workerConfigMu.Lock()
- w.workerConfig = config
- w.workerConfigMu.Unlock()
-}
-
-func (w *SharedConfig[T]) Config() T {
- w.workerConfigMu.Lock()
- config := w.workerConfig
- w.workerConfigMu.Unlock()
- return config
-}
-
-func LoadConfig(filePath string) (config.PipelineConfig, error) {
- yamlFile, err := os.ReadFile(filePath)
- if err != nil {
- return config.PipelineConfig{}, err
- }
-
- var pipeConfigMap map[string]any
- err = yaml.Unmarshal(yamlFile, &pipeConfigMap)
- if err != nil {
- return config.PipelineConfig{}, err
- }
-
- interval, ok := pipeConfigMap["interval"]
- if !ok {
- interval = 60
- }
-
- if _, ok := interval.(int); !ok {
- return config.PipelineConfig{}, errors.New("Interval must be an integer")
- }
-
- if interval.(int) <= 0 {
- interval = 60
- }
-
- delete(pipeConfigMap, "interval")
- delete(pipeConfigMap, "exporter")
-
- var pipeConfig = config.PipelineConfig{
- Interval: uint(interval.(int)),
- Exporter: config.NrMetrics,
- Custom: pipeConfigMap,
- }
-
- return pipeConfig, nil
-}
diff --git a/pkg/env/standalone/expor_worker.go b/pkg/env/standalone/expor_worker.go
deleted file mode 100644
index 9b4a479..0000000
--- a/pkg/env/standalone/expor_worker.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package standalone
-
-import (
- "newrelic/multienv/pkg/config"
- "newrelic/multienv/pkg/export"
- "newrelic/multienv/pkg/model"
- "time"
-
- log "github.com/sirupsen/logrus"
-)
-
-type ExpWorkerConfig struct {
- InChannel <-chan model.MeltModel
- BatchSize int
- HarvestTime int
- Exporter export.ExportFunc
-}
-
-type ExporterWorker struct {
- config ExpWorkerConfig
- pipeConfig config.PipelineConfig
- isRunning bool
-}
-
-//var expWorkerConfig SharedConfig[ExpWorkerConfig]
-//var pipelineConfig SharedConfig[config.PipelineConfig]
-
-func MakeExporterWorker(config ExpWorkerConfig, pipeConf config.PipelineConfig) ExporterWorker {
- return ExporterWorker{config: config, pipeConfig: pipeConf, isRunning: false}
-}
-
-func (exporterWorker *ExporterWorker) InitExporter() {
- if !exporterWorker.isRunning {
- log.Println("Starting exporter worker...")
- exporterWorker.isRunning = true
- go exporterWorker.exporterWorker()
- } else {
- log.Println("Exporter worker already running, config updated.")
- }
-}
-
-func (exporterWorker *ExporterWorker) exporterWorker() {
- buffer := MakeReservoirBuffer[model.MeltModel](500)
- pre := time.Now().Unix()
-
- for {
- config := exporterWorker.config
- harvestTime := time.Duration(config.HarvestTime) * time.Second
-
- data := <-config.InChannel
- switch data.Type {
- case model.Metric:
- metric, _ := data.Metric()
- log.Println("Exporter received a Metric", metric.Name)
- case model.Event:
- event, _ := data.Event()
- log.Println("Exporter received an Event", event.Type)
- case model.Log:
- dlog, _ := data.Log()
- log.Println("Exporter received a Log", dlog.Message, dlog.Type)
- case model.Trace:
- //TODO
- log.Warn("TODO: Exporter received a Trace")
- }
-
- buffer.Put(data)
-
- now := time.Now().Unix()
- bufSize := buffer.Size()
-
- if now-pre >= int64(harvestTime.Seconds()) || bufSize >= config.BatchSize {
- buf := *buffer.Clear()
-
- log.Println("Harvest cycle, buffer size = ", bufSize)
-
- err := config.Exporter(exporterWorker.pipeConfig, buf[0:bufSize])
-
- if err != nil {
- log.Error("Exporter failed = ", err)
- //TODO: handle error condition, refill buffer? Discard data? Retry?
- }
-
- pre = time.Now().Unix()
- }
- }
-}
diff --git a/pkg/env/standalone/pipeline.go b/pkg/env/standalone/pipeline.go
deleted file mode 100644
index b19b5df..0000000
--- a/pkg/env/standalone/pipeline.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package standalone
-
-import (
- "newrelic/multienv/integration"
- "newrelic/multienv/pkg/config"
- "newrelic/multienv/pkg/export"
- "newrelic/multienv/pkg/model"
-
- log "github.com/sirupsen/logrus"
-)
-
-// Init data pipeline.
-func InitPipeline(pipeConf config.PipelineConfig, recvConfig config.RecvConfig, procConfig config.ProcConfig, proc ProcessorFunc) {
- bufferSize, _ := pipeConf.GetInt("buffer")
-
- if bufferSize < 100 {
- bufferSize = 100
- }
-
- if bufferSize > 1000 {
- bufferSize = 1000
- }
-
- batchSize, _ := pipeConf.GetInt("batch_size")
-
- if batchSize < 10 {
- batchSize = 10
- }
-
- if batchSize > 100 {
- batchSize = 100
- }
-
- harvestTime, _ := pipeConf.GetInt("harvest_time")
-
- if harvestTime < 60 {
- harvestTime = 60
- }
-
- recvToProcCh := make(chan map[string]any, bufferSize)
- metricsProcToExpCh := make(chan model.MeltModel, bufferSize)
- eventsProcToExpCh := make(chan model.MeltModel, bufferSize)
-
- metricsExporter := MakeExporterWorker(ExpWorkerConfig{
- InChannel: metricsProcToExpCh,
- HarvestTime: harvestTime,
- BatchSize: batchSize,
- Exporter: export.SelectExporter(config.NrMetrics),
- }, pipeConf)
-
- metricsExporter.InitExporter()
-
- eventsExporter := MakeExporterWorker(ExpWorkerConfig{
- InChannel: eventsProcToExpCh,
- HarvestTime: harvestTime,
- BatchSize: batchSize,
- Exporter: export.SelectExporter(config.NrEvents),
- }, pipeConf)
-
- eventsExporter.InitExporter()
-
- InitProcessor(ProcWorkerConfig{
- Processor: proc,
- Model: procConfig.Model,
- InChannel: recvToProcCh,
- MetricsOutChannel: metricsProcToExpCh,
- EventsOutChannel: eventsProcToExpCh,
- })
- InitReceiver(RecvWorkerConfig{
- IntervalSec: pipeConf.Interval,
- Connectors: recvConfig.Connectors,
- Deserializer: recvConfig.Deser,
- OutChannel: recvToProcCh,
- })
-}
-
-// Start Integration
-func Start(pipeConf config.PipelineConfig) error {
- recvConfig, err := integration.InitRecv(&pipeConf)
- if err != nil {
- log.Error("Error initializing receiver: ", err)
- return err
- }
- procConfig, err := integration.InitProc(&pipeConf)
- if err != nil {
- log.Error("Error initializing processor: ", err)
- return err
- }
- InitPipeline(pipeConf, recvConfig, procConfig, integration.Proc)
- return nil
-}
diff --git a/pkg/env/standalone/proc_worker.go b/pkg/env/standalone/proc_worker.go
deleted file mode 100644
index 082ddb9..0000000
--- a/pkg/env/standalone/proc_worker.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package standalone
-
-import (
- "newrelic/multienv/pkg/model"
-
- "github.com/mitchellh/mapstructure"
- log "github.com/sirupsen/logrus"
-)
-
-type ProcessorFunc = func(any) []model.MeltModel
-
-type ProcWorkerConfig struct {
- Processor ProcessorFunc
- Model any
- InChannel <-chan map[string]any
- MetricsOutChannel chan<- model.MeltModel
- EventsOutChannel chan<- model.MeltModel
-}
-
-var procWorkerConfigHoldr SharedConfig[ProcWorkerConfig]
-
-func InitProcessor(config ProcWorkerConfig) {
- procWorkerConfigHoldr.SetConfig(config)
- if !procWorkerConfigHoldr.SetIsRunning() {
- log.Println("Starting processor worker...")
- go processorWorker()
- } else {
- log.Println("Processor worker already running, config updated.")
- }
-}
-
-func processorWorker() {
- for {
- config := procWorkerConfigHoldr.Config()
- configModel := config.Model
- data := <-config.InChannel
- err := mapstructure.Decode(data, &configModel)
- if err == nil {
- for _, val := range config.Processor(configModel) {
-
- switch val.Type {
- case model.Metric:
- config.MetricsOutChannel <- val
- case model.Event:
- config.EventsOutChannel <- val
-
- default:
- log.Warn("Model type unknown")
- }
- }
- } else {
- log.Error("Error decoding data = ", err)
- }
- }
-}
diff --git a/pkg/env/standalone/recv_worker.go b/pkg/env/standalone/recv_worker.go
deleted file mode 100644
index 9c2fe4e..0000000
--- a/pkg/env/standalone/recv_worker.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package standalone
-
-import (
- "sync"
- "time"
-
- "newrelic/multienv/pkg/connect"
- "newrelic/multienv/pkg/deser"
-
- log "github.com/sirupsen/logrus"
-)
-
-type RecvWorkerConfig struct {
- IntervalSec uint
- Connectors []connect.Connector
- Deserializer deser.DeserFunc
- OutChannel chan<- map[string]any
-}
-
-var recvWorkerConfigHoldr SharedConfig[RecvWorkerConfig]
-
-func InitReceiver(config RecvWorkerConfig) {
- recvWorkerConfigHoldr.SetConfig(config)
- if !recvWorkerConfigHoldr.SetIsRunning() {
- log.Println("Starting receiver worker...")
- go receiverWorker()
- } else {
- log.Println("Receiver worker already running, config updated.")
- }
-}
-
-func receiverWorker() {
- for {
- config := recvWorkerConfigHoldr.Config()
- pre := time.Now().Unix()
-
- wg := &sync.WaitGroup{}
- wg.Add(len(config.Connectors))
-
- for _, connector := range config.Connectors {
- go func(connector connect.Connector) {
- defer wg.Done()
-
- data, err := connector.Request()
- if err.Err != nil {
- log.Error("Http Get error = ", err.Err.Error())
- delayBeforeNextReq(pre, &config)
- return
- }
-
- //log.Println("Data received: ", string(data))
-
- response, jsonType, desErr := config.Deserializer(data)
- if desErr == nil {
- switch jsonType {
- case "array":
- var rdata = map[string]any{}
- for _, responseObject := range response.([]map[string]any) {
- rdata["model"] = connector.ConnectorModel()
- rdata["customData"] = connector.ConnectorCustomData()
- rdata["response"] = responseObject
- config.OutChannel <- rdata
- }
- case "object":
- var rdata = map[string]any{}
- rdata["model"] = connector.ConnectorModel()
- rdata["customData"] = connector.ConnectorCustomData()
- rdata["response"] = response.(map[string]any)
- config.OutChannel <- rdata
- default:
- log.Warn("Response data type couldn't be identified")
- }
- }
- }(connector)
- }
-
- wg.Wait()
- log.Println("All Requests Completed")
-
- // Delay before the next request
- delayBeforeNextReq(pre, &config)
- }
-}
-
-func delayBeforeNextReq(pre int64, config *RecvWorkerConfig) {
- timeDiff := time.Now().Unix() - pre
- if timeDiff < int64(config.IntervalSec) {
- remainingDelay := int64(config.IntervalSec) - timeDiff
- time.Sleep(time.Duration(remainingDelay) * time.Second)
- }
-}
diff --git a/pkg/env/standalone/reservoir.go b/pkg/env/standalone/reservoir.go
deleted file mode 100644
index bf3f274..0000000
--- a/pkg/env/standalone/reservoir.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package standalone
-
-import (
- "math/rand"
-)
-
-type ReservoirBuffer[T any] struct {
- buff []T
- size int
- capacity int
- index int
-}
-
-func MakeReservoirBuffer[T any](capacity int) Buffer[T] {
- return &ReservoirBuffer[T]{
- buff: make([]T, capacity),
- size: 0,
- capacity: capacity,
- index: 0,
- }
-}
-
-// Implement reservoir sampling Algorithm R
-func (b *ReservoirBuffer[T]) Put(val T) bool {
- accepted := false
- if b.index < b.capacity {
- // Fill buffer
- b.buff[b.index] = val
- b.size += 1
- accepted = true
- } else {
- // Buffer is full, start sampling
- j := rand.Intn(b.index)
- if j < b.capacity {
- b.buff[j] = val
- accepted = true
- } else {
- accepted = false
- }
- }
- b.index += 1
- return accepted
-}
-
-func (b *ReservoirBuffer[T]) Capacity() int {
- return b.capacity
-}
-
-func (b *ReservoirBuffer[T]) Size() int {
- return b.size
-}
-
-func (b *ReservoirBuffer[T]) Clear() *[]T {
- ret := b.buff
- b.buff = make([]T, b.capacity)
- b.size = 0
- b.index = 0
- return &ret
-}
diff --git a/pkg/export/interface.go b/pkg/export/interface.go
deleted file mode 100644
index 95473e4..0000000
--- a/pkg/export/interface.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package export
-
-import (
- "newrelic/multienv/pkg/config"
- "newrelic/multienv/pkg/model"
-
- log "github.com/sirupsen/logrus"
-)
-
-type ExportFunc = func(config.PipelineConfig, []model.MeltModel) error
-
-func dummyExporter(pipeConf config.PipelineConfig, data []model.MeltModel) error {
- log.Warn("Dummy Exporter, do nothing")
- log.Warn(" Data = ", data)
- log.Warn(" Config = ", pipeConf)
- return nil
-}
-
-func SelectExporter(exporterType config.ExporterType) ExportFunc {
- switch exporterType {
- case config.NrApi:
- return exportNrApi
- case config.NrEvents:
- return exportNrEvent
- case config.NrMetrics:
- return exportNrMetric
- case config.NrLogs:
- return exportNrLog
- case config.NrTraces:
- return exportNrTrace
- case config.NrInfra:
- return exportNrInfra
- case config.Otel:
- return exportOtel
- case config.Prometheus:
- return exportProm
- default:
- return dummyExporter
- }
-}
diff --git a/pkg/export/nrapi.go b/pkg/export/nrapi.go
deleted file mode 100644
index b30a62a..0000000
--- a/pkg/export/nrapi.go
+++ /dev/null
@@ -1,278 +0,0 @@
-package export
-
-import (
- "bytes"
- "compress/gzip"
- "encoding/json"
- "errors"
- "newrelic/multienv/pkg/config"
- "newrelic/multienv/pkg/connect"
- "newrelic/multienv/pkg/model"
-
- log "github.com/sirupsen/logrus"
-)
-
-func exportNrApi(pipeConf config.PipelineConfig, melt []model.MeltModel) error {
- log.Print("------> NR API Exporter = ", melt)
-
- metricArray := []model.MeltModel{}
- eventArray := []model.MeltModel{}
- logArray := []model.MeltModel{}
-
- for _, element := range melt {
- switch element.Data.(type) {
- case model.MetricModel:
- metricArray = append(metricArray, element)
- case model.EventModel:
- eventArray = append(eventArray, element)
- case model.LogModel:
- logArray = append(logArray, element)
- case model.TraceModel:
- //TODO: implement traces
- }
- }
-
- if len(metricArray) > 0 {
- exportNrMetric(pipeConf, metricArray)
- }
-
- if len(eventArray) > 0 {
- exportNrEvent(pipeConf, eventArray)
- }
-
- if len(logArray) > 0 {
- exportNrLog(pipeConf, logArray)
- }
-
- return nil
-}
-
-func exportNrEvent(pipeConf config.PipelineConfig, data []model.MeltModel) error {
- log.Print("------> NR Event Exporter = ", data)
-
- jsonModel, err := intoNrEvent(data)
- if err != nil {
- log.Error("Error generating NR Event API data = ", err)
- return err
- }
-
- log.Print("NR Event JSON = ", string(jsonModel))
-
- return nrApiRequest(pipeConf, jsonModel, getEventEndpoint(pipeConf))
-}
-
-func exportNrMetric(pipeConf config.PipelineConfig, data []model.MeltModel) error {
- log.Print("------> NR Metric Exporter = ", data)
-
- jsonModel, err := intoNrMetric(data)
- if err != nil {
- log.Error("Error generating NR Metric API data = ", err)
- return err
- }
-
- log.Print("NR Metric JSON = ", string(jsonModel))
-
- return nrApiRequest(pipeConf, jsonModel, getMetricEndpoint(pipeConf))
-}
-
-func exportNrLog(pipeConf config.PipelineConfig, data []model.MeltModel) error {
- log.Print("------> NR Log Exporter = ", data)
-
- jsonModel, err := intoNrLog(data)
- if err != nil {
- log.Error("Error generating NR Log API data = ", err)
- return err
- }
-
- log.Print("NR Log JSON = ", string(jsonModel))
-
- return nrApiRequest(pipeConf, jsonModel, getLogEndpoint(pipeConf))
-}
-
-func exportNrTrace(pipeConf config.PipelineConfig, data []model.MeltModel) error {
- log.Print("------> TODO: NR Trace Exporter = ", data)
- return nil
-}
-
-func getEndpoint(pipeConf config.PipelineConfig) string {
- endpoint, ok := pipeConf.GetString("nr_endpoint")
- if !ok {
- endpoint = "US"
- } else {
- if endpoint != "US" && endpoint != "EU" {
- endpoint = "US"
- }
- }
- return endpoint
-}
-
-func getEventEndpoint(pipeConf config.PipelineConfig) string {
- accountId, ok := pipeConf.GetString("nr_account_id")
- if !ok {
- log.Error("'nr_account_id' not found in the pipeline config")
- return ""
- }
- if getEndpoint(pipeConf) == "US" {
- return "https://insights-collector.newrelic.com/v1/accounts/" + accountId + "/events"
- } else {
- return "https://insights-collector.eu01.nr-data.net/v1/accounts/" + accountId + "/events"
- }
-}
-
-func getMetricEndpoint(pipeConf config.PipelineConfig) string {
- if getEndpoint(pipeConf) == "US" {
- return "https://metric-api.newrelic.com/metric/v1"
- } else {
- return "https://metric-api.eu.newrelic.com/metric/v1"
- }
-}
-
-func getLogEndpoint(pipeConf config.PipelineConfig) string {
- if getEndpoint(pipeConf) == "US" {
- return "https://log-api.newrelic.com/log/v1"
- } else {
- return "https://log-api.eu.newrelic.com/log/v1"
- }
-}
-
-func intoNrEvent(meltData []model.MeltModel) ([]byte, error) {
- events := make([]map[string]any, 0)
- for _, element := range meltData {
- event, ok := element.Event()
- if ok {
- nrevent := map[string]any{
- "eventType": event.Type,
- "timestamp": element.Timestamp,
- }
- if element.Attributes != nil {
- for k, v := range element.Attributes {
- if k != "eventType" && k != "timestamp" {
- nrevent[k] = v
- }
- }
- }
- events = append(events, nrevent)
- }
- }
-
- return json.Marshal(events)
-}
-
-func intoNrMetric(meltData []model.MeltModel) ([]byte, error) {
- metrics := make([]map[string]any, 0)
- for _, element := range meltData {
- metric, ok := element.Metric()
- if ok {
- var nrmetric map[string]any
-
- switch metric.Type {
- case model.Gauge, model.CumulativeCount:
- nrmetric = map[string]any{
- "name": metric.Name,
- "type": "gauge",
- "value": metric.Value.Value(),
- "timestamp": element.Timestamp,
- }
- if element.Attributes != nil {
- nrmetric["attributes"] = element.Attributes
- }
- case model.Count:
- nrmetric = map[string]any{
- "name": metric.Name,
- "type": "count",
- "value": metric.Value.Value(),
- "interval.ms": metric.Interval.Milliseconds(),
- "timestamp": element.Timestamp,
- }
- if element.Attributes != nil {
- nrmetric["attributes"] = element.Attributes
- }
- case model.Summary:
- //TODO: implement summary metrics
- default:
- // Skip this metric
- continue
- }
-
- if element.Attributes != nil {
- nrmetric["attributes"] = element.Attributes
- }
- metrics = append(metrics, nrmetric)
- }
- }
-
- metricModel := []any{
- map[string]any{
- "metrics": metrics,
- },
- }
-
- return json.Marshal(metricModel)
-}
-
-func intoNrLog(meltData []model.MeltModel) ([]byte, error) {
- logs := make([]map[string]any, 0)
- for _, element := range meltData {
- log, ok := element.Log()
- if ok {
- nrlog := map[string]any{
- "message": log.Message,
- "timestamp": element.Timestamp,
- "logtype": log.Type,
- }
- if element.Attributes != nil {
- nrlog["attributes"] = element.Attributes
- }
- logs = append(logs, nrlog)
- }
- }
-
- logModel := []any{
- map[string]any{
- "logs": logs,
- },
- }
-
- return json.Marshal(logModel)
-}
-
-func gzipString(inputData string) ([]byte, error) {
- var b bytes.Buffer
- gz := gzip.NewWriter(&b)
- if _, err := gz.Write([]byte(inputData)); err != nil {
- return nil, err
- }
- if err := gz.Close(); err != nil {
- return nil, err
- }
- return b.Bytes(), nil
-}
-
-func nrApiRequest(pipeConf config.PipelineConfig, jsonModel []byte, endpoint string) error {
- apiKey, ok := pipeConf.GetString("nr_api_key")
- if !ok {
- return errors.New("'nr_api_key' not found in the pipeline config")
- }
- headers := map[string]string{
- "Api-Key": apiKey,
- "Content-Type": "application/json",
- "Content-Encoding": "gzip",
- }
-
- gzipBody, errGzip := gzipString(string(jsonModel))
- if errGzip != nil {
- log.Error("Error compressing body = ", errGzip)
- return errGzip
- }
-
- connector := connect.MakeHttpPostConnector(endpoint, gzipBody, headers)
- response, errReq := connector.Request()
- if errReq.Err != nil {
- log.Error("Error sending request to NR API = ", errReq)
- return errReq.Err
- }
-
- log.Print("Response from NR API = ", string(response))
-
- return nil
-}
diff --git a/pkg/export/nrinfra.go b/pkg/export/nrinfra.go
deleted file mode 100644
index 1b5d483..0000000
--- a/pkg/export/nrinfra.go
+++ /dev/null
@@ -1,163 +0,0 @@
-package export
-
-import (
- "fmt"
- "newrelic/multienv/pkg/config"
- "newrelic/multienv/pkg/model"
- "strconv"
- "time"
-
- log "github.com/sirupsen/logrus"
-
- "github.com/newrelic/infra-integrations-sdk/v4/data/event"
- "github.com/newrelic/infra-integrations-sdk/v4/data/metric"
- "github.com/newrelic/infra-integrations-sdk/v4/integration"
-)
-
-const NrInfraInventory = "NrInfraInventory"
-
-// Inventory model to insert into a MeltModel (as Custom MeltType)
-type NrInfraInventoryData struct {
- key string
- field string
- value any
-}
-
-// Build a MeltModel with a custom NrInfraInventoryData
-func MakeInventory(key string, field string, value any) model.MeltModel {
- data := NrInfraInventoryData{
- key: key,
- field: field,
- value: value,
- }
- return model.MakeCustom(NrInfraInventory, data, time.Now())
-}
-
-func exportNrInfra(pipeConf config.PipelineConfig, data []model.MeltModel) error {
- log.Print("------> NR Infra Exporter = ", data)
-
- name, ok := pipeConf.GetString("name")
- if !ok {
- name = "InfraIntegration"
- }
-
- version, ok := pipeConf.GetString("version")
- if !ok {
- version = "0.1.0"
- }
-
- // Create integration
- i, err := integration.New(name, version)
- if err != nil {
- log.Error("Error creating Nr Infra integration", err)
- return err
- }
-
- entityName, ok := pipeConf.GetString("entity_name")
- if !ok {
- entityName = "EntityName"
- }
-
- entityType, ok := pipeConf.GetString("entity_type")
- if !ok {
- entityType = "EntityType"
- }
-
- entityDisplay, ok := pipeConf.GetString("entity_display")
- if !ok {
- entityDisplay = "EntityDisplay"
- }
-
- // Create entity
- entity, err := i.NewEntity(entityName, entityType, entityDisplay)
- if err != nil {
- log.Error("Error creating entity", err)
- return err
- }
-
- for _, d := range data {
- if d.Type == model.Event || d.Type == model.Log {
- ev, ok := d.Event()
- if ok {
- nriEv, err := event.New(time.UnixMilli(d.Timestamp), "Event of type "+ev.Type, ev.Type)
- nriEv.Attributes = d.Attributes
- if err != nil {
- log.Error("Error creating event", err)
- } else {
- entity.AddEvent(nriEv)
- }
- }
- } else if d.Type == model.Metric {
- m, ok := d.Metric()
- if ok {
- switch m.Type {
- case model.Gauge:
- gauge, err := integration.Gauge(time.UnixMilli(d.Timestamp), m.Name, m.Value.Float())
- addAttributes(&d, &gauge)
- if err != nil {
- log.Error("Error creating gauge metric", err)
- } else {
- entity.AddMetric(gauge)
- }
- case model.Count, model.CumulativeCount:
- //TODO: NO TIME INTERVAL???
- count, err := integration.Count(time.UnixMilli(d.Timestamp), m.Name, m.Value.Float())
- addAttributes(&d, &count)
- if err != nil {
- log.Error("Error creating count metric", err)
- } else {
- entity.AddMetric(count)
- }
- case model.Summary:
- //TODO
- }
- }
- } else if d.Type == model.Custom {
- inv, ok := d.Custom()
- if ok {
- if inv.Id == NrInfraInventory {
- dat, ok := inv.Data.(NrInfraInventoryData)
- if ok {
- entity.AddInventoryItem(dat.key, dat.field, dat.value)
- } else {
- log.Error("Custom data should be of type NrInfraInventoryData")
- }
- } else {
- log.Warn("Ignored data, not NrInfraInventory")
- }
- }
- } else {
- log.Warn("Ignored data, not a metric, event or log: ", d)
- }
- }
-
- i.AddEntity(entity)
-
- err = i.Publish()
- if err != nil {
- log.Error("Error publishing", err)
- return err
- }
-
- return nil
-}
-
-func addAttributes(model *model.MeltModel, metric *metric.Metric) {
- for k, v := range model.Attributes {
- switch val := v.(type) {
- case string:
- (*metric).AddDimension(k, val)
- case int:
- (*metric).AddDimension(k, strconv.Itoa(val))
- case float32:
- (*metric).AddDimension(k, strconv.FormatFloat(float64(val), 'f', 2, 32))
- case float64:
- (*metric).AddDimension(k, strconv.FormatFloat(val, 'f', 2, 32))
- case fmt.Stringer:
- (*metric).AddDimension(k, val.String())
- default:
- log.Warn("Attribute of unsupported type: ", k, v)
- }
-
- }
-}
diff --git a/pkg/export/otel.go b/pkg/export/otel.go
deleted file mode 100644
index 7c96799..0000000
--- a/pkg/export/otel.go
+++ /dev/null
@@ -1,402 +0,0 @@
-package export
-
-import (
- "encoding/json"
- "newrelic/multienv/pkg/config"
- "newrelic/multienv/pkg/connect"
- "newrelic/multienv/pkg/model"
-
- log "github.com/sirupsen/logrus"
-)
-
-//// OTel data model for Logs
-
-type otelLogsData struct {
- ResourceLogs []otelResourceLog `json:"resourceLogs"`
-}
-
-type otelResourceLog struct {
- //TODO: add "resource" model
- ScopeLogs []otelScopeLog `json:"scopeLogs"`
-}
-
-type otelScopeLog struct {
- //TODO: add "scope" model
- LogRecords []otelLogRecord `json:"logRecords"`
-}
-
-type otelLogRecord struct {
- TimeUnixNano int64 `json:"timeUnixNano"`
- ObservedTimeUnixNano int64 `json:"observedTimeUnixNano"`
- SeverityText string `json:"severityText"`
- Body stringAttribute `json:"body"`
- Attributes []attribute `json:"attributes"`
-}
-
-//// OTel data model for Metrics
-
-type otelMetricsData struct {
- ResourceMetrics []otelResourceMetrics `json:"resourceMetrics"`
-}
-
-type otelResourceMetrics struct {
- //TODO: add "resource" model
- ScopeMetrics []otelScopeMetrics `json:"scopeMetrics"`
-}
-
-type otelScopeMetrics struct {
- //TODO: add "scope" model
- Metrics []any `json:"metrics"`
-}
-
-type otelMetricSum struct {
- Name string `json:"name"`
- Sum otelMetricSumData `json:"sum"`
-}
-
-type otelAggrTemp int
-
-const (
- AggrTempUnspecified otelAggrTemp = 0
- AggrTempDelta otelAggrTemp = 1
- AggrTempCumulative otelAggrTemp = 2
-)
-
-type otelMetricSumData struct {
- AggregationTemporality otelAggrTemp `json:"aggregationTemporality"`
- IsMonotonic bool `json:"isMonotonic"`
- DataPoints []otelNumberDataPoint `json:"dataPoints"`
-}
-
-type otelMetricGauge struct {
- Name string `json:"name"`
- Gauge otelMetricGaugeData `json:"gauge"`
-}
-
-type otelMetricGaugeData struct {
- DataPoints []otelNumberDataPoint `json:"dataPoints"`
-}
-
-type otelNumberDataPoint struct {
- //TODO: also include AsInt
- AsDouble float64 `json:"asDouble"`
- TimeUnixNano int64 `json:"timeUnixNano"`
- StartTimeUnixNano int64 `json:"startTimeUnixNano,omitempty"`
- Attributes []attribute `json:"attributes"`
-}
-
-//TODO: summary metric model
-
-//// Common OTel models
-
-//TODO: resource model
-//TODO: scope model
-
-type attribute struct {
- Key string `json:"key"`
- Value any `json:"value"`
-}
-
-type stringAttribute struct {
- StringValue string `json:"stringValue"`
-}
-
-type intAttribute struct {
- IntValue int64 `json:"intValue"`
-}
-
-type doubleAttribute struct {
- DoubleValue float64 `json:"doubleValue"`
-}
-
-type boolAttribute struct {
- BoolValue bool `json:"boolValue"`
-}
-
-func exportOtel(pipeConf config.PipelineConfig, data []model.MeltModel) error {
- metrics := make([]model.MeltModel, 0)
- logs := make([]model.MeltModel, 0)
-
- for _, m := range data {
- switch m.Type {
- case model.Metric:
- metrics = append(metrics, m)
- case model.Event, model.Log:
- logs = append(logs, m)
- case model.Trace:
- //TODO: append traces
- }
- }
-
- if len(metrics) > 0 {
- err := exportOtelMetrics(pipeConf, metrics)
- if err != nil {
- return err
- }
- }
-
- if len(logs) > 0 {
- err := exportOtelLogs(pipeConf, logs)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func exportOtelMetrics(pipeConf config.PipelineConfig, data []model.MeltModel) error {
- log.Print("------> OpenTelemetry Metric Exporter = ", data)
-
- metrics := make([]any, 0)
-
- // Generate metrics
- for _, d := range data {
- m, _ := d.Metric()
- switch m.Type {
- case model.Gauge:
- gauge := buildOtelGaugeMetric(d)
- metrics = append(metrics, gauge)
- case model.Count:
- count := buildOtelCountMetric(d)
- metrics = append(metrics, count)
- case model.CumulativeCount:
- count := buildOtelCumulCountMetric(d)
- metrics = append(metrics, count)
- case model.Summary:
- //TODO
- }
- }
-
- metricData := otelMetricsData{
- ResourceMetrics: []otelResourceMetrics{
- {
- ScopeMetrics: []otelScopeMetrics{
- {
- Metrics: metrics,
- },
- },
- },
- },
- }
-
- jsonData, err := json.Marshal(metricData)
- if err != nil {
- return err
- }
-
- log.Print("Metric data in JSON = ", string(jsonData))
-
- return otlpMetricRequest(pipeConf, jsonData)
-}
-
-func buildOtelGaugeMetric(melt model.MeltModel) otelMetricGauge {
- metric, _ := melt.Metric()
- return otelMetricGauge{
- Name: metric.Name,
- Gauge: otelMetricGaugeData{
- DataPoints: []otelNumberDataPoint{
- {
- AsDouble: metric.Value.Float(),
- TimeUnixNano: melt.Timestamp * 1000000,
- StartTimeUnixNano: 0,
- Attributes: convertAttributes(melt.Attributes),
- },
- },
- },
- }
-}
-
-func buildOtelCountMetric(melt model.MeltModel) otelMetricSum {
- metric, _ := melt.Metric()
- return otelMetricSum{
- Name: metric.Name,
- Sum: otelMetricSumData{
- IsMonotonic: true,
- AggregationTemporality: AggrTempDelta,
- DataPoints: []otelNumberDataPoint{
- {
- AsDouble: metric.Value.Float(),
- TimeUnixNano: melt.Timestamp * 1000000,
- StartTimeUnixNano: (melt.Timestamp - metric.Interval.Milliseconds()) * 1000000,
- Attributes: convertAttributes(melt.Attributes),
- },
- },
- },
- }
-}
-
-func buildOtelCumulCountMetric(melt model.MeltModel) otelMetricSum {
- metric, _ := melt.Metric()
- return otelMetricSum{
- Name: metric.Name,
- Sum: otelMetricSumData{
- IsMonotonic: true,
- AggregationTemporality: AggrTempCumulative,
- DataPoints: []otelNumberDataPoint{
- {
- AsDouble: metric.Value.Float(),
- TimeUnixNano: melt.Timestamp * 1000000,
- Attributes: convertAttributes(melt.Attributes),
- },
- },
- },
- }
-}
-
-func exportOtelLogs(pipeConf config.PipelineConfig, data []model.MeltModel) error {
- log.Print("------> OpenTelemetry Log Exporter = ", data)
-
- logRecords := []otelLogRecord{}
- for _, d := range data {
- l, _ := d.Log()
- logRecord := otelLogRecord{
- TimeUnixNano: d.Timestamp * 1000000,
- ObservedTimeUnixNano: d.Timestamp * 1000000,
- SeverityText: l.Type,
- Body: stringAttribute{
- StringValue: l.Message,
- },
- Attributes: convertAttributes(d.Attributes),
- }
- logRecords = append(logRecords, logRecord)
- }
-
- logsData := otelLogsData{
- ResourceLogs: []otelResourceLog{
- {
- ScopeLogs: []otelScopeLog{
- {
- LogRecords: logRecords,
- },
- },
- },
- },
- }
-
- jsonData, err := json.Marshal(logsData)
- if err != nil {
- return err
- }
-
- log.Print("Logs data in JSON = ", string(jsonData))
-
- return otlpLogRequest(pipeConf, jsonData)
-}
-
-func convertAttributes(attr map[string]any) []attribute {
- attrArr := []attribute{}
- for k, v := range attr {
- switch v := v.(type) {
- case string:
- a := attribute{
- Key: k,
- Value: stringAttribute{
- StringValue: v,
- },
- }
- attrArr = append(attrArr, a)
- case int:
- a := attribute{
- Key: k,
- Value: intAttribute{
- IntValue: int64(v),
- },
- }
- attrArr = append(attrArr, a)
- case float64:
- a := attribute{
- Key: k,
- Value: doubleAttribute{
- DoubleValue: v,
- },
- }
- attrArr = append(attrArr, a)
- case bool:
- a := attribute{
- Key: k,
- Value: boolAttribute{
- BoolValue: v,
- },
- }
- attrArr = append(attrArr, a)
- }
- }
- return attrArr
-}
-
-func otlpMetricRequest(pipeConf config.PipelineConfig, jsonModel []byte) error {
- endpoint, ok := pipeConf.GetString("otel_metric_endpoint")
- if !ok {
- endpoint = getOtelScheme(pipeConf) + "://" + getOtelEndpoint(pipeConf) + "/v1/metrics"
- }
- return otlpRequest(pipeConf, jsonModel, endpoint)
-}
-
-func otlpLogRequest(pipeConf config.PipelineConfig, jsonModel []byte) error {
- endpoint, ok := pipeConf.GetString("otel_log_endpoint")
- if !ok {
- endpoint = getOtelScheme(pipeConf) + "://" + getOtelEndpoint(pipeConf) + "/v1/logs"
- }
- return otlpRequest(pipeConf, jsonModel, endpoint)
-}
-
-func otlpRequest(pipeConf config.PipelineConfig, jsonModel []byte, endpoint string) error {
- headers := getOtelHeaders(pipeConf)
- headers["Content-Type"] = "application/json"
- headers["Content-Encoding"] = "gzip"
-
- gzipBody, errGzip := gzipString(string(jsonModel))
- if errGzip != nil {
- log.Error("Error compressing body = ", errGzip)
- return errGzip
- }
-
- connector := connect.MakeHttpPostConnector(endpoint, gzipBody, headers)
- response, errReq := connector.Request()
- if errReq.Err != nil {
- log.Error("Error sending request to OTel collector = ", errReq)
- return errReq.Err
- }
-
- log.Print("Response from OTel collector = ", string(response))
-
- return nil
-}
-
-func getOtelEndpoint(pipeConf config.PipelineConfig) string {
- endpoint, ok := pipeConf.GetString("otel_endpoint")
- if ok {
- return endpoint
- } else {
-		log.Warn("'otel_endpoint' not specified, falling back to 'otlp.nr-data.net:4318'")
- return "otlp.nr-data.net:4318"
- }
-}
-
-func getOtelScheme(pipeConf config.PipelineConfig) string {
- scheme, ok := pipeConf.GetString("otel_scheme")
- if ok {
- return scheme
- } else {
- return "https"
- }
-}
-
-func getOtelHeaders(pipeConf config.PipelineConfig) map[string]string {
- any_headers, ok := pipeConf.GetMap("otel_headers")
- if ok {
- headers := map[string]string{}
- for k, v := range any_headers {
- switch v := v.(type) {
- case string:
- headers[k] = v
- default:
-				log.Warn("Found a non-string value in 'otel_headers', ignoring it")
- }
- }
- return headers
- } else {
- return map[string]string{}
- }
-}
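
To make the wire format above concrete, here is a small self-contained sketch that prints the OTLP-style JSON body exportOtelMetrics would build for a single gauge. The struct definitions are trimmed copies of the unexported types above, kept only for illustration, and the metric name, attribute, and value are invented.

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Trimmed copies of the unexported payload types above, kept only for this example.
type attribute struct {
	Key   string `json:"key"`
	Value any    `json:"value"`
}

type stringAttribute struct {
	StringValue string `json:"stringValue"`
}

type numberDataPoint struct {
	AsDouble     float64     `json:"asDouble"`
	TimeUnixNano int64       `json:"timeUnixNano"`
	Attributes   []attribute `json:"attributes"`
}

type gaugeMetric struct {
	Name  string `json:"name"`
	Gauge struct {
		DataPoints []numberDataPoint `json:"dataPoints"`
	} `json:"gauge"`
}

func main() {
	g := gaugeMetric{Name: "queue.depth"}
	g.Gauge.DataPoints = []numberDataPoint{{
		AsDouble:     42,
		TimeUnixNano: time.Now().UnixMilli() * 1_000_000, // millis -> nanos, as in the exporter
		Attributes:   []attribute{{Key: "host", Value: stringAttribute{StringValue: "db-1"}}},
	}}

	// Same resourceMetrics > scopeMetrics > metrics nesting as otelMetricsData.
	payload := map[string]any{
		"resourceMetrics": []any{
			map[string]any{
				"scopeMetrics": []any{
					map[string]any{"metrics": []any{g}},
				},
			},
		},
	}

	out, _ := json.MarshalIndent(payload, "", "  ")
	fmt.Println(string(out))
}
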
diff --git a/pkg/export/prom.go b/pkg/export/prom.go
deleted file mode 100644
index b696f1d..0000000
--- a/pkg/export/prom.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package export
-
-import (
- "context"
- "errors"
- "fmt"
- "newrelic/multienv/pkg/config"
- "newrelic/multienv/pkg/model"
- "regexp"
- "strconv"
- "time"
-
- "github.com/castai/promwrite"
-
- log "github.com/sirupsen/logrus"
-)
-
-func exportProm(pipeConf config.PipelineConfig, data []model.MeltModel) error {
- log.Print("------> Prometheus Exporter = ", data)
-
- endpoint, ok := pipeConf.GetString("prom_endpoint")
- if !ok {
- return errors.New("Config key 'prom_endpoint' doesn't exist")
- }
-
- metrics := make([]promwrite.TimeSeries, 0)
-
- for _, m := range data {
- switch m.Type {
- case model.Metric:
- timeSeries, ok := metricIntoProm(m)
- if ok {
- metrics = append(metrics, timeSeries)
- } else {
- log.Warn("Metric can't be converted to Prometheus format")
- }
- default:
- log.Warn("Data sample is not a metric, can't be sent to Prometheus")
- }
- }
-
- if len(metrics) == 0 {
- return nil
- }
-
- promHeaders := getPromHeaders(pipeConf)
- credentials, ok_credentials := pipeConf.GetString("prom_credentials")
- if ok_credentials {
- promHeaders["Authorization"] = "Bearer " + credentials
- }
-
- client := promwrite.NewClient(endpoint)
- resp, err := client.Write(context.Background(), &promwrite.WriteRequest{TimeSeries: metrics}, promwrite.WriteHeaders(promHeaders))
-
- if err != nil {
- return err
- }
-
- log.Print("Prom response = ", resp)
-
- return nil
-}
-
-func metricIntoProm(melt model.MeltModel) (promwrite.TimeSeries, bool) {
- metric, ok := melt.Metric()
- if !ok {
- return promwrite.TimeSeries{}, false
- }
-
- labels := attributesToPromLabels(melt.Attributes)
- labels = append(labels, promwrite.Label{
- Name: "__name__",
- Value: nameToProm(metric.Name),
- })
-
- switch metric.Type {
-	// The interval is deliberately ignored, because Prometheus doesn't support delta counters
- case model.Gauge, model.Count, model.CumulativeCount:
- return promwrite.TimeSeries{
- Labels: labels,
- Sample: promwrite.Sample{
- Time: time.UnixMilli(melt.Timestamp),
- Value: metric.Value.Float(),
- },
- }, true
- default:
- return promwrite.TimeSeries{}, false
- }
-}
-
-func attributesToPromLabels(attr map[string]any) []promwrite.Label {
- labels := []promwrite.Label{}
- for k, v := range attr {
- switch val := v.(type) {
- case string:
- labels = append(labels, promwrite.Label{
- Name: nameToProm(k),
- Value: val,
- })
- case int:
- labels = append(labels, promwrite.Label{
- Name: nameToProm(k),
- Value: strconv.Itoa(val),
- })
- case float32:
- labels = append(labels, promwrite.Label{
- Name: nameToProm(k),
- Value: strconv.FormatFloat(float64(val), 'f', 2, 32),
- })
- case float64:
- labels = append(labels, promwrite.Label{
- Name: nameToProm(k),
-				Value: strconv.FormatFloat(val, 'f', 2, 64),
- })
- case fmt.Stringer:
- labels = append(labels, promwrite.Label{
- Name: nameToProm(k),
- Value: val.String(),
- })
- default:
- log.Warn("Attribute of unsupported type: ", k, v)
- }
- }
- return labels
-}
-
-// Convert a name to the Prometheus naming convention: only [a-zA-Z0-9_:] characters are allowed
-func nameToProm(name string) string {
- namePattern := regexp.MustCompile(`[^a-zA-Z0-9_:]`)
- return string(namePattern.ReplaceAll([]byte(name), []byte("_")))
-}
-
-func getPromHeaders(pipeConf config.PipelineConfig) map[string]string {
- any_headers, ok := pipeConf.GetMap("prom_headers")
- if ok {
- headers := map[string]string{}
- for k, v := range any_headers {
- switch v := v.(type) {
- case string:
- headers[k] = v
- default:
-				log.Warn("Found a non-string value in 'prom_headers', ignoring it")
- }
- }
- return headers
- } else {
- return map[string]string{}
- }
-}
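
The name sanitization done by nameToProm is the piece most likely to surprise users of this exporter, so here is a tiny standalone sketch of the same rule; toPromName and the sample names are illustrative, not part of the repository. Every character outside [a-zA-Z0-9_:] becomes an underscore before the series is sent via remote write.

package main

import (
	"fmt"
	"regexp"
)

// Same rule as nameToProm above: anything outside [a-zA-Z0-9_:] becomes "_",
// producing a valid Prometheus metric or label name.
var promNamePattern = regexp.MustCompile(`[^a-zA-Z0-9_:]`)

func toPromName(name string) string {
	return promNamePattern.ReplaceAllString(name, "_")
}

func main() {
	fmt.Println(toPromName("system.cpu.usage%")) // system_cpu_usage_
	fmt.Println(toPromName("db-1/queue depth"))  // db_1_queue_depth
}
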
diff --git a/pkg/model/event.go b/pkg/model/event.go
deleted file mode 100644
index 608925d..0000000
--- a/pkg/model/event.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package model
-
-import "time"
-
-// Event model variant.
-type EventModel struct {
- Type string
-}
-
-// Make an event.
-func MakeEvent(evType string, attributes map[string]any, timestamp time.Time) MeltModel {
- return MeltModel{
- Type: Event,
- Timestamp: timestamp.UnixMilli(),
- Attributes: attributes,
- Data: EventModel{
- Type: evType,
- },
- }
-}
diff --git a/pkg/model/log.go b/pkg/model/log.go
deleted file mode 100644
index ac3cdb0..0000000
--- a/pkg/model/log.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package model
-
-import (
- "time"
-)
-
-// Log model variant.
-type LogModel struct {
- Message string
- Type string
-}
-
-// Make a log.
-func MakeLog(message string, logType string, timestamp time.Time) MeltModel {
- return MeltModel{
- Type: Log,
- Timestamp: timestamp.UnixMilli(),
- Data: LogModel{
- Message: message,
- Type: logType,
- },
- }
-}
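
As a usage sketch for the MakeEvent and MakeLog constructors above (assuming the newrelic/multienv module path used throughout the imports; the sample strings are invented), events and logs wrap the same MeltModel envelope and differ only in their Data payload.

package main

import (
	"fmt"
	"time"

	"newrelic/multienv/pkg/model"
)

func main() {
	// An event carries its payload in Attributes; a log carries a message plus a type.
	ev := model.MakeEvent("UserLogin", map[string]any{"user": "alice"}, time.Now())
	lg := model.MakeLog("disk almost full", "warning", time.Now())

	fmt.Println(ev.Type == model.Event, lg.Type == model.Log) // true true
}
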
diff --git a/pkg/model/metric.go b/pkg/model/metric.go
deleted file mode 100644
index 160fc9b..0000000
--- a/pkg/model/metric.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package model
-
-import "time"
-
-type MetricType int
-
-const (
- Gauge MetricType = iota
- Count
- CumulativeCount
- Summary
- // TODO: add histogram metric type
-)
-
-// Metric model variant.
-type MetricModel struct {
- Name string
- Type MetricType
- Value Numeric
- Interval time.Duration
- //TODO: summary and histogram metric data model
-}
-
-// Make a gauge metric.
-func MakeGaugeMetric(name string, value Numeric, timestamp time.Time) MeltModel {
- return MeltModel{
- Type: Metric,
- Timestamp: timestamp.UnixMilli(),
- Data: MetricModel{
- Name: name,
- Type: Gauge,
- Value: value,
- },
- }
-}
-
-// Make a delta count metric.
-func MakeCountMetric(name string, value Numeric, interval time.Duration, timestamp time.Time) MeltModel {
- return MeltModel{
- Type: Metric,
- Timestamp: timestamp.UnixMilli(),
- Data: MetricModel{
- Name: name,
- Type: Count,
- Value: value,
- Interval: interval,
- },
- }
-}
-
-// Make a cumulative count metric.
-func MakeCumulativeCountMetric(name string, value Numeric, timestamp time.Time) MeltModel {
- return MeltModel{
- Type: Metric,
- Timestamp: timestamp.UnixMilli(),
- Data: MetricModel{
- Name: name,
- Type: CumulativeCount,
- Value: value,
- },
- }
-}
-
-//TODO: make summary metric
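
A short usage sketch for the metric constructors above (again assuming the newrelic/multienv module path; metric names and values are invented). Gauges carry a point-in-time value, delta counts additionally carry the interval they were accumulated over, and both wrap their value in the Numeric type defined in pkg/model/model.go.

package main

import (
	"fmt"
	"time"

	"newrelic/multienv/pkg/model"
)

func main() {
	now := time.Now()

	gauge := model.MakeGaugeMetric("cpu.usage", model.MakeFloatNumeric(0.73), now)
	count := model.MakeCountMetric("requests", model.MakeIntNumeric(128), 30*time.Second, now)

	g, _ := gauge.Metric()
	c, _ := count.Metric()
	fmt.Println(g.Type == model.Gauge, c.Interval) // true 30s
}
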
diff --git a/pkg/model/model.go b/pkg/model/model.go
deleted file mode 100644
index 97e52c0..0000000
--- a/pkg/model/model.go
+++ /dev/null
@@ -1,223 +0,0 @@
-package model
-
-import (
- "encoding/json"
- "errors"
- "strconv"
- "time"
-
- log "github.com/sirupsen/logrus"
-
- "github.com/mitchellh/mapstructure"
-)
-
-type MeltType int
-
-const (
- Metric MeltType = iota
- Event
- Log
- Trace
- Custom
-)
-
-// Intermediate model.
-type MeltModel struct {
- Type MeltType
- // Unix timestamp in millis.
- Timestamp int64
- Attributes map[string]any
- // Either a MetricModel, EventModel, LogModel, TraceModel or CustomModel.
- Data any
-}
-
-// Custom model variant.
-type CustomModel struct {
- Id string
- Data any
-}
-
-// Make a custom model.
-func MakeCustom(id string, data any, timestamp time.Time) MeltModel {
- return MeltModel{
- Type: Custom,
- Timestamp: timestamp.UnixMilli(),
- Data: CustomModel{
- Id: id,
- Data: data,
- },
- }
-}
-
-func (receiver *MeltModel) UnmarshalJSON(data []byte) error {
- var dict map[string]any
- err := json.Unmarshal(data, &dict)
- if err != nil {
- return err
- }
-
- var model MeltModel
- err = mapstructure.Decode(dict, &model)
- if err != nil {
- return err
- }
-
- meltData := model.Data.(map[string]any)
- switch model.Type {
- case Metric:
- var metricModel MetricModel
- err := mapstructure.Decode(meltData, &metricModel)
- if err != nil {
- return err
- }
- model.Data = metricModel
- case Event:
- var eventModel EventModel
- err := mapstructure.Decode(meltData, &eventModel)
- if err != nil {
- return err
- }
- model.Data = eventModel
- case Log:
- var logModel LogModel
- err := mapstructure.Decode(meltData, &logModel)
- if err != nil {
- return err
- }
- model.Data = logModel
- case Trace:
- //TODO: unmarshal Trace model
-
- //TODO: Unmarshal Custom model
-
- default:
- return errors.New("'Type' contains an invalid value " + strconv.Itoa(int(model.Type)))
- }
-
- *receiver = model
- return nil
-}
-
-func (m *MeltModel) Metric() (MetricModel, bool) {
- model, ok := m.Data.(MetricModel)
- return model, ok
-}
-
-// Event obtains an EventModel from the MeltModel.
-// If the inner data is a LogModel, it will be converted into an EventModel.
-// This transformation may cause data loss: if the Log had a key in `Attributes` named "message", it will be overwritten
-// with the contents of the `Message` field.
-func (m *MeltModel) Event() (EventModel, bool) {
- if m.Type == Log {
- // Convert Log into an Event
- logModel := m.Data.(LogModel)
- model := EventModel{
- Type: logModel.Type,
- }
- if m.Attributes == nil {
- m.Attributes = map[string]any{}
- }
- // Warning: if the log already had an attribute named "message", it will be overwritten
- if _, ok := m.Attributes["message"]; ok {
- log.Warn("Log2Event: Log already had an attribute named 'message', overwriting it")
- }
- m.Attributes["message"] = logModel.Message
- return model, true
- } else {
- model, ok := m.Data.(EventModel)
- return model, ok
- }
-}
-
-// Log obtains a LogModel from the MeltModel.
-// If the inner data is an EventModel, it will be converted into a LogModel.
-// If the Event doesn't have a key in `Attributes` named "message", the `Message` field will remain empty.
-func (m *MeltModel) Log() (LogModel, bool) {
- if m.Type == Event {
- message, ok := m.Attributes["message"].(string)
- if ok {
- delete(m.Attributes, "message")
- }
- eventModel, _ := m.Data.(EventModel)
- model := LogModel{
- Type: eventModel.Type,
- Message: message,
- }
- return model, true
- } else {
- model, ok := m.Data.(LogModel)
- return model, ok
- }
-}
-
-func (m *MeltModel) Trace() (TraceModel, bool) {
- model, ok := m.Data.(TraceModel)
- return model, ok
-}
-
-func (m *MeltModel) Custom() (CustomModel, bool) {
- model, ok := m.Data.(CustomModel)
- return model, ok
-}
-
-// Numeric model.
-type Numeric struct {
- IntOrFlt bool // true = Int, false = Float
- IntVal int64
- FltVal float64
-}
-
-// Reports whether the Numeric holds an integer.
-func (n *Numeric) IsInt() bool {
- return n.IntOrFlt
-}
-
-// Reports whether the Numeric holds a float.
-func (n *Numeric) IsFloat() bool {
- return !n.IntOrFlt
-}
-
-// Get float from Numeric.
-func (n *Numeric) Float() float64 {
- if n.IsFloat() {
- return n.FltVal
- } else {
- return float64(n.IntVal)
- }
-}
-
-// Get int from Numeric.
-func (n *Numeric) Int() int64 {
- if n.IsInt() {
- return n.IntVal
- } else {
- return int64(n.FltVal)
- }
-}
-
-// Get the underlying value from Numeric, either an int64 or a float64.
-func (n *Numeric) Value() any {
- if n.IsInt() {
- return n.IntVal
- } else {
- return n.FltVal
- }
-}
-
-// Make a Numeric from an int64.
-func MakeIntNumeric(val int64) Numeric {
- return Numeric{
- IntOrFlt: true,
- IntVal: val,
- FltVal: 0.0,
- }
-}
-
-// Make a Numeric from a float64.
-func MakeFloatNumeric(val float64) Numeric {
- return Numeric{
- IntOrFlt: false,
- IntVal: 0,
- FltVal: val,
- }
-}
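
The Event() and Log() accessors above convert between the two variants, with the "message" attribute acting as the bridge. The following is a small sketch of that round trip under the same module-path assumption as before; the payload strings are invented.

package main

import (
	"fmt"
	"time"

	"newrelic/multienv/pkg/model"
)

func main() {
	// Log -> Event: the log message is copied into Attributes["message"].
	lg := model.MakeLog("connection refused", "error", time.Now())
	ev, ok := lg.Event()
	fmt.Println(ok, ev.Type, lg.Attributes["message"]) // true error connection refused

	// Event -> Log: an event's "message" attribute becomes the log body.
	evt := model.MakeEvent("AlertFired", map[string]any{"message": "CPU above 90%"}, time.Now())
	asLog, _ := evt.Log()
	fmt.Println(asLog.Message) // CPU above 90%
}
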
diff --git a/pkg/model/trace.go b/pkg/model/trace.go
deleted file mode 100644
index 41d39ac..0000000
--- a/pkg/model/trace.go
+++ /dev/null
@@ -1,8 +0,0 @@
-package model
-
-// Trace model variant.
-type TraceModel struct {
- // TODO: define trace model
-}
-
-//TODO: MakeTrace