diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 93ea439..13ff0ed 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -25,9 +25,9 @@ If applicable, add screenshots to help explain your problem. **Environment (please complete the following information):** - OS: [e.g. Ubuntu 22.04] - - Numbat Versions: [e.g. v0.1] - Kubernetes Environment: [tip: Please include CRI and CNI as well as their versions] - Istio Environment: [tip: Please include Istio version as well as the install profiles] + - SentryFlow Versions: [e.g. v0.1] **Additional context** Add any other context about the problem here. diff --git a/.github/workflows/ci-test-go.yml b/.github/workflows/ci-test-go.yml index 53174c4..2227246 100644 --- a/.github/workflows/ci-test-go.yml +++ b/.github/workflows/ci-test-go.yml @@ -7,7 +7,7 @@ on: jobs: go-fmt-sentryflow: - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 @@ -20,7 +20,7 @@ jobs: working-directory: sentryflow go-lint-sentryflow: - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 @@ -33,7 +33,7 @@ jobs: working-directory: sentryflow go-sec-sentryflow: - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 @@ -46,7 +46,7 @@ jobs: working-directory: sentryflow license: - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 diff --git a/.github/workflows/ci-test-py.yml b/.github/workflows/ci-test-py.yml index e9e8d51..9b3cc2c 100644 --- a/.github/workflows/ci-test-py.yml +++ b/.github/workflows/ci-test-py.yml @@ -7,7 +7,7 @@ on: jobs: py-pip-ai-sentryflow: - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - uses: actions/setup-python@v4 @@ -20,8 +20,8 @@ jobs: pip install -r requirements.txt working-directory: ai-engine - py-lint-ai-sentryflow: - runs-on: ubuntu-20.04 + py-ruff-ai-sentryflow: + runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - uses: actions/setup-python@v4 @@ -34,19 +34,45 @@ jobs: python -m pip install --upgrade pip pip install -r requirements.txt working-directory: ai-engine - + + - name: Create pyproject.toml + run: | + echo "[tool.ruff.lint.per-file-ignores]" > pyproject.toml + echo '"stringlifier/*" = ["E402", "F811", "F401"]' >> pyproject.toml + working-directory: ai-engine + - name: Lint with Ruff run: | pip install ruff ruff --output-format=github . 
- continue-on-error: true + working-directory: ai-engine + + py-lint-ai-sentryflow: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: '3.11' + cache: 'pip' + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + pip install pylint + working-directory: ai-engine + + - name: Lint with Pylint + run: | + pylint ai_engine.py working-directory: ai-engine py-pep8-ai-sentryflow: - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: 'Run PEP8' uses: quentinguidee/pep8-action@v1 with: - arguments: '--max-line-length=120' + arguments: '--max-line-length=120 --exclude=*stringlifier/*,*protobuf/*' diff --git a/.github/workflows/sentryflow-pr-checks.yml b/.github/workflows/pr-checks.yml similarity index 96% rename from .github/workflows/sentryflow-pr-checks.yml rename to .github/workflows/pr-checks.yml index 1b84c0c..99e3146 100644 --- a/.github/workflows/sentryflow-pr-checks.yml +++ b/.github/workflows/pr-checks.yml @@ -1,4 +1,4 @@ -name: sentryflow-pr-checks +name: pr-checks on: pull_request: diff --git a/.github/workflows/sentryflow-release-image.yml b/.github/workflows/release.yml similarity index 97% rename from .github/workflows/sentryflow-release-image.yml rename to .github/workflows/release.yml index e53fffc..904b408 100644 --- a/.github/workflows/sentryflow-release-image.yml +++ b/.github/workflows/release.yml @@ -1,4 +1,4 @@ -name: sentryflow-release-image +name: release on: push: diff --git a/README.md b/README.md index 4626c7f..8e722b1 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # SentryFlow -[![SentryFlow Docker Build](https://github.com/5gsec/SentryFlow/actions/workflows/sentryflow-release-image.yml/badge.svg)](https://github.com/5gsec/SentryFlow/actions/workflows/sentryflow-release-image.yml) [![CI Test](https://github.com/5gsec/SentryFlow/actions/workflows/ci-test-go.yml/badge.svg)](https://github.com/5gsec/SentryFlow/actions/workflows/ci-test-go.yml) [![ci-test-py](https://github.com/5gsec/SentryFlow/actions/workflows/ci-test-py.yml/badge.svg)](https://github.com/5gsec/SentryFlow/actions/workflows/ci-test-py.yml) +[![SentryFlow Docker Build](https://github.com/5gsec/sentryflow/actions/workflows/release.yml/badge.svg)](https://github.com/5gsec/sentryflow/actions/workflows/release.yml) [![CI for SentryFlow](https://github.com/5gsec/sentryflow/actions/workflows/ci-test-go.yml/badge.svg)](https://github.com/5gsec/sentryflow/actions/workflows/ci-test-go.yml) [![CI for AI Engine](https://github.com/5gsec/sentryflow/actions/workflows/ci-test-py.yml/badge.svg)](https://github.com/5gsec/sentryflow/actions/workflows/ci-test-py.yml) SentryFlow is a cloud-native system for API observability and security, specializing in log collection, metric production, and data exportation. 
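The badge links above track the renamed workflows (release.yml and pr-checks.yml), and the CI edits move every job from ubuntu-20.04 to ubuntu-latest while splitting the AI-engine checks into three jobs: Ruff with per-file ignores for the vendored stringlifier code (written into a generated pyproject.toml), Pylint against ai_engine.py, and a PEP 8 pass that skips stringlifier/ and protobuf/. A minimal sketch of running the same checks locally, assuming ruff, pylint, and pycodestyle are pip-installed and that the pep8-action amounts to a pycodestyle invocation:

```python
# lint_local.py: hypothetical helper mirroring the three ci-test-py.yml jobs.
# Run from the repository root; the command lines are copied from the workflow.
import subprocess
import sys

CHECKS = [
    ["ruff", "--output-format=github", "."],         # py-ruff-ai-sentryflow
    ["pylint", "ai_engine.py"],                      # py-lint-ai-sentryflow
    ["pycodestyle", "--max-line-length=120",         # py-pep8-ai-sentryflow
     "--exclude=*stringlifier/*,*protobuf/*", "."],
]

failed = False
for cmd in CHECKS:
    print("[lint]", " ".join(cmd))
    # Each check runs inside ai-engine, matching the workflow's
    # working-directory; a nonzero return code marks the run as failed.
    if subprocess.run(cmd, cwd="ai-engine", check=False).returncode != 0:
        failed = True

sys.exit(1 if failed else 0)
```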
@@ -9,15 +9,18 @@ SentryFlow is a cloud-native system for API observability and security, speciali ![SentryFlow_Overview](docs/sentryflow_overview.png) ### Features + - Generation of API Access Logs -- Proudction of API Metrics and Statistics -- Inference of API Specifications +- Production of API Metrics +- AI-driven API Classification (Inference) ## Documentation ### Basic Information + - [Getting Started](docs/getting_started.md) - [Use Cases](examples/README.md) ### Contribution + - [Contribution Guide](contribution/README.md) diff --git a/ai-engine/.dockerignore b/ai-engine/.dockerignore index 23ca759..9767db2 100644 --- a/ai-engine/.dockerignore +++ b/ai-engine/.dockerignore @@ -1,6 +1,6 @@ -.idea .git .gitignore -protobuf +__pycache__/ Dockerfile -__pycache__/ \ No newline at end of file +protobuf/ +.idea/ \ No newline at end of file diff --git a/ai-engine/.gitignore b/ai-engine/.gitignore index 533d889..8ae8ce5 100644 --- a/ai-engine/.gitignore +++ b/ai-engine/.gitignore @@ -1,3 +1,2 @@ -.idea/ __pycache__/ -protobuf/ \ No newline at end of file +.idea/ diff --git a/ai-engine/.pylintrc b/ai-engine/.pylintrc new file mode 100644 index 0000000..1ffed1a --- /dev/null +++ b/ai-engine/.pylintrc @@ -0,0 +1,639 @@ +[MAIN] + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Clear in-memory caches upon conclusion of linting. Useful if running pylint +# in a server-like mode. +clear-cache-post-run=no + +# Load and enable all available extensions. Use --list-extensions to see a list +# all available extensions. +#enable-all-extensions= + +# In error mode, messages with a category besides ERROR or FATAL are +# suppressed, and no reports are done by default. Error mode is compatible with +# disabling specific errors. +#errors-only= + +# Always return a 0 (non-error) status code, even if lint errors are found. +# This is primarily useful in continuous integration scripts. +#exit-zero= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. +extension-pkg-allow-list= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. (This is an alternative name to extension-pkg-allow-list +# for backward compatibility.) +extension-pkg-whitelist= + +# Return non-zero exit code if any of these messages/categories are detected, +# even if score is above --fail-under value. Syntax same as enable. Messages +# specified are enabled, while categories only check already-enabled messages. +fail-on= + +# Specify a score threshold under which the program will exit with error. +fail-under=10 + +# Interpret the stdin as a python script, whose filename needs to be passed as +# the module_or_package argument. +#from-stdin= + +# Files or directories to be skipped. They should be base names, not paths. +ignore=CVS + +# Add files or directories matching the regular expressions patterns to the +# ignore-list. The regex matches against paths and can be in Posix or Windows +# format. Because '\\' represents the directory delimiter on Windows systems, +# it can't be used as an escape character. 
+ignore-paths= + +# Files or directories matching the regular expression patterns are skipped. +# The regex matches against base names, not paths. The default value ignores +# Emacs file locks +ignore-patterns=^\.# + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis). It +# supports qualified module names, as well as Unix pattern matching. +ignored-modules=protobuf,stringlifier + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use, and will cap the count on Windows to +# avoid hangs. +jobs=1 + +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# Minimum Python version to use for version dependent checks. Will default to +# the version used to run pylint. +py-version=3.10 + +# Discover python modules and packages in the file system subtree. +recursive=no + +# Add paths to the list of the source roots. Supports globbing patterns. The +# source root is an absolute path or a path relative to the current working +# directory used to determine a package namespace for modules located under the +# source root. +source-roots= + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages. +suggestion-mode=yes + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + +# In verbose mode, extra non-checker-related info will be displayed. +#verbose= + + +[BASIC] + +# Naming style matching correct argument names. +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style. If left empty, argument names will be checked with the set +# naming style. +#argument-rgx= + +# Naming style matching correct attribute names. +attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. If left empty, attribute names will be checked with the set naming +# style. +#attr-rgx= + +# Bad variable names which should always be refused, separated by a comma. +bad-names=foo, + bar, + baz, + toto, + tutu, + tata + +# Bad variable names regexes, separated by a comma. If names match any regex, +# they will always be refused +bad-names-rgxs= + +# Naming style matching correct class attribute names. +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style. If left empty, class attribute names will be checked +# with the set naming style. +#class-attribute-rgx= + +# Naming style matching correct class constant names. +class-const-naming-style=UPPER_CASE + +# Regular expression matching correct class constant names. Overrides class- +# const-naming-style. 
If left empty, class constant names will be checked with +# the set naming style. +#class-const-rgx= + +# Naming style matching correct class names. +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming- +# style. If left empty, class names will be checked with the set naming style. +#class-rgx= + +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style. If left empty, constant names will be checked with the set naming +# style. +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style. If left empty, function names will be checked with the set +# naming style. +#function-rgx= + +# Good variable names which should always be accepted, separated by a comma. +good-names=i, + j, + k, + ex, + Run, + _ + +# Good variable names regexes, separated by a comma. If names match any regex, +# they will always be accepted +good-names-rgxs= + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. If left empty, inline iteration names will be checked +# with the set naming style. +#inlinevar-rgx= + +# Naming style matching correct method names. +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style. If left empty, method names will be checked with the set naming style. +#method-rgx= + +# Naming style matching correct module names. +module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style. If left empty, module names will be checked with the set naming style. +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty + +# Regular expression matching correct type alias names. If left empty, type +# alias names will be checked with the set naming style. +#typealias-rgx= + +# Regular expression matching correct type variable names. If left empty, type +# variable names will be checked with the set naming style. +#typevar-rgx= + +# Naming style matching correct variable names. +variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style. If left empty, variable names will be checked with the set +# naming style. +#variable-rgx= + + +[CLASSES] + +# Warn about protected attribute access inside special methods +check-protected-access-in-special-methods=no + +# List of method names used to declare (i.e. assign) instance attributes. 
+defining-attr-methods=__init__,
+                      __new__,
+                      setUp,
+                      asyncSetUp,
+                      __post_init__
+
+# List of member names, which should be excluded from the protected access
+# warning.
+exclude-protected=_asdict,_fields,_replace,_source,_make,os._exit
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg=mcs
+
+
+[DESIGN]
+
+# List of regular expressions of class ancestor names to ignore when counting
+# public methods (see R0903)
+exclude-too-few-public-methods=
+
+# List of qualified class names to ignore when counting class parents (see
+# R0901)
+ignored-parents=
+
+# Maximum number of arguments for function / method.
+max-args=5
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=7
+
+# Maximum number of boolean expressions in an if statement (see R0916).
+max-bool-expr=5
+
+# Maximum number of branch for function / method body.
+max-branches=12
+
+# Maximum number of locals for function / method body.
+max-locals=15
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+# Maximum number of return / yield for function / method body.
+max-returns=6
+
+# Maximum number of statements in function / method body.
+max-statements=50
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when caught.
+overgeneral-exceptions=builtins.BaseException,builtins.Exception
+
+
+[FORMAT]
+
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+expected-line-ending-format=
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines=^\s*(# )?<?https?://\S+>?$
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren=4
+
+# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
+# tab).
+indent-string='    '
+
+# Maximum number of characters on a single line.
+max-line-length=100
+
+# Maximum number of lines in a module.
+max-module-lines=1000
+
+# Allow the body of a class to be on the same line as the declaration if body
+# contains single statement.
+single-line-class-stmt=no
+
+# Allow the body of an if to be on the same line as the test if there is no
+# else.
+single-line-if-stmt=no
+
+
+[IMPORTS]
+
+# List of modules that can be imported at any level, not just the top level
+# one.
+allow-any-import-level=
+
+# Allow explicit reexports by alias from a package __init__.
+allow-reexport-from-package=no
+
+# Allow wildcard imports from modules that define __all__.
+allow-wildcard-with-all=no
+
+# Deprecated modules which should not be used, separated by a comma.
+deprecated-modules=
+
+# Output a graph (.gv or any supported image format) of external dependencies
+# to the given file (report RP0402 must not be disabled).
+ext-import-graph=
+
+# Output a graph (.gv or any supported image format) of all (i.e. internal and
+# external) dependencies to the given file (report RP0402 must not be
+# disabled).
+import-graph=
+
+# Output a graph (.gv or any supported image format) of internal dependencies
+# to the given file (report RP0402 must not be disabled).
+int-import-graph=
+
+# Force import order to recognize a module as part of the standard
+# compatibility libraries.
+known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= + + +[LOGGING] + +# The type of string formatting that logging methods do. `old` means using % +# formatting, `new` is for `{}` formatting. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE, +# UNDEFINED. +confidence=HIGH, + CONTROL_FLOW, + INFERENCE, + INFERENCE_FAILURE, + UNDEFINED + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then re-enable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". +disable=raw-checker-failed, + bad-inline-option, + locally-disabled, + file-ignored, + suppressed-message, + useless-suppression, + deprecated-pragma, + use-symbolic-message-instead, + use-implicit-booleaness-not-comparison-to-string, + use-implicit-booleaness-not-comparison-to-zero + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable= + + +[METHOD_ARGS] + +# List of qualified names (i.e., library.method) which require a timeout +# parameter e.g. 'requests.api.get,requests.api.post' +timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX, + TODO + +# Regular expression of note tags to take in consideration. +notes-rgx= + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=sys.exit,argparse.parse_error + +# Let 'consider-using-join' be raised when the separator to join on would be +# non-empty (resulting in expected fixes of the type: ``"- " + " - +# ".join(items)``) +suggest-join-with-non-empty-separator=yes + + +[REPORTS] + +# Python expression which should return a score less than or equal to 10. You +# have access to the variables 'fatal', 'error', 'warning', 'refactor', +# 'convention', and 'info' which contain the number of messages in each +# category, as well as 'statement' which is the total number of statements +# analyzed. 
This score is used by the global evaluation report (RP0004). +evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. +msg-template= + +# Set the output format. Available formats are: text, parseable, colorized, +# json2 (improved json format), json (old json format) and msvs (visual +# studio). You can also give a reporter class, e.g. +# mypackage.mymodule.MyReporterClass. +#output-format= + +# Tells whether to display a full report or only the messages. +reports=no + +# Activate the evaluation score. +score=yes + + +[SIMILARITIES] + +# Comments are removed from the similarity computation +ignore-comments=yes + +# Docstrings are removed from the similarity computation +ignore-docstrings=yes + +# Imports are removed from the similarity computation +ignore-imports=yes + +# Signatures are removed from the similarity computation +ignore-signatures=yes + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=4 + +# Spelling dictionary name. No available dictionaries : You need to install +# both the python package and the system dependency for enchant to work. +spelling-dict= + +# List of comma separated words that should be considered directives if they +# appear at the beginning of a comment and should not be checked. +spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy: + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains the private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to the private dictionary (see the +# --spelling-private-dict-file option) instead of raising a message. +spelling-store-unknown-words=no + + +[STRING] + +# This flag controls whether inconsistent-quotes generates a warning when the +# character used as a quote delimiter is used inconsistently within a module. +check-quote-consistency=no + +# This flag controls whether the implicit-str-concat should generate a warning +# on implicit string concatenation in sequences defined over several lines. +check-str-concat-over-line-jumps=no + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of symbolic message names to ignore for Mixin members. 
+ignored-checks-for-mixins=no-member, + not-async-context-manager, + not-context-manager, + attribute-defined-outside-init + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + +# Regex pattern to define which classes are considered mixins. +mixin-class-rgx=.*[Mm]ixin + +# List of decorators that change the signature of a decorated function. +signature-mutators= + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of names allowed to shadow builtins +allowed-redefined-builtins= + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). +dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io diff --git a/ai-engine/Dockerfile b/ai-engine/Dockerfile index 1e40850..c567eab 100644 --- a/ai-engine/Dockerfile +++ b/ai-engine/Dockerfile @@ -1,28 +1,21 @@ # SPDX-License-Identifier: Apache-2.0 -# Dockerfile -FROM ubuntu:latest +FROM python:3.11-bullseye -RUN apt-get update && apt-get -y install python3 python3-pip wget git +RUN mkdir -p /ai-engine/protobuf -RUN git clone https://github.com/isu-kim/stringlifier.git -WORKDIR ./stringlifier -RUN pip install . +WORKDIR /ai-engine -RUN mkdir /app -WORKDIR /app COPY /ai-engine . - -# Build protobuf for Python -RUN pip install grpcio grpcio-tools -RUN mkdir protobuf/ COPY /protobuf ./protobuf -# Due to python import bugs, we have to compile protoc using this command -# Refer to https://github.com/protocolbuffers/protobuf/issues/1491#issuecomment-261621112 for more information on this -RUN python3 -m grpc_tools.protoc --python_out=. --pyi_out=. --grpc_python_out=. -I=. protobuf/sentryflow_metrics.proto +WORKDIR /ai-engine/stringlifier + +RUN pip3 --no-cache-dir install . -WORKDIR /app -RUN pip install -r requirements.txt +WORKDIR /ai-engine + +RUN pip3 --no-cache-dir install -r requirements.txt +RUN python3 -m grpc_tools.protoc --python_out=. --pyi_out=. --grpc_python_out=. -I=. 
protobuf/sentryflow_metrics.proto -CMD ["python3", "ai-engine.py"] +CMD ["python3", "./ai_engine.py"] diff --git a/ai-engine/Makefile b/ai-engine/Makefile index 2c89af4..f054509 100644 --- a/ai-engine/Makefile +++ b/ai-engine/Makefile @@ -5,6 +5,13 @@ IMAGE_NAME = 5gsec/$(ENGINE_NAME) TAG = v0.1 .PHONY: build-image - build-image: docker build -t $(IMAGE_NAME):$(TAG) -f ./Dockerfile ../ + +.PHONY: clean-image +clean-image: + docker rmi $(IMAGE_NAME):$(TAG) + +.PHONY: run-image +run-image: + docker run -it --rm $(IMAGE_NAME):$(TAG) diff --git a/ai-engine/ai-engine.py b/ai-engine/ai_engine.py similarity index 63% rename from ai-engine/ai-engine.py rename to ai-engine/ai_engine.py index eea12f7..5ea731d 100644 --- a/ai-engine/ai-engine.py +++ b/ai-engine/ai_engine.py @@ -1,12 +1,17 @@ -import os -import grpc +# SPDX-License-Identifier: Apache-2.0 + +"""SentryFlow AI Engine for API Classification""" -from stringlifier.api import Stringlifier from concurrent import futures from collections import Counter -from protobuf import sentryflow_metrics_pb2_grpc +import os +import grpc + from protobuf import sentryflow_metrics_pb2 +from protobuf import sentryflow_metrics_pb2_grpc + +from stringlifier.api import Stringlifier class HandlerServer: @@ -14,21 +19,21 @@ class HandlerServer: Class for gRPC Servers """ def __init__(self): + self.server = None + self.grpc_servers = [] + try: - self.listen_addr = os.environ["AI_ENGINE_ADDRESS"] + self.listen_addr = os.environ["AI_ENGINE"] except KeyError: self.listen_addr = "0.0.0.0:5000" - self.server = None - self.grpc_servers = list() - def init_grpc_servers(self): """ init_grpc_servers method that initializes and registers gRPC servers :return: None """ self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) - self.grpc_servers.append(APIClassificationServer()) # @todo: make this configurable + self.grpc_servers.append(APIClassifierServer()) grpc_server: GRPCServer for grpc_server in self.grpc_servers: @@ -36,12 +41,13 @@ def init_grpc_servers(self): def serve(self): """ - serve method that starts serving gRPC servers, this is blocking function. 
+ serve method that starts serving the gRPC servers (blocking function) :return: None """ self.server.add_insecure_port(self.listen_addr) - print("[INFO] Starting to serve on {}".format(self.listen_addr)) + print(f"[INFO] Starting to serve on {self.listen_addr}") + self.server.start() self.server.wait_for_termination() @@ -56,39 +62,41 @@ def register(self, server): :param server: The server :return: None """ - pass + + def unregister(self, server): + """ + unregister method that unregisters gRPC service from target server + :param server: The server + :return: None + """ -class APIClassificationServer(sentryflow_metrics_pb2_grpc.SentryFlowMetricsServicer, GRPCServer): +class APIClassifierServer(sentryflow_metrics_pb2_grpc.APIClassifierServicer, GRPCServer): """ Class for API Classification Server using Stringlifier """ - def __init__(self): self.stringlifier = Stringlifier() print("[Init] Successfully initialized APIClassificationServer") def register(self, server): - sentryflow_metrics_pb2_grpc.add_SentryFlowMetricsServicer_to_server(self, server) + sentryflow_metrics_pb2_grpc.add_APIClassifierServicer_to_server(self, server) - def GetAPIClassification(self, request_iterator, context): + def ClassifyAPIs(self, request_iterator, _): # pylint: disable=C0103 """ - GetAPIClassification method that runs multiple API ML Classification at once + ClassifyAPIs method that runs multiple MLs for API Classification at once :param request_iterator: The requests :param context: The context :return: The results """ - for req in request_iterator: - all_paths = req.path - # for paths in all_paths: + all_paths = req.API ml_results = self.stringlifier(all_paths) ml_counts = Counter(ml_results) + print(f"{all_paths} -> {ml_counts}") - print("{} -> {}".format(all_paths, ml_counts)) - - yield sentryflow_metrics_pb2.APIClassificationResponse(fields=ml_counts) + yield sentryflow_metrics_pb2.APIClassifierResponse(APIs=ml_counts) if __name__ == '__main__': diff --git a/ai-engine/requirements.txt b/ai-engine/requirements.txt index 7c37043..ffaf09f 100644 Binary files a/ai-engine/requirements.txt and b/ai-engine/requirements.txt differ diff --git a/ai-engine/stringlifier/CODE_OF_CONDUCT.md b/ai-engine/stringlifier/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..5405eda --- /dev/null +++ b/ai-engine/stringlifier/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Adobe Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at Grp-opensourceoffice@adobe.com. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/ai-engine/stringlifier/CONTRIBUTING.md b/ai-engine/stringlifier/CONTRIBUTING.md new file mode 100644 index 0000000..4ef5c84 --- /dev/null +++ b/ai-engine/stringlifier/CONTRIBUTING.md @@ -0,0 +1,19 @@ +# Contributing + +Thanks for choosing to contribute! + +The following are a set of guidelines to follow when contributing to this project. + +## Code Of Conduct + +This project adheres to the Adobe [code of conduct](CODE_OF_CONDUCT.md). By participating, you are expected to uphold this code. Please report unacceptable behavior to Grp-opensourceoffice@adobe.com. 
+ +## Contributor License Agreement + +All third-party contributions to this project must be accompanied by a signed contributor license agreement. This gives Adobe permission to redistribute your contributions as part of the project. [Sign our CLA](http://opensource.adobe.com/cla.html). You only need to submit an Adobe CLA one time, so if you have submitted one previously, you are good to go! + +## Code Reviews + +All submissions should come in the form of pull requests and need to be reviewed by project committers. Read [GitHub's pull request documentation](https://help.github.com/articles/about-pull-requests/) for more information on sending pull requests. + +Lastly, please follow the [pull request template](.github/PULL_REQUEST_TEMPLATE.md) when submitting a pull request! diff --git a/ai-engine/stringlifier/COPYRIGHT b/ai-engine/stringlifier/COPYRIGHT new file mode 100644 index 0000000..daa48aa --- /dev/null +++ b/ai-engine/stringlifier/COPYRIGHT @@ -0,0 +1,16 @@ +The following copyright message should appear at the top of all +source files. This file can be removed from your repository. + +Copyright (c) 2020 Adobe Systems Incorporated. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/ai-engine/stringlifier/LICENSE b/ai-engine/stringlifier/LICENSE new file mode 100644 index 0000000..8dada3e --- /dev/null +++ b/ai-engine/stringlifier/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
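With stringlifier vendored under ai-engine/ (the Dockerfile above installs this local copy with pip3 install . instead of cloning isu-kim/stringlifier), the refactored ai_engine.py drives it through the renamed gRPC service. A condensed sketch of that servicer flow, assuming the stubs produced by the Dockerfile's grpc_tools.protoc step and taking the field names (API, APIs) from the diff above rather than from a verified .proto:

```python
# Condensed sketch of ai_engine.py's classification path (not the full file).
import os
from collections import Counter
from concurrent import futures

import grpc
from stringlifier.api import Stringlifier

from protobuf import sentryflow_metrics_pb2, sentryflow_metrics_pb2_grpc


class APIClassifierServer(sentryflow_metrics_pb2_grpc.APIClassifierServicer):
    """Streams back a histogram of stringlifier labels per request batch."""

    def __init__(self):
        self.stringlifier = Stringlifier()

    def ClassifyAPIs(self, request_iterator, _):  # pylint: disable=C0103
        for req in request_iterator:
            # stringlifier masks the random-looking parts of each API path;
            # Counter turns the masked paths into the response histogram.
            yield sentryflow_metrics_pb2.APIClassifierResponse(
                APIs=Counter(self.stringlifier(req.API)))


if __name__ == "__main__":
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    sentryflow_metrics_pb2_grpc.add_APIClassifierServicer_to_server(
        APIClassifierServer(), server)
    # AI_ENGINE is the env var the refactor reads; 0.0.0.0:5000 is its default.
    server.add_insecure_port(os.environ.get("AI_ENGINE", "0.0.0.0:5000"))
    server.start()
    server.wait_for_termination()
```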
diff --git a/ai-engine/stringlifier/MANIFEST.in b/ai-engine/stringlifier/MANIFEST.in new file mode 100644 index 0000000..6ba4036 --- /dev/null +++ b/ai-engine/stringlifier/MANIFEST.in @@ -0,0 +1,6 @@ +include data/string-c.bestType +include data/string-c.conf +include data/string-c.encodings +include data/enhanced-c.bestType +include data/enhanced-c.conf +include data/enhanced-c.encodings \ No newline at end of file diff --git a/ai-engine/stringlifier/README.md b/ai-engine/stringlifier/README.md new file mode 100644 index 0000000..259537e --- /dev/null +++ b/ai-engine/stringlifier/README.md @@ -0,0 +1,89 @@ +[![Downloads](https://pepy.tech/badge/stringlifier)](https://pepy.tech/project/stringlifier) [![Downloads](https://pepy.tech/badge/stringlifier/month)](https://pepy.tech/project/stringlifier/month) ![Weekly](https://img.shields.io/pypi/dw/stringlifier.svg) ![daily](https://img.shields.io/pypi/dd/stringlifier.svg) +![Version](https://badge.fury.io/py/stringlifier.svg) [![Python 3](https://img.shields.io/badge/python-3-blue.svg)](https://www.python.org/downloads/release/python-360/) [![GitHub stars](https://img.shields.io/github/stars/adobe/stringlifier.svg?style=social&label=Star&maxAge=2592000)](https://github.com/adobe/stringlifier/stargazers/) + +# stringlifier +String-classifier - is a python module for detecting random string and hashes text/code. + +Typical usage scenarios include: + +* Sanitizing application or security logs +* Detecting accidentally exposed credentials (complex passwords or api keys) + +# Interactive notebook + +You can see Stringlifier in action by checking out this [interactive notebook hosted on Colaboratory](https://colab.research.google.com/drive/1bgZQSKhVAYU4r46wqb0v8Sfvuo_yMOLA?usp=sharing). + +# Quick start guide + +You can quickly use stringlifier via pip-installation: +```bash +$ pip install stringlifier +``` +In case you are using the pip3 installation that comes with Python3, use pip3 instead of pip in the above command. +```bash +$ pip3 install . 
# in the root directory
+```
+
+API example:
+```python
+from stringlifier.api import Stringlifier
+
+stringlifier=Stringlifier()
+
+s = stringlifier("com.docker.hyperkit -A -u -F vms/0/hyperkit.pid -c 8 -m 8192M -b 127.0.0.1 --pass=\"NlcXVpYWRvcg\" -s 0:0,hostbridge -s 31,lpc -s 1:0,virtio-vpnkit,path=vpnkit.eth.sock,uuid=45172425-08d1-41ec-9d13-437481803412 -U c6fb5010-a83e-4f74-9a5a-50d9086b9")
+```
+
+After this, `s` should be:
+
+```'com.docker.hyperkit -A -u -F vms/0/hyperkit.pid -c 8 -m 8192M -b <IP_ADDR> --pass="<HASH>" -s 0:0,hostbridge -s 31,lpc -s 1:0,virtio-vpnkit,path=vpnkit.eth.sock,uuid=<UUID> -U <UUID> '```
+
+You can also choose to see the full tokenization and classification output:
+
+```python
+s, tokens = stringlifier("com.docker.hyperkit -A -u -F vms/0/hyperkit.pid -c 8 -m 8192M -b 127.0.0.1 --pass=\"NlcXVpYWRvcg\" -s 0:0,hostbridge -s 31,lpc -s 1:0,virtio-vpnkit,path=vpnkit.eth.sock,uuid=45172425-08d1-41ec-9d13-437481803412 -U c6fb5010-a83e-4f74-9a5a-50d9086b9", return_tokens=True)
+```
+
+`s` will be the same as before and `tokens` will contain the following data:
+```python
+[[('0', 33, 34, '<NUMERIC>'),
+  ('8', 51, 52, '<NUMERIC>'),
+  ('8192', 56, 60, '<NUMERIC>'),
+  ('127.0.0.1', 65, 74, '<IP_ADDR>'),
+  ('NlcXVpYWRvcg', 83, 95, '<HASH>'),
+  ('0', 100, 101, '<NUMERIC>'),
+  ('0', 102, 103, '<NUMERIC>'),
+  ('31', 118, 120, '<NUMERIC>'),
+  ('1', 128, 129, '<NUMERIC>'),
+  ('0', 130, 131, '<NUMERIC>'),
+  ('45172425-08d1-41ec-9d13-437481803412', 172, 208, '<UUID>'),
+  ('c6fb5010-a83e-4f74-9a5a-50d9086b9', 212, 244, '<UUID>')]]
+```
+
+
+
+# Building your own classifier
+
+You can also train your own model if you want to detect different types of strings. For this you can use the Command Line Interface for the string classifier:
+
+```bash
+$ python3 stringlifier/modules/stringc.py --help
+
+Usage: stringc.py [options]
+
+Options:
+  -h, --help            show this help message and exit
+  --interactive
+  --train
+  --resume
+  --train-file=TRAIN_FILE
+  --dev-file=DEV_FILE
+  --store=OUTPUT_BASE
+  --patience=PATIENCE   (default=20)
+  --batch-size=BATCH_SIZE
+                        (default=32)
+  --device=DEVICE
+```
+
+For instructions on how to generate your training data, use [this link](corpus/README.md).
+
+**Important note:** This model might not scale if detecting a type of string depends on the surrounding tokens. In this case, you can look at a more advanced tool for sequence processing such as [NLP-Cube](https://github.com/adobe/NLP-Cube)
diff --git a/ai-engine/stringlifier/corpus/README.md b/ai-engine/stringlifier/corpus/README.md
new file mode 100644
index 0000000..0576f2a
--- /dev/null
+++ b/ai-engine/stringlifier/corpus/README.md
@@ -0,0 +1,23 @@
+# Standard training data
+
+The training data was generated by running `scripts/01-generate-synthetic-training-data.py` and `scripts/02-split-generated-data.py` on a list of common english words, available [here](https://raw.githubusercontent.com/dwyl/english-words/master/words_alpha.txt).
+
+# Generating your own training data
+
+If you want to generate your own dataset, you simply need to create a training and a validation file. They follow a simple format:
+
+```text
+<string>\t<type>\t<subtype>
+```
+
+**Example**
+
+```text
+ngnix STRING PROGRAM
+Y29tbWl4dHVyZQ== HASH PASSWORD
+b3d2cf2ec3894374b37d1b79edd57ad4 HASH API_KEY
+9c795829-75bc-4596-87d3-3508372bbf5f HASH API_KEY
+licenser STRING WORD
+```
+
+**NOTE:** There are no predefined values for `type` and `subtype`.
\ No newline at end of file diff --git a/ai-engine/stringlifier/data/enhanced-c.bestType b/ai-engine/stringlifier/data/enhanced-c.bestType new file mode 100644 index 0000000..ac09942 Binary files /dev/null and b/ai-engine/stringlifier/data/enhanced-c.bestType differ diff --git a/ai-engine/stringlifier/data/enhanced-c.conf b/ai-engine/stringlifier/data/enhanced-c.conf new file mode 100644 index 0000000..8c6a2e5 --- /dev/null +++ b/ai-engine/stringlifier/data/enhanced-c.conf @@ -0,0 +1 @@ +{"char_emb_size": 100, "rnn_layers": 2, "rnn_size": 100, "hidden": 500} \ No newline at end of file diff --git a/ai-engine/stringlifier/data/enhanced-c.encodings b/ai-engine/stringlifier/data/enhanced-c.encodings new file mode 100644 index 0000000..bcc309d --- /dev/null +++ b/ai-engine/stringlifier/data/enhanced-c.encodings @@ -0,0 +1 @@ +{"char2int": {"": 0, "": 1, "{": 2, "+": 3, "c": 4, "r": 5, "e": 6, "a": 7, "m": 8, "i": 9, "l": 10, "y": 11, "}": 12, " ": 13, "$": 14, "5": 15, "f": 16, "9": 17, "1": 18, "3": 19, "8": 20, "2": 21, "-": 22, "7": 23, "0": 24, "4": 25, "d": 26, "6": 27, "b": 28, "x": 29, "t": 30, "w": 31, "u": 32, "v": 33, "n": 34, "h": 35, "o": 36, "%": 37, "q": 38, "<": 39, "s": 40, "g": 41, "/": 42, "p": 43, "#": 44, "j": 45, "k": 46, "z": 47, ".": 48, "_": 49, ":": 50, "*": 51, "=": 52, ",": 53, "&": 54, "'": 55, "?": 56, "\"": 57, ">": 58, "!": 59, "(": 60, ")": 61, "\\": 62, "[": 63, "]": 64, "|": 65, "`": 66, "~": 67, ";": 68, "@": 69}, "label2int": {"": 0, "C": 1, "U": 2, "H": 3, "J": 4, "N": 5, "I": 6}} \ No newline at end of file diff --git a/ai-engine/stringlifier/data/enhanced-c.last b/ai-engine/stringlifier/data/enhanced-c.last new file mode 100644 index 0000000..0a9cfef Binary files /dev/null and b/ai-engine/stringlifier/data/enhanced-c.last differ diff --git a/ai-engine/stringlifier/requirements.txt b/ai-engine/stringlifier/requirements.txt new file mode 100644 index 0000000..9f64b9a --- /dev/null +++ b/ai-engine/stringlifier/requirements.txt @@ -0,0 +1,6 @@ +ipdb==0.13.4 +nptyping==1.3.0 +numpy==1.22.0 +PyJWT==1.7.1 +torch==1.13.1 +tqdm==4.50.2 diff --git a/ai-engine/stringlifier/scripts/01-01-generate-synthetic-training-data.py b/ai-engine/stringlifier/scripts/01-01-generate-synthetic-training-data.py new file mode 100644 index 0000000..d3fc473 --- /dev/null +++ b/ai-engine/stringlifier/scripts/01-01-generate-synthetic-training-data.py @@ -0,0 +1,51 @@ +# +# Copyright (c) 2020 Adobe Systems Incorporated. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +known_words = [] + + +def generate_words(count, known_words): + import uuid + import datetime + import base64 + generated = [] + for ii in range(count): + if ii % 4 == 0: + generated.append(str(uuid.uuid4())) + elif ii % 4 == 1: + generated.append(str(uuid.uuid4().hex)) + elif ii % 4 == 2: + generated.append(str(datetime.datetime.now().timestamp())) + elif ii % 4 == 3: + message = known_words[ii] + message_bytes = message.encode('ascii') + base64_bytes = base64.b64encode(message_bytes) + base64_message = base64_bytes.decode('ascii') + generated.append(base64_message) + return generated + + +lines = open('corpus/words_alpha.txt').readlines() +for line in lines: + known_words.append(line.strip()) + +generated_words = generate_words(len(known_words), known_words) + +f = open('corpus/generated', 'w') +for ii in range(len(known_words)): + f.write(known_words[ii] + '\tSTRING\n') + f.write(generated_words[ii] + '\tHASH\n') +f.close() diff --git a/ai-engine/stringlifier/scripts/01-02-generate-enhanced-synthetic-training-data.py b/ai-engine/stringlifier/scripts/01-02-generate-enhanced-synthetic-training-data.py new file mode 100644 index 0000000..f51835a --- /dev/null +++ b/ai-engine/stringlifier/scripts/01-02-generate-enhanced-synthetic-training-data.py @@ -0,0 +1,141 @@ +# +# Copyright (c) 2020 Adobe Systems Incorporated. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +known_words = [] + + +def generate_words(count, known_words): + import uuid + import datetime + import base64 + generated = [] + for ii in range(count): + if ii % 4 == 0: + generated.append(str(uuid.uuid4())) + elif ii % 4 == 1: + generated.append(str(uuid.uuid4().hex)) + elif ii % 4 == 2: + generated.append(str(datetime.datetime.now().timestamp())) + elif ii % 4 == 3: + message = known_words[ii] + message_bytes = message.encode('ascii') + base64_bytes = base64.b64encode(message_bytes) + base64_message = base64_bytes.decode('ascii') + generated.append(base64_message) + return generated + + +lines = open('corpus/words_alpha.txt').readlines() +for line in lines: + known_words.append(line.strip()) + +generated_words = generate_words(len(known_words), known_words) + +f = open('corpus/generated-enhanced', 'w') + +total_clis = (len(generated_words) + len(known_words)) + +known_index = 0 +gen_index = 0 + +import random + + +def _get_next_known(): + global known_index + s = known_words[known_index] + known_index += 1 + if known_index == len(known_words): + known_index = 0 + return s + + +def _get_next_gen(): + global gen_index + s = generated_words[gen_index] + gen_index += 1 + if gen_index == len(generated_words): + gen_index = 0 + return s + + +import random + + +def _generate_next_cmd(): + delimiters = ' /.,?!~|<>-=_~:;\\+-&*%$#@!' 
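+    # Build one synthetic "command line": dictionary words and generated
+    # hash-like strings glued together with random delimiters/enclosers.
+    # The mask labels every output character: 'C' for common text and
+    # 'H' for generated (hash-like) tokens.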
+ enclosers = '[]{}``""\'\'()' + mask = '' + cmd = '' + num_words = random.randint(3, 15) + use_space = False + use_delimiter = False + use_encloser = False + append_number = False + for ii in range(num_words): + + use_delimiter = random.random() > 0.5 + use_encloser = random.random() > 0.8 + use_gen_word = random.random() > 0.7 + case_style = random.randint(0, 2) + use_gen_word = random.random() > 0.7 + + del_index = random.randint(0, len(delimiters) - 1) + enc_index = random.randint(0, len(enclosers) // 2 - 1) * 2 + if use_space: + mask += 'C' + cmd += ' ' + if use_gen_word: + wrd = _get_next_gen() + if case_style == 1: + wrd = wrd[0].upper() + wrd[1:] + elif case_style == 2: + wrd = wrd.upper() + msk = '' + for _ in range(len(wrd)): + msk += 'H' + else: + wrd = _get_next_known() + append_number = random.random() > 0.97 + if append_number: + wrd = wrd + str(random.randint(0, 9999)) + if case_style == 1: + wrd = wrd[0].upper() + wrd[1:] + elif case_style == 2: + wrd = wrd.upper() + msk = '' + for _ in range(len(wrd)): + msk += 'C' + + if use_delimiter: + wrd = delimiters[del_index] + wrd + msk = 'C' + msk + if use_encloser: + wrd = enclosers[enc_index] + wrd + enclosers[enc_index + 1] + msk = 'C' + msk + 'C' + + cmd += wrd + mask += msk + use_space = random.random() > 0.7 + + return cmd, mask + + +for ii in range(total_clis): + command, mask = _generate_next_cmd() + f.write(command + '\n' + mask + '\n') + +f.close() diff --git a/ai-engine/stringlifier/scripts/02-02-split-generated-enhanced-data.py b/ai-engine/stringlifier/scripts/02-02-split-generated-enhanced-data.py new file mode 100644 index 0000000..1c6df76 --- /dev/null +++ b/ai-engine/stringlifier/scripts/02-02-split-generated-enhanced-data.py @@ -0,0 +1,30 @@ +# +# Copyright (c) 2020 Adobe Systems Incorporated. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +lines = open('corpus/generated-enhanced').readlines() +f_train = open('corpus/enhanced-train', 'w') +f_dev = open('corpus/enhanced-dev', 'w') + +for ii in range(len(lines) // 2): + word = lines[ii * 2] + mask = lines[ii * 2 + 1] + f = f_train + if ii % 10 == 5: + f = f_dev + f.write(word + mask) + +f_train.close() +f_dev.close() diff --git a/ai-engine/stringlifier/scripts/02-split-generated-data.py b/ai-engine/stringlifier/scripts/02-split-generated-data.py new file mode 100644 index 0000000..f91a6b4 --- /dev/null +++ b/ai-engine/stringlifier/scripts/02-split-generated-data.py @@ -0,0 +1,78 @@ +# +# Copyright (c) 2020 Adobe Systems Incorporated. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + + +string_list = [] +hash_list = [] + +lines = open('corpus/generated').readlines() + +for line in lines: + parts = line.strip().split('\t') + if parts[1] == 'STRING': + string_list.append(parts[0]) + else: + hash_list.append(parts[0]) + +train_data = [ + ('usr', 'STRING'), + ('var', 'STRING'), + ('lib', 'STRING'), + ('etc', 'STRING'), + ('tmp', 'STRING'), + ('dev', 'STRING'), + ('libexec', 'STRING'), + ('lib32', 'STRING'), + ('lib64', 'STRING'), + ('bin', 'STRING') +] +dev_data = [] + + +def add_data(train, dev, list, label): + for ii in range(len(list)): + if ii % 10 == 0: + dev.append((list[ii], label)) + else: + train.append((list[ii], label)) + + +add_data(train_data, dev_data, string_list, "STRING") +add_data(train_data, dev_data, hash_list, "HASH") + +import random + +random.shuffle(train_data) +random.shuffle(dev_data) + +f_train = open('corpus/string-train', 'w') +f_dev = open('corpus/string-dev', 'w') + +for ii in range(len(train_data)): + if train_data[ii][1] == 'HASH': + stype = 'HASH' + else: + stype = 'WORD' + f_train.write(train_data[ii][0] + '\t' + train_data[ii][1] + '\t' + stype + '\n') +for ii in range(len(dev_data)): + if dev_data[ii][1] == 'HASH': + stype = 'HASH' + else: + stype = 'WORD' + f_dev.write(dev_data[ii][0] + '\t' + dev_data[ii][1] + '\t' + stype + '\n') + +f_train.close() +f_dev.close() diff --git a/ai-engine/stringlifier/setup.py b/ai-engine/stringlifier/setup.py new file mode 100644 index 0000000..f9bbdc5 --- /dev/null +++ b/ai-engine/stringlifier/setup.py @@ -0,0 +1,38 @@ +import setuptools + + +def parse_requirements(filename, session=None): + """ load requirements from a pip requirements file """ + lineiter = (line.strip() for line in open(filename)) + return [line for line in lineiter if line and not line.startswith("#")] + + +with open("README.md", "r") as fh: + long_description = fh.read() + +setuptools.setup( + name="stringlifier", + version="0.2", + author="Multiple authors", + description="Python module for detecting password, api keys hashes and any other string that resembles a " + "randomly generated character sequence. Originated from https://github.com/adobe/stringlifier, " + "this package updated dependencies for up to date python versions", + long_description=long_description, + long_description_content_type="text/markdown", + url="https://github.com/adobe/stringlifier", + packages=setuptools.find_packages(), + install_requires=parse_requirements('requirements.txt', session=False), + classifiers=( + "Programming Language :: Python :: 3.0", + "License :: OSI Approved :: Apache Software License", + "Operating System :: OS Independent", + ), + include_package_data=True, + package_data={ + '': ['data/string-c.encodings', 'data/string-c.conf', 'data/string-c.bestType', 'data/enhanced-c.encodings', + 'data/enhanced-c.conf', 'data/enhanced-c.bestType'] + + }, + # data_files=['data/string-c.encodings', 'data/string-c.conf', 'data/string-c.bestType'], + zip_safe=False +) diff --git a/ai-engine/stringlifier/stringlifier/__init__.py b/ai-engine/stringlifier/stringlifier/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ai-engine/stringlifier/stringlifier/api.py b/ai-engine/stringlifier/stringlifier/api.py new file mode 100644 index 0000000..0d93b09 --- /dev/null +++ b/ai-engine/stringlifier/stringlifier/api.py @@ -0,0 +1,194 @@ +# +# Copyright (c) 2020 Adobe Systems Incorporated. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from nptyping import NDArray, Int64 +from stringlifier.modules.stringc import AwDoC, AwDoCConfig, Encodings +from stringlifier.modules.stringc2 import CTagger, CTaggerConfig +from stringlifier.modules.stringc2 import Encodings as CEncodings +import torch +from typing import List, Optional, Tuple, Union +import pkg_resources + + +class Stringlifier: + def __init__(self, model_base: Optional[str] = None): + encodings = CEncodings() + if model_base is None: + enc_file = pkg_resources.resource_filename(__name__, 'data/enhanced-c.encodings') + conf_file = pkg_resources.resource_filename(__name__, 'data/enhanced-c.conf') + model_file = pkg_resources.resource_filename(__name__, 'data/enhanced-c.bestType') + else: + enc_file = '{0}.encodings'.format(model_base) + conf_file = '{0}.conf'.format(model_base) + model_file = '{0}.bestType'.format(model_base) + encodings.load(enc_file) + config = CTaggerConfig() + config.load(conf_file) + self.classifier = CTagger(config, encodings) + self.classifier.load(model_file) + self.classifier.eval() + self.encodings = encodings + self._c_index: int = encodings._label2int['C'] + + def __call__(self, string_or_list: Union[str, List[str]], return_tokens: bool = False, cutoff: int = 5) -> Union[ + Tuple[List[str], List[List[Tuple[str, int, int, str]]]], List[str]]: + if isinstance(string_or_list, str): + tokens = [string_or_list] + else: + tokens = string_or_list + + max_len = max([len(s) for s in tokens]) + if max_len == 0: + if return_tokens: + return [''], [] + else: + return [''] + + with torch.no_grad(): + p_ts = self.classifier(tokens) + + p_ts = torch.argmax(p_ts, dim=-1).detach().cpu().numpy() + ext_tokens: List[List[Tuple[str, int, int, str]]] = [] + new_strings: List[str] = [] + + for iBatch in range(p_ts.shape[0]): + new_str, toks = self._extract_tokens(tokens[iBatch], p_ts[iBatch], cutoff=cutoff) + new_strings.append(new_str) + ext_tokens.append(toks) + + if return_tokens: + return new_strings, ext_tokens + else: + return new_strings + + def _extract_tokens_2class(self, string: str, pred: NDArray[Int64]) -> Tuple[str, List[Tuple[str, int, int]]]: + CUTOFF = 5 + mask = '' + for p in pred: + mask += self.encodings._label_list[p] + start = 0 + tokens: List[Tuple[str, int, int]] = [] + c_tok = '' + for ii in range(len(string)): + if mask[ii] == 'C': + # check if we have a token + + if c_tok != '': + stop = ii + tokens.append((c_tok, start, stop)) + c_tok = '' + else: + if c_tok == '': + start = ii + c_tok += string[ii] + if c_tok != '': + stop = len(string) + tokens.append((c_tok, start, stop)) + + # filter small tokens + final_toks: List[Tuple[str, int, int]] = [] + for token in tokens: + if token[2] - token[1] > CUTOFF: + final_toks.append(token) + # compose new string + new_str: str = '' + last_pos = 0 + for token in final_toks: + if token[1] > last_pos: + new_str += string[last_pos:token[1]] + new_str += token[0] + last_pos = token[2] + 1 + if last_pos < len(string): + new_str += string[last_pos:] + 
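+        # new_str is the input with ordinary text kept as-is and every
+        # surviving token re-inserted verbatim; final_toks holds their
+        # (text, start, stop) spans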
+        return new_str, final_toks
+
+    def _extract_tokens(self, string: str, pred: NDArray[Int64], cutoff: int = 5) -> Tuple[
+        str, List[Tuple[str, int, int, str]]]:
+        mask = ''
+        numbers = {str(ii): 1 for ii in range(10)}
+
+        for ii in range(len(pred)):
+            p = pred[ii]
+            cls = self.encodings._label_list[p]
+            if ii < len(string) and cls == 'C' and string[ii] in numbers:
+                mask += 'N'
+            else:
+                mask += cls
+        start = 0
+        tokens = []
+        c_tok = ''
+        last_label = mask[0]
+        type_: Optional[str] = None
+        for ii in range(len(string)):
+            # check if the label-type has changed
+            if last_label != mask[ii]:
+                if c_tok != '':
+                    if last_label == 'C':
+                        pass
+                    elif last_label == 'H':
+                        type_ = '<RANDOM_STRING>'
+                    elif last_label == 'N':
+                        type_ = '<NUMERIC>'
+                    elif last_label == 'I':
+                        type_ = '<IP_ADDR>'
+                    elif last_label == 'U':
+                        type_ = '<UUID>'
+                    elif last_label == 'J':
+                        type_ = '<JWT>'
+
+                    if last_label != 'C' and type_ is not None:
+                        tokens.append((c_tok, start, ii, type_))
+                    c_tok = ''
+                    start = ii
+
+            last_label = mask[ii]
+            c_tok += string[ii]
+
+        if c_tok != '':
+            if last_label == 'C':
+                pass
+            elif last_label == 'H':
+                type_ = '<RANDOM_STRING>'
+            elif last_label == 'N':
+                type_ = '<NUMERIC>'
+            elif last_label == 'I':
+                type_ = '<IP_ADDR>'
+            elif last_label == 'U':
+                type_ = '<UUID>'
+            elif last_label == 'J':
+                type_ = '<JWT>'
+            if last_label != 'C' and type_ is not None:
+                tokens.append((c_tok, start, ii, type_))
+
+        # filter small tokens
+        final_toks: List[Tuple[str, int, int, str]] = []
+        for token in tokens:
+            if token[2] - token[1] > cutoff:
+                final_toks.append(token)
+        # compose new string
+        new_str: str = ''
+        last_pos = 0
+
+        # from ipdb import set_trace
+        # set_trace()
+        for token in final_toks:
+            if token[1] > last_pos:
+                new_str += string[last_pos:token[1]]
+            new_str += token[3]
+            last_pos = token[2]
+        if last_pos < len(string) - 1:
+            new_str += string[last_pos:]
+        return new_str, final_toks
diff --git a/ai-engine/stringlifier/stringlifier/data/enhanced-c.bestType b/ai-engine/stringlifier/stringlifier/data/enhanced-c.bestType
new file mode 100644
index 0000000..ac09942
Binary files /dev/null and b/ai-engine/stringlifier/stringlifier/data/enhanced-c.bestType differ
diff --git a/ai-engine/stringlifier/stringlifier/data/enhanced-c.conf b/ai-engine/stringlifier/stringlifier/data/enhanced-c.conf
new file mode 100644
index 0000000..8c6a2e5
--- /dev/null
+++ b/ai-engine/stringlifier/stringlifier/data/enhanced-c.conf
@@ -0,0 +1 @@
+{"char_emb_size": 100, "rnn_layers": 2, "rnn_size": 100, "hidden": 500}
\ No newline at end of file
diff --git a/ai-engine/stringlifier/stringlifier/data/enhanced-c.encodings b/ai-engine/stringlifier/stringlifier/data/enhanced-c.encodings
new file mode 100644
index 0000000..bcc309d
--- /dev/null
+++ b/ai-engine/stringlifier/stringlifier/data/enhanced-c.encodings
@@ -0,0 +1 @@
+{"char2int": {"<PAD>": 0, "<UNK>": 1, "{": 2, "+": 3, "c": 4, "r": 5, "e": 6, "a": 7, "m": 8, "i": 9, "l": 10, "y": 11, "}": 12, " ": 13, "$": 14, "5": 15, "f": 16, "9": 17, "1": 18, "3": 19, "8": 20, "2": 21, "-": 22, "7": 23, "0": 24, "4": 25, "d": 26, "6": 27, "b": 28, "x": 29, "t": 30, "w": 31, "u": 32, "v": 33, "n": 34, "h": 35, "o": 36, "%": 37, "q": 38, "<": 39, "s": 40, "g": 41, "/": 42, "p": 43, "#": 44, "j": 45, "k": 46, "z": 47, ".": 48, "_": 49, ":": 50, "*": 51, "=": 52, ",": 53, "&": 54, "'": 55, "?": 56, "\"": 57, ">": 58, "!": 59, "(": 60, ")": 61, "\\": 62, "[": 63, "]": 64, "|": 65, "`": 66, "~": 67, ";": 68, "@": 69}, "label2int": {"<PAD>": 0, "C": 1, "U": 2, "H": 3, "J": 4, "N": 5, "I": 6}}
\ No newline at end of file
diff --git a/ai-engine/stringlifier/stringlifier/data/enhanced-c.last b/ai-engine/stringlifier/stringlifier/data/enhanced-c.last
new file mode 100644
index 0000000..0a9cfef
Binary files /dev/null and b/ai-engine/stringlifier/stringlifier/data/enhanced-c.last differ
diff --git a/ai-engine/stringlifier/stringlifier/modules/__init__.py b/ai-engine/stringlifier/stringlifier/modules/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/ai-engine/stringlifier/stringlifier/modules/stringc.py b/ai-engine/stringlifier/stringlifier/modules/stringc.py
new file mode 100644
index 0000000..7270cc1
--- /dev/null
+++ b/ai-engine/stringlifier/stringlifier/modules/stringc.py
@@ -0,0 +1,394 @@
+#
+# Copyright (c) 2020 Adobe Systems Incorporated. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import torch
+import torch.nn as nn
+import optparse
+import sys
+import json
+import numpy as np
+import random
+import tqdm
+
+sys.path.append('')
+
+
+class Encodings:
+    def __init__(self, filename=None):
+        self._char2int = {'<PAD>': 0, '<UNK>': 1}
+        self._type2int = {}
+        self._subtype2int = {'<UNK>': 0}  # this will not get backpropagated
+        self._type_list = []
+        self._subtype_list = []
+        if filename is not None:
+            self.load(filename)
+
+    def save(self, filename):
+        json.dump({'char2int': self._char2int, 'type2int': self._type2int, 'subtype2int': self._subtype2int},
+                  open(filename, 'w'))
+
+    def load(self, file):
+        if isinstance(file, str):
+            stream = open(file, 'r')
+        else:
+            stream = file
+        obj = json.load(stream)
+        self._char2int = obj['char2int']
+        self._type2int = obj['type2int']
+        self._subtype2int = obj['subtype2int']
+        self._type_list = [None for _ in range(len(self._type2int))]
+        self._subtype_list = [None for _ in range(len(self._subtype2int))]
+        for t in self._type2int:
+            self._type_list[self._type2int[t]] = t
+
+        for t in self._subtype2int:
+            self._subtype_list[self._subtype2int[t]] = t
+
+    def update_encodings(self, dataset, cutoff=2):
+        char2count = {}
+        for entry in dataset:
+            domain = entry[0]
+            ttype = entry[1]
+            tsubtype = entry[2]
+            for char in domain:
+                char = char.lower()
+                if char in char2count:
+                    char2count[char] += 1
+                else:
+                    char2count[char] = 1
+            if ttype not in self._type2int:
+                self._type2int[ttype] = len(self._type2int)
+                self._type_list.append(ttype)
+            if tsubtype not in self._subtype2int:
+                self._subtype2int[tsubtype] = len(self._subtype2int)
+                self._subtype_list.append(tsubtype)
+
+        for char in char2count:
+            if char not in self._char2int:
+                self._char2int[char] = len(self._char2int)
+
+
+class AwDoCConfig:
+    def __init__(self):
+        self.char_emb_size = 100
+        self.rnn_layers = 2
+        self.rnn_size = 100
+        self.hidden = 500
+
+    def save(self, filename):
+        json.dump({'char_emb_size': self.char_emb_size, 'rnn_layers': self.rnn_layers, 'rnn_size': self.rnn_size,
+                   'hidden': self.hidden},
+                  open(filename, 'w'))
+
+    def load(self, file):
+        if isinstance(file, str):
+            stream = open(file, 'r')
+        else:
+            stream = file
+        obj = json.load(stream)
+        self.char_emb_size = obj['char_emb_size']
+        self.rnn_size = obj['rnn_size']
+        self.rnn_layers = obj['rnn_layers']
+        self.hidden = obj['hidden']
+
+
+class AwDoC(nn.Module):
+    def __init__(self, config, encodings):
+        super(AwDoC, self).__init__()
+        self._config = config
+        self._encodings = encodings
+        self._char_emb = nn.Embedding(len(encodings._char2int), config.char_emb_size)
+
+        self._rnn = nn.LSTM(config.char_emb_size, config.rnn_size, config.rnn_layers, batch_first=True)
+        self._hidden = nn.Sequential(nn.Linear(config.rnn_size, config.hidden), nn.Tanh(), nn.Dropout(0.5))
+        self._softmax_type = nn.Linear(config.hidden, len(encodings._type2int))
+        self._softmax_subtype = nn.Linear(config.hidden, len(encodings._subtype2int))
+
+    def _make_input(self, domain_list):
+        # we pad domain names and feed them in reversed character order to the LSTM
+        max_seq_len = max([len(domain) for domain in domain_list])
+
+        x = np.zeros((len(domain_list), max_seq_len))
+        for iBatch in range(x.shape[0]):
+            domain = domain_list[iBatch]
+            n = len(domain)
+            ofs_x = max_seq_len - n
+            for iSeq in range(x.shape[1]):
+                if iSeq < n:
+                    char = domain[-iSeq - 1].lower()
+                    if char in self._encodings._char2int:
+                        iChar = self._encodings._char2int[char]
+                    else:
+                        iChar = self._encodings._char2int['<UNK>']
+                    x[iBatch, iSeq + ofs_x] = iChar
+        return x
+
+    def forward(self, domain_list):
+
+        x = torch.tensor(self._make_input(domain_list), dtype=torch.long, device=self._get_device())
+        hidden = self._char_emb(x)
+        hidden = torch.dropout(hidden, 0.5, self.training)
+        output, _ = self._rnn(hidden)
+        output = output[:, -1, :]
+
+        hidden = self._hidden(output)
+
+        return self._softmax_type(hidden), self._softmax_subtype(hidden)
+
+    def save(self, path):
+        torch.save(self.state_dict(), path)
+
+    def load(self, path):
+        self.load_state_dict(torch.load(path, map_location='cpu'))
+
+    def _get_device(self):
+        if self._char_emb.weight.device.type == 'cpu':
+            return 'cpu'
+        return '{0}:{1}'.format(self._char_emb.weight.device.type, str(self._char_emb.weight.device.index))
+
+
+def _load_dataset(filename):
+    lines = open(filename, encoding='utf-8').readlines()
+    dataset = []
+    for line in lines:
+        line = line.strip()
+        if line != '':
+            parts = line.split('\t')
+            if len(parts) == 3:
+                dataset.append(parts)
+    return dataset
+
+
+def _eval(model, dataset, encodings):
+    model.eval()
+    test_x, test_y = _make_batches(dataset, batch_size=128)
+    total_t = 0
+    total_st = 0
+    ok_t = 0
+    ok_st = 0
+    with torch.no_grad():
+        pgb = tqdm.tqdm(zip(test_x, test_y), total=len(test_x), ncols=80, desc='\t\t\t\t')
+        for x, y in pgb:
+            y_pred_t, y_pred_st = model(x)
+            y_tar_t, y_tar_st = _get_targets(y, encodings)
+            y_pred_t = torch.argmax(y_pred_t, dim=1).detach().cpu().numpy()
+            y_pred_st = torch.argmax(y_pred_st, dim=1).detach().cpu().numpy()
+            for y_t_t, y_t_st, y_p_t, y_p_st in zip(y_tar_t, y_tar_st, y_pred_t, y_pred_st):
+                total_t += 1
+                if y_t_st != 0:
+                    total_st += 1
+                    if y_t_st == y_p_st:
+                        ok_st += 1
+                if y_t_t == y_p_t:
+                    ok_t += 1
+
+    return ok_t / total_t, ok_st / total_st
+
+
+def _make_batches(dataset, batch_size=32):
+    batches_x = []
+    batches_y = []
+
+    batch_x = []
+    batch_y = []
+
+    for entry in dataset:
+        domain = entry[0]
+        t = entry[1]
+        st = entry[2]
+        batch_x.append(domain)
+        batch_y.append((t, st))
+        if len(batch_x) == batch_size:
+            batches_x.append(batch_x)
+            batches_y.append(batch_y)
+            batch_x = []
+            batch_y = []
+
+    if len(batch_x) != 0:
+        batches_x.append(batch_x)
+        batches_y.append(batch_y)
+
+    return batches_x, batches_y
+
+
+def _get_targets(y, encodings):
+    y_t = np.zeros((len(y)))
+    y_st = np.zeros((len(y)))
+    for i in range(len(y)):
+        y_t[i] = encodings._type2int[y[i][0]]
+        y_st[i] = encodings._subtype2int[y[i][1]]
+
+    return y_t, y_st
+
+
+def _drop_tld(domain_list, p):
+    # data augmentation: with probability p, blank out the TLD of a domain so
+    # the model does not over-rely on it
+    new_list = []
+    for domain in domain_list:
+        parts = domain.split('.')
+        dp = random.random()
+        if dp < p:
+            if dp < p / 2:
+                parts[-1] = ' '
+            else:
+                parts[-1] = ' '
+        dom = '.'.join(parts)
+        new_list.append(dom)
+    return new_list
+
+
+def _start_train(params):
+    trainset = _load_dataset(params.train_file)
+    devset = _load_dataset(params.dev_file)
+    if params.resume:
+        encodings = Encodings('{0}.encodings'.format(params.output_base))
+    else:
+        encodings = Encodings()
+        encodings.update_encodings(trainset)
+    print('chars={0}, types={1}, subtypes={2}'.format(len(encodings._char2int), len(encodings._type2int),
+                                                      len(encodings._subtype2int)))
+
+    config = AwDoCConfig()
+    if params.resume:
+        config.load('{0}.conf'.format(params.output_base))
+    model = AwDoC(config, encodings)
+    model.to(params.device)
+    if params.resume:
+        model.load('{0}.last'.format(params.output_base))
+    optimizer = torch.optim.Adam(model.parameters())
+    criterion_t = torch.nn.CrossEntropyLoss()
+    criterion_st = torch.nn.CrossEntropyLoss(ignore_index=0)  # we ignore unknown types
+
+    patience_left = params.patience
+    best_type, best_subtype = _eval(model, devset, encodings)
+    encodings.save('{0}.encodings'.format(params.output_base))
+    config.save('{0}.conf'.format(params.output_base))
+    model.save('{0}.last'.format(params.output_base))
+    print("Devset evaluation type_acc={0} subtype_acc={1}".format(best_type, best_subtype))
+    epoch = 0
+    eval_at = 5000
+    while patience_left > 0:
+        epoch += 1
+        random.shuffle(trainset)
+        train_x, train_y = _make_batches(trainset, batch_size=params.batch_size)
+        sys.stdout.write('Starting epoch {0}\n'.format(epoch))
+
+        pgb = tqdm.tqdm(zip(train_x, train_y), total=len(train_x), ncols=80, desc='\tloss=N/A')
+        model.train()
+        total_loss = 0
+        cnt = 0
+        for x, y in pgb:
+            cnt += 1
+            if cnt % eval_at == 0:
+                patience_left -= 1
+                sys.stderr.flush()
+                sys.stderr.flush()
+                sys.stderr.write('\n\tEvaluating...')
+                sys.stderr.flush()
+                acc_t, acc_st = _eval(model, devset, encodings)
+                sys.stderr.write(' type_acc={0}, subtype_acc={1}\n'.format(acc_t, acc_st))
+                sys.stderr.flush()
+                filename = '{0}.last'.format(params.output_base)
+                sys.stderr.write('\t\tStoring {0}\n'.format(filename))
+                sys.stderr.flush()
+                model.save(filename)
+                if acc_t > best_type:
+                    patience_left = params.patience
+                    best_type = acc_t
+                    filename = '{0}.bestType'.format(params.output_base)
+                    sys.stderr.write('\t\tStoring {0}\n'.format(filename))
+                    sys.stderr.flush()
+                    model.save(filename)
+                if acc_st > best_subtype:
+                    patience_left = params.patience
+                    best_subtype = acc_st
+                    filename = '{0}.bestSubtype'.format(params.output_base)
+                    sys.stderr.write('\t\tStoring {0}\n'.format(filename))
+                    sys.stderr.flush()
+                    model.save(filename)
+                sys.stderr.write('\n')
+                sys.stderr.flush()
+                model.train()
+                if patience_left <= 0:
+                    print("Stopping with maximum patience reached")
+                    sys.exit(0)
+
+            x = _drop_tld(x, 0.5)
+            y_pred_t, y_pred_st = model(x)
+
+            y_tar_t, y_tar_st = _get_targets(y, encodings)
+            y_tar_t = torch.tensor(y_tar_t, dtype=torch.long, device=params.device)
+            y_tar_st = torch.tensor(y_tar_st, dtype=torch.long, device=params.device)
+
+            loss = criterion_t(y_pred_t, y_tar_t) + \
+                   criterion_st(y_pred_st, y_tar_st)
+
+            optimizer.zero_grad()
+            total_loss += loss.item()
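+            # cnt counts batches seen this epoch; the progress bar shows the
+            # running average of the training loss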
pgb.set_description('\tloss={0:.4f}'.format(total_loss / cnt)) + loss.backward() + optimizer.step() + + sys.stdout.write('AVG train loss={0}\n'.format(total_loss / len(train_x))) + + +def _start_interactive(params): + encodings = Encodings('{0}.encodings'.format(params.output_base)) + config = AwDoCConfig() + config.load('{0}.conf'.format(params.output_base)) + model = AwDoC(config, encodings) + model.load('{0}.bestType'.format(params.output_base)) + model.to(params.device) + model.eval() + sys.stdout.write('>>> ') + sys.stdout.flush() + domain = input() + while domain != '/exit': + p_t, p_st = model([domain]) + print(p_t) + print(p_st) + p_d_t = torch.argmax(p_t, dim=1).detach().cpu().item() + p_d_st = torch.argmax(p_st, dim=1).detach().cpu().item() + print("Results for '{0}'".format(domain)) + print(encodings._type_list[p_d_t]) + + print(encodings._subtype_list[p_d_st]) + + print("") + sys.stdout.write('>>> ') + sys.stdout.flush() + domain = input() + + +if __name__ == '__main__': + parser = optparse.OptionParser() + parser.add_option('--interactive', action='store_true', dest='interactive') + parser.add_option('--train', action='store_true', dest='train') + parser.add_option('--resume', action='store_true', dest='resume') + parser.add_option('--train-file', action='store', dest='train_file') + parser.add_option('--dev-file', action='store', dest='dev_file') + parser.add_option('--store', action='store', dest='output_base') + parser.add_option('--patience', action='store', dest='patience', type='int', default=20, help='(default=20)') + parser.add_option('--batch-size', action='store', dest='batch_size', default=32, type='int', help='(default=32)') + parser.add_option('--device', action='store', dest='device', default='cpu') + + (params, _) = parser.parse_args(sys.argv) + + if params.train: + _start_train(params) + elif params.interactive: + _start_interactive(params) + else: + parser.print_help() diff --git a/ai-engine/stringlifier/stringlifier/modules/stringc2.py b/ai-engine/stringlifier/stringlifier/modules/stringc2.py new file mode 100644 index 0000000..09250aa --- /dev/null +++ b/ai-engine/stringlifier/stringlifier/modules/stringc2.py @@ -0,0 +1,383 @@ +# +# Copyright (c) 2020 Adobe Systems Incorporated. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+import torch
+import torch.nn as nn
+import optparse
+import sys
+import json
+import numpy as np
+import random
+import tqdm
+
+
+class Encodings:
+    def __init__(self, filename=None):
+        self._char2int = {'<PAD>': 0, '<UNK>': 1}
+        self._label2int = {'<PAD>': 0}
+        self._label_list = ['<PAD>']
+        if filename is not None:
+            self.load(filename)
+
+    def save(self, filename):
+        json.dump({'char2int': self._char2int, 'label2int': self._label2int},
+                  open(filename, 'w'))
+
+    def load(self, file):
+        if isinstance(file, str):
+            stream = open(file, 'r')
+        else:
+            stream = file
+        obj = json.load(stream)
+        self._char2int = obj['char2int']
+        self._label2int = obj['label2int']
+        self._label_list = [None for _ in range(len(self._label2int))]
+        for t in self._label2int:
+            self._label_list[self._label2int[t]] = t
+
+    def update_encodings(self, dataset, cutoff=2):
+        char2count = {}
+        for entry in tqdm.tqdm(dataset):
+            text = entry[0]
+            label = entry[1]
+            for char in text:
+                char = char.lower()
+                if char in char2count:
+                    char2count[char] += 1
+                else:
+                    char2count[char] = 1
+            for ttype in label:
+                if ttype not in self._label2int:
+                    self._label2int[ttype] = len(self._label2int)
+                    self._label_list.append(ttype)
+
+        for char in char2count:
+            if char not in self._char2int and char2count[char] > cutoff:
+                self._char2int[char] = len(self._char2int)
+
+
+class CTaggerConfig:
+    def __init__(self):
+        self.char_emb_size = 100
+        self.rnn_layers = 2
+        self.rnn_size = 100
+        self.hidden = 500
+
+    def save(self, filename):
+        json.dump({'char_emb_size': self.char_emb_size, 'rnn_layers': self.rnn_layers, 'rnn_size': self.rnn_size,
+                   'hidden': self.hidden},
+                  open(filename, 'w'))
+
+    def load(self, file):
+        if isinstance(file, str):
+            stream = open(file, 'r')
+        else:
+            stream = file
+        obj = json.load(stream)
+        self.char_emb_size = obj['char_emb_size']
+        self.rnn_size = obj['rnn_size']
+        self.rnn_layers = obj['rnn_layers']
+        self.hidden = obj['hidden']
+
+
+class CTagger(nn.Module):
+    def __init__(self, config, encodings):
+        super(CTagger, self).__init__()
+        self._config = config
+        self._encodings = encodings
+        self._char_emb = nn.Embedding(len(encodings._char2int), config.char_emb_size, padding_idx=0)
+        self._case_emb = nn.Embedding(4, 16, padding_idx=0)
+
+        self._rnn = nn.LSTM(config.char_emb_size + 16, config.rnn_size, config.rnn_layers, batch_first=True,
+                            bidirectional=True)
+        self._hidden = nn.Sequential(nn.Linear(config.rnn_size * 2, config.hidden), nn.Tanh(), nn.Dropout(0.5))
+        self._softmax_type = nn.Linear(config.hidden, len(encodings._label2int))
+
+    def _make_input(self, word_list):
+        # we pad domain names and feed them in reversed character order to the LSTM
+        max_seq_len = max([len(word) for word in word_list])
+
+        x_char = np.zeros((len(word_list), max_seq_len))
+        x_case = np.zeros((len(word_list), max_seq_len))
+        for iBatch in range(x_char.shape[0]):
+            word = word_list[iBatch]
+            for index in range(len(word)):
+                char = word[index]
+                case_idx = 0
+                if char.lower() == char.upper():
+                    case_idx = 1  # symbol
+                elif char.lower() != char:
+                    case_idx = 2  # uppercase
+                else:
+                    case_idx = 3  # lowercase
+                char = char.lower()
+                if char in self._encodings._char2int:
+                    char_idx = self._encodings._char2int[char]
+                else:
+                    char_idx = 1  # UNK
+                x_char[iBatch, index] = char_idx
+                x_case[iBatch, index] = case_idx
+
+        return x_char, x_case
+
+    def forward(self, string_list):
+        x_char, x_case = self._make_input(string_list)
+        x_char = torch.tensor(x_char, dtype=torch.long, device=self._get_device())
+        x_case = torch.tensor(x_case, dtype=torch.long, device=self._get_device())
+        hidden = torch.cat([self._char_emb(x_char), self._case_emb(x_case)], dim=-1)
+        hidden = torch.dropout(hidden, 0.5, self.training)
+        output, _ = self._rnn(hidden)
+
+        hidden = self._hidden(output)
+
+        return self._softmax_type(hidden)
+
+    def save(self, path):
+        torch.save(self.state_dict(), path)
+
+    def load(self, path):
+        self.load_state_dict(torch.load(path, map_location='cpu'))
+
+    def _get_device(self):
+        if self._char_emb.weight.device.type == 'cpu':
+            return 'cpu'
+        return '{0}:{1}'.format(self._char_emb.weight.device.type, str(self._char_emb.weight.device.index))
+
+
+def _load_dataset(filename):
+    lines = open(filename, encoding='utf-8').readlines()
+    dataset = []
+    for ii in range(len(lines) // 2):
+        string = lines[ii * 2][:-1]
+        mask = lines[ii * 2 + 1][:-1]
+        dataset.append((string, mask))
+    return dataset
+
+
+def _eval(model, dataset, encodings):
+    model.eval()
+    test_x, test_y = _make_batches(dataset, batch_size=128)
+    total_t = 0
+    ok_t = 0
+    with torch.no_grad():
+        pgb = tqdm.tqdm(zip(test_x, test_y), total=len(test_x), ncols=80, desc='\t\t\t\t')
+        for x, y in pgb:
+            y_pred_t = model(x)
+            y_tar_t = _get_targets(y, encodings).reshape(-1)
+            y_pred_t = torch.argmax(y_pred_t, dim=-1).detach().cpu().numpy().reshape(-1)
+            for y_t_t, y_p_t in zip(y_tar_t, y_pred_t):
+                if y_t_t != 0:
+                    total_t += 1
+
+                    if y_t_t == y_p_t:
+                        ok_t += 1
+
+    return ok_t / total_t
+
+
+def _make_batches(dataset, batch_size=32):
+    batches_x = []
+    batches_y = []
+
+    batch_x = []
+    batch_y = []
+
+    for entry in dataset:
+        domain = entry[0]
+        t = entry[1]
+        batch_x.append(domain)
+        batch_y.append(t)
+        if len(batch_x) == batch_size:
+            batches_x.append(batch_x)
+            batches_y.append(batch_y)
+            batch_x = []
+            batch_y = []
+
+    if len(batch_x) != 0:
+        batches_x.append(batch_x)
+        batches_y.append(batch_y)
+
+    return batches_x, batches_y
+
+
+def _get_targets(y, encodings):
+    max_len = max([len(yy) for yy in y])
+    y_t = np.zeros((len(y), max_len), dtype=np.long)
+    for i in range(len(y)):
+        for j in range(max_len):
+            if j < len(y[i]):
+                y_t[i, j] = encodings._label2int[y[i][j]]
+
+    return y_t
+
+
+def _generate_dataset(count):
+    from training import generate_next_cmd
+    dataset = []
+    for ii in range(count):
+        cmd, mask = generate_next_cmd()
+        dataset.append((cmd, mask))
+    return dataset
+
+
+def _start_train(params):
+    eval_at = 5000
+
+    if params.resume:
+        encodings = Encodings('{0}.encodings'.format(params.output_base))
+    else:
+        sys.stdout.write('Generating new random data...')
+        sys.stdout.flush()
+        trainset = _generate_dataset(int(eval_at * 4 * params.batch_size))
+        sys.stdout.write('done\n')
+        encodings = Encodings()
+        encodings.update_encodings(trainset)
+
+    print('chars={0}, types={1}'.format(len(encodings._char2int), len(encodings._label2int)))
+    print(encodings._label2int)
+
+    config = CTaggerConfig()
+    if params.resume:
+        config.load('{0}.conf'.format(params.output_base))
+    model = CTagger(config, encodings)
+    model.to(params.device)
+    if params.resume:
+        model.load('{0}.last'.format(params.output_base))
+    optimizer = torch.optim.Adam(model.parameters())
+    criterion_t = torch.nn.CrossEntropyLoss(ignore_index=0)
+
+    patience_left = params.patience
+    best_type = 0  # _eval(model, devset, encodings)
+    encodings.save('{0}.encodings'.format(params.output_base))
+    config.save('{0}.conf'.format(params.output_base))
+    model.save('{0}.last'.format(params.output_base))
+    print("Devset evaluation acc={0}".format(best_type))
+    epoch = 0
+    eval_at = 5000
+
+    while patience_left > 0:
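+        # each outer iteration draws a fresh synthetic train/dev split, so the
+        # tagger never revisits the same generated commands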
sys.stdout.write('Generating new random data...') + sys.stdout.flush() + trainset = _generate_dataset(int(eval_at * params.batch_size)) + devset = _generate_dataset(int(eval_at / 10 * params.batch_size)) + sys.stdout.write('done\n') + sys.stdout.flush() + sys.stderr.flush() + epoch += 1 + random.shuffle(trainset) + train_x, train_y = _make_batches(trainset, batch_size=params.batch_size) + sys.stdout.write('Starting epoch {0}\n'.format(epoch)) + + pgb = tqdm.tqdm(zip(train_x, train_y), total=len(train_x), ncols=80, desc='\tloss=N/A') + model.train() + total_loss = 0 + cnt = 0 + for x, y in pgb: + cnt += 1 + if cnt % eval_at == 0: + + patience_left -= 1 + sys.stderr.flush() + sys.stderr.flush() + sys.stderr.write('\n\tEvaluating...') + sys.stderr.flush() + acc_t = _eval(model, devset, encodings) + sys.stderr.write(' acc={0}\n'.format(acc_t)) + sys.stderr.flush() + filename = '{0}.last'.format(params.output_base) + sys.stderr.write('\t\tStoring {0}\n'.format(filename)) + sys.stderr.flush() + model.save(filename) + if acc_t > best_type: + patience_left = params.patience + best_type = acc_t + filename = '{0}.bestType'.format(params.output_base) + sys.stderr.write('\t\tStoring {0}\n'.format(filename)) + sys.stderr.flush() + model.save(filename) + + sys.stderr.write('\n') + sys.stderr.flush() + model.train() + + if patience_left <= 0: + print("Stopping with maximum patience reached") + sys.exit(0) + + y_pred_t = model(x) + + y_tar_t = _get_targets(y, encodings) + y_tar_t = torch.tensor(y_tar_t, dtype=torch.long, device=params.device) + y_pred = y_pred_t.view(-1, y_pred_t.shape[-1]) + y_target = y_tar_t.view(-1) + if y_pred.shape[0] != y_target.shape[0]: + from ipdb import set_trace + set_trace() + loss = criterion_t(y_pred, y_target) + + optimizer.zero_grad() + total_loss += loss.item() + pgb.set_description('\tloss={0:.4f}'.format(total_loss / cnt)) + loss.backward() + optimizer.step() + + sys.stdout.write('AVG train loss={0} \n'.format(total_loss / len(train_x))) + + +def _start_interactive(params): + encodings = Encodings('{0}.encodings'.format(params.output_base)) + config = CTaggerConfig() + config.load('{0}.conf'.format(params.output_base)) + model = CTagger(config, encodings) + model.load('{0}.bestType'.format(params.output_base)) + model.to(params.device) + model.eval() + sys.stdout.write('>>> ') + sys.stdout.flush() + string = input() + while string != '/exit': + p_t = model([string]) + p_d_t = torch.argmax(p_t, dim=-1).detach().cpu().numpy() + print("Results for \n{0}".format(string)) + for ii in range(p_d_t.shape[-1]): + sys.stdout.write(encodings._label_list[p_d_t[0, ii]]) + sys.stdout.write('\n') + + print("") + sys.stdout.write('>>> ') + sys.stdout.flush() + string = input() + + +if __name__ == '__main__': + parser = optparse.OptionParser() + parser.add_option('--interactive', action='store_true', dest='interactive') + parser.add_option('--train', action='store_true', dest='train') + parser.add_option('--resume', action='store_true', dest='resume') + + parser.add_option('--store', action='store', dest='output_base') + parser.add_option('--patience', action='store', dest='patience', type='int', default=20, help='(default=20)') + parser.add_option('--batch-size', action='store', dest='batch_size', default=32, type='int', help='(default=32)') + parser.add_option('--device', action='store', dest='device', default='cpu') + + (params, _) = parser.parse_args(sys.argv) + + if params.train: + _start_train(params) + elif params.interactive: + _start_interactive(params) + else: + 
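+        # neither --train nor --interactive was given: print CLI usage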
parser.print_help() diff --git a/ai-engine/stringlifier/stringlifier/modules/training.py b/ai-engine/stringlifier/stringlifier/modules/training.py new file mode 100644 index 0000000..d51bd69 --- /dev/null +++ b/ai-engine/stringlifier/stringlifier/modules/training.py @@ -0,0 +1,170 @@ +# +# Copyright (c) 2020 Adobe Systems Incorporated. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +known_words = [] + + +def _generate_word(known_words): + import uuid + import datetime + import base64 + generated = None + ii = random.randint(0, 5) + mask = 'H' + if ii == 0: + generated = str(uuid.uuid4()) + mask = 'U' + elif ii == 1: + generated = str(uuid.uuid4().hex) + mask = 'H' + elif ii == 2: + c = random.randint(0, 3) + if c == 0: + generated = str(datetime.datetime.now().timestamp()) + elif c == 1: + generated = str(random.randint(0, 100000000000)) + elif c == 2: + generated = str(random.randint(0, 999)) + '.' + str(random.randint(0, 999)) + else: + generated = str(random.randint(0, 999)) + '.' + str(random.randint(0, 9999)) + '.' + str( + random.randint(0, 9999)) + mask = 'N' + elif ii == 3: + import string + N = random.randint(5, 20) + message = [random.choice(string.ascii_uppercase + + string.digits + + string.ascii_lowercase) for _ in range(N)] + message = ''.join(message) + i = random.randint(0, 2) + if i == 0: + message = message.lower() + elif i == 1: + message = message.upper() + generated = message + elif ii == 4: + toks = [] + for _ in range(4): + toks.append(str(random.randint(0, 255))) + generated = '.'.join(toks) + mask = 'I' + elif ii == 5: + generated = _generate_JWT_token(known_words) + mask = 'J' + return str(generated), mask[0] + + +lines = open('corpus/words_alpha.txt').readlines() +for line in lines: + known_words.append(line.strip()) + + +def _generate_JWT_token(known_words): + import jwt + + payload = {"id": str(random.random()), "client_id": str(random.random()), "user_id": str(random.random()), + "type": "access_token", + "expires_in": str(random.randint(10, 3600000)), "scope": "read, write", + "created_at": str(random.randint(1900000, 9000000))} + encoded_jwt = jwt.encode(payload, 'secret', algorithm='HS256') + + return str(encoded_jwt)[2:-1] + + +# generated_words = generate_words(len(known_words), known_words) + +known_index = 0 + +import random + +random.shuffle(known_words) + + +def _get_next_known(): + global known_index + s = known_words[known_index] + known_index += 1 + if known_index == len(known_words): + known_index = 0 + random.shuffle(known_words) + return s + + +def _get_next_gen(): + global known_words + s, m = _generate_word(known_words) + return s, m + + +import random + + +def generate_next_cmd(): + delimiters = ' /.,?!~|<>-=_~:;\\+-&*%$#@!' 
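+    # The mask labels every character of the synthetic command: 'C' marks
+    # common text, while generated tokens carry the letter chosen in
+    # _generate_word ('U' uuid, 'H' hash/random string, 'N' numeric,
+    # 'I' IPv4 address, 'J' JWT).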
+ enclosers = '[]{}``""\'\'()' + mask = '' + cmd = '' + num_words = random.randint(3, 15) + use_space = False + use_delimiter = False + use_encloser = False + append_number = False + for ii in range(num_words): + + use_delimiter = random.random() > 0.5 + use_encloser = random.random() > 0.8 + case_style = random.randint(0, 2) + use_gen_word = random.random() > 0.7 + + del_index = random.randint(0, len(delimiters) - 1) + enc_index = random.randint(0, len(enclosers) // 2 - 1) * 2 + if use_space: + mask += 'C' + cmd += ' ' + if use_gen_word: + wrd, label = _get_next_gen() + if case_style == 1: + wrd = wrd[0].upper() + wrd[1:] + elif case_style == 2: + wrd = wrd.upper() + msk = '' + for _ in range(len(wrd)): + msk += label + else: + wrd = _get_next_known() + append_number = random.random() > 0.97 + if append_number: + wrd = wrd + str(random.randint(0, 99)) + if case_style == 1: + wrd = wrd[0].upper() + wrd[1:] + elif case_style == 2: + wrd = wrd.upper() + msk = '' + for _ in range(len(wrd)): + msk += 'C' + + if use_delimiter: + wrd = delimiters[del_index] + wrd + msk = 'C' + msk + if use_encloser: + wrd = enclosers[enc_index] + wrd + enclosers[enc_index + 1] + msk = 'C' + msk + 'C' + + cmd += wrd + mask += msk + use_space = random.random() > 0.7 + + return cmd, mask diff --git a/sentryflow-clients/log-client/Dockerfile b/clients/log-client/Dockerfile similarity index 53% rename from sentryflow-clients/log-client/Dockerfile rename to clients/log-client/Dockerfile index 82113e7..8d89d21 100644 --- a/sentryflow-clients/log-client/Dockerfile +++ b/clients/log-client/Dockerfile @@ -7,26 +7,25 @@ FROM golang:1.21-alpine3.17 as builder RUN apk --no-cache update RUN apk add --no-cache git clang llvm make gcc protobuf musl-dev -RUN mkdir /app +RUN mkdir /client RUN mkdir /protobuf WORKDIR /protobuf COPY /protobuf . -WORKDIR /app -COPY /sentryflow-clients/log-client . +WORKDIR /client +COPY /clients/log-client . RUN go build -o log-client ### Make executable image -FROM alpine:3.18 as client +FROM alpine:3.17 as client -RUN echo "@community http://dl-cdn.alpinelinux.org/alpine/edge/community" | tee -a /etc/apk/repositories +# RUN echo "@community http://dl-cdn.alpinelinux.org/alpine/edge/community" | tee -a /etc/apk/repositories +# RUN apk --no-cache update +# RUN apk add bash -RUN apk --no-cache update -RUN apk add bash - -COPY --from=builder /app/log-client / +COPY --from=builder /client/log-client / CMD ["/log-client"] diff --git a/sentryflow-clients/log-client/Makefile b/clients/log-client/Makefile similarity index 96% rename from sentryflow-clients/log-client/Makefile rename to clients/log-client/Makefile index e827b9a..5556bdf 100644 --- a/sentryflow-clients/log-client/Makefile +++ b/clients/log-client/Makefile @@ -4,6 +4,15 @@ CLIENT_NAME = sentryflow-log-client IMAGE_NAME = 5gsec/$(CLIENT_NAME) TAG = v0.1 +.PHONY: build +build: gofmt golint gosec + go mod tidy + go build -o $(CLIENT_NAME) + +.PHONY: clean +clean: + rm -f $(CLIENT_NAME) + .PHONY: gofmt gofmt: cd $(CURDIR); gofmt -w -s -d $(shell find . -type f -name '*.go' -print) @@ -38,15 +47,6 @@ ifeq (, $(shell which gosec)) endif cd $(CURDIR); gosec -exclude=G402 ./... 
-.PHONY: build gofmt golint gosec -build: - go mod tidy - go build -o $(CLIENT_NAME) - -.PHONY: clean -clean: - rm -f $(CLIENT_NAME) - .PHONY: build-image build-image: docker build -t $(IMAGE_NAME):$(TAG) -f ./Dockerfile ../../ diff --git a/sentryflow-clients/log-client/client/logClient.go b/clients/log-client/client/client.go similarity index 63% rename from sentryflow-clients/log-client/client/logClient.go rename to clients/log-client/client/client.go index d836ea2..021acbd 100644 --- a/sentryflow-clients/log-client/client/logClient.go +++ b/clients/log-client/client/client.go @@ -15,13 +15,10 @@ import ( type Feeder struct { Running bool - client pb.SentryFlowClient - - logStream pb.SentryFlow_GetLogClient - + client pb.SentryFlowClient + logStream pb.SentryFlow_GetAPILogClient envoyMetricStream pb.SentryFlow_GetEnvoyMetricsClient - - apiMetricStream pb.SentryFlow_GetAPIMetricsClient + apiMetricStream pb.SentryFlow_GetAPIMetricsClient Done chan struct{} } @@ -32,33 +29,33 @@ func StrToFile(str, targetFile string) { if err != nil { newFile, err := os.Create(filepath.Clean(targetFile)) if err != nil { - fmt.Printf("Failed to create a file (%s, %s)\n", targetFile, err.Error()) + fmt.Printf("[Client] Failed to create a file (%s, %s)\n", targetFile, err.Error()) return } err = newFile.Close() if err != nil { - fmt.Printf("Failed to close the file (%s, %s)\n", targetFile, err.Error()) + fmt.Printf("[Client] Failed to close the file (%s, %s)\n", targetFile, err.Error()) } } file, err := os.OpenFile(targetFile, os.O_WRONLY|os.O_APPEND, 0600) if err != nil { - fmt.Printf("Failed to open a file (%s, %s)\n", targetFile, err.Error()) + fmt.Printf("[Client] Failed to open a file (%s, %s)\n", targetFile, err.Error()) } defer func() { if err := file.Close(); err != nil { - fmt.Printf("Failed to close the file (%s, %s)\n", targetFile, err.Error()) + fmt.Printf("[Client] Failed to close the file (%s, %s)\n", targetFile, err.Error()) } }() _, err = file.WriteString(str) if err != nil { - fmt.Printf("Failed to write a string into the file (%s, %s)\n", targetFile, err.Error()) + fmt.Printf("[Client] Failed to write a string into the file (%s, %s)\n", targetFile, err.Error()) } } // NewClient Function -func NewClient(client pb.SentryFlowClient, logCfg string, metricCfg string, metricFilter string, clientInfo *pb.ClientInfo) *Feeder { +func NewClient(client pb.SentryFlowClient, clientInfo *pb.ClientInfo, logCfg string, metricCfg string, metricFilter string) *Feeder { fd := &Feeder{} fd.Running = true @@ -69,47 +66,48 @@ func NewClient(client pb.SentryFlowClient, logCfg string, metricCfg string, metr if logCfg != "none" { // Contact the server and print out its response - logStream, err := client.GetLog(context.Background(), clientInfo) + logStream, err := client.GetAPILog(context.Background(), clientInfo) if err != nil { - log.Fatalf("could not get log: %v", err) + log.Fatalf("[Client] Could not get API log: %v", err) } fd.logStream = logStream } - if metricCfg != "none" && (metricFilter == "all" || metricFilter == "envoy") { - emStream, err := client.GetEnvoyMetrics(context.Background(), clientInfo) + if metricCfg != "none" && (metricFilter == "all" || metricFilter == "api") { + amStream, err := client.GetAPIMetrics(context.Background(), clientInfo) if err != nil { - log.Fatalf("could not get log: %v", err) + log.Fatalf("[Client] Could not get API metrics: %v", err) } - fd.envoyMetricStream = emStream + fd.apiMetricStream = amStream } - if metricCfg != "none" && (metricFilter == "all" || metricFilter == 
"api") { - amStream, err := client.GetAPIMetrics(context.Background(), clientInfo) + if metricCfg != "none" && (metricFilter == "all" || metricFilter == "envoy") { + emStream, err := client.GetEnvoyMetrics(context.Background(), clientInfo) if err != nil { - log.Fatalf("could not get log: %v", err) + log.Fatalf("[Client] Could not get Enovy metrics: %v", err) } - fd.apiMetricStream = amStream + fd.envoyMetricStream = emStream } return fd } -// LogRoutine Function -func (fd *Feeder) LogRoutine(logCfg string) { +// APILogRoutine Function +func (fd *Feeder) APILogRoutine(logCfg string) { for fd.Running { select { default: data, err := fd.logStream.Recv() if err != nil { - log.Fatalf("failed to receive log: %v", err) + log.Fatalf("[Client] Failed to receive an API log: %v", err) break } + str := "" - str = str + "== Access Log ==\n" + str = str + "== API Log ==\n" str = str + fmt.Sprintf("%v\n", data) if logCfg == "stdout" { @@ -123,28 +121,20 @@ func (fd *Feeder) LogRoutine(logCfg string) { } } -// EnvoyMetricRoutine Function -func (fd *Feeder) EnvoyMetricRoutine(metricCfg string) { - metricKeys := []string{"GAUGE", "COUNTER", "HISTOGRAM", "SUMMARY"} +// APIMetricsRoutine Function +func (fd *Feeder) APIMetricsRoutine(metricCfg string) { for fd.Running { select { default: - data, err := fd.envoyMetricStream.Recv() + data, err := fd.apiMetricStream.Recv() if err != nil { - log.Fatalf("failed to receive metric: %v", err) + log.Fatalf("[Client] Failed to receive API metrics: %v", err) break } str := "" - str = fmt.Sprintf("== Envoy Metrics / %s ==\n", data.TimeStamp) - str = str + fmt.Sprintf("Namespace: %s\n", data.Namespace) - str = str + fmt.Sprintf("Name: %s\n", data.Name) - str = str + fmt.Sprintf("PodIP: %s\n", data.PodIP) - str = str + fmt.Sprintf("Labels: %s\n", data.Labels) - - for _, key := range metricKeys { - str = str + fmt.Sprintf("%s: {%v}\n", key, data.Metric[key]) - } + str = str + "== API Metrics ==\n" + str = str + fmt.Sprintf("%v\n", data) if metricCfg == "stdout" { fmt.Printf("%s", str) @@ -157,20 +147,28 @@ func (fd *Feeder) EnvoyMetricRoutine(metricCfg string) { } } -// APIMetricRoutine Function -func (fd *Feeder) APIMetricRoutine(metricCfg string) { +// EnvoyMetricsRoutine Function +func (fd *Feeder) EnvoyMetricsRoutine(metricCfg string) { + metricKeys := []string{"GAUGE", "COUNTER", "HISTOGRAM", "SUMMARY"} for fd.Running { select { default: - data, err := fd.apiMetricStream.Recv() + data, err := fd.envoyMetricStream.Recv() if err != nil { - log.Fatalf("failed to receive metric: %v", err) + log.Fatalf("[Client] Failed to receive Envoy metrics: %v", err) break } str := "" - str = str + "== API Metrics ==\n" - str = str + fmt.Sprintf("%v\n", data) + str = fmt.Sprintf("== Envoy Metrics / %s ==\n", data.TimeStamp) + str = str + fmt.Sprintf("Namespace: %s\n", data.Namespace) + str = str + fmt.Sprintf("Name: %s\n", data.Name) + str = str + fmt.Sprintf("IPAddress: %s\n", data.IPAddress) + str = str + fmt.Sprintf("Labels: %s\n", data.Labels) + + for _, key := range metricKeys { + str = str + fmt.Sprintf("%s: {%v}\n", key, data.Metrics[key]) + } if metricCfg == "stdout" { fmt.Printf("%s", str) diff --git a/sentryflow-clients/log-client/common/config.go b/clients/log-client/config/config.go similarity index 57% rename from sentryflow-clients/log-client/common/config.go rename to clients/log-client/config/config.go index 3ff2213..a14c153 100644 --- a/sentryflow-clients/log-client/common/config.go +++ b/clients/log-client/config/config.go @@ -1,6 +1,6 @@ // SPDX-License-Identifier: 
Apache-2.0 -package common +package config import ( "errors" @@ -9,29 +9,39 @@ import ( "strconv" ) -// Config structure +// Config Structure type Config struct { - ServerAddr string - ServerPort int + Hostname string + + ServerAddr string + ServerPort int + LogCfg string MetricCfg string MetricFilter string } -// Cfg is for global reference +// Cfg for Global Reference var Cfg Config -// LoadEnvVars loads environment variables and stores them as global variable +// LoadEnvVars loads environment variables and stores them in Cfg (global variables) func LoadEnvVars() (Config, error) { var err error + // get hostname + Cfg.Hostname, err = os.Hostname() + if err != nil { + msg := fmt.Sprintf("[Config] Could not find hostname: %v", err) + return Cfg, errors.New(msg) + } + // load listen address and check if valid Cfg.ServerAddr = os.Getenv("SERVER_ADDR") // load listen port and check if valid Cfg.ServerPort, err = strconv.Atoi(os.Getenv("SERVER_PORT")) if err != nil { - msg := fmt.Sprintf("invalid server port %s: %v", os.Getenv("SERVER_PORT"), err) + msg := fmt.Sprintf("[Config] Invalid server port %s: %v", os.Getenv("SERVER_PORT"), err) return Cfg, errors.New(msg) } diff --git a/sentryflow-clients/log-client/go.mod b/clients/log-client/go.mod similarity index 71% rename from sentryflow-clients/log-client/go.mod rename to clients/log-client/go.mod index 3fbb98e..b9e1bec 100644 --- a/sentryflow-clients/log-client/go.mod +++ b/clients/log-client/go.mod @@ -10,9 +10,9 @@ require ( ) require ( - golang.org/x/net v0.21.0 // indirect - golang.org/x/sys v0.17.0 // indirect + golang.org/x/net v0.23.0 // indirect + golang.org/x/sys v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect - google.golang.org/protobuf v1.33.0 // indirect + google.golang.org/protobuf v1.34.1 // indirect ) diff --git a/sentryflow-clients/log-client/go.sum b/clients/log-client/go.sum similarity index 64% rename from sentryflow-clients/log-client/go.sum rename to clients/log-client/go.sum index e1723cb..6e760e0 100644 --- a/sentryflow-clients/log-client/go.sum +++ b/clients/log-client/go.sum @@ -1,14 +1,14 @@ github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de h1:cZGRis4/ot9uVm639a+rHCUaG0JJHEsdyzSQTMX+suY= google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY= google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= google.golang.org/grpc v1.63.2/go.mod 
h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= diff --git a/sentryflow-clients/log-client/main.go b/clients/log-client/main.go similarity index 50% rename from sentryflow-clients/log-client/main.go rename to clients/log-client/main.go index d75fcaa..a177cd9 100644 --- a/sentryflow-clients/log-client/main.go +++ b/clients/log-client/main.go @@ -8,26 +8,29 @@ import ( "fmt" "log" "log-client/client" - "log-client/common" + "log-client/config" "os" "os/signal" "syscall" "google.golang.org/grpc" - _ "google.golang.org/grpc/encoding/gzip" // If not set, encoding problem occurs https://stackoverflow.com/questions/74062727 ) +// ========== // +// == Main == // +// ========== // + func main() { // Load environment variables - cfg, err := common.LoadEnvVars() + cfg, err := config.LoadEnvVars() if err != nil { - log.Fatalf("Could not load environment variables: %v", err) + log.Fatalf("[Config] Could not load environment variables: %v", err) } - // get arguments - logCfgPtr := flag.String("logCfg", "stdout", "Output location for logs, {path|stdout|none}") - metricCfgPtr := flag.String("metricCfg", "stdout", "Output location for envoy metrics and api metrics, {path|stdout|none}") - metricFilterPtr := flag.String("metricFilter", "envoy", "Filter for what kinds of envoy and api metric to receive, {policy|envoy|api}") + // Get arguments + logCfgPtr := flag.String("logCfg", "stdout", "Output location for API logs, {stdout|file|none}") + metricCfgPtr := flag.String("metricCfg", "stdout", "Output location for API and Envoy metrics, {stdout|file|none}") + metricFilterPtr := flag.String("metricFilter", "envoy", "Filter to select specific API or Envoy metrics to receive, {api|envoy}") flag.Parse() if *logCfgPtr == "none" && *metricCfgPtr == "none" { @@ -45,53 +48,52 @@ func main() { *metricFilterPtr = cfg.MetricFilter } - if *metricFilterPtr != "all" && *metricFilterPtr != "envoy" && *metricFilterPtr != "api" { + if *metricFilterPtr != "all" && *metricFilterPtr != "api" && *metricFilterPtr != "envoy" { flag.PrintDefaults() return } - // Construct address and start listening + // == // + + // Construct a string "ServerAddr:ServerPort" addr := fmt.Sprintf("%s:%d", cfg.ServerAddr, cfg.ServerPort) - // Set up a connection to the server. 
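// A minimal sketch of the bootstrap pattern the reworked clients share:
// CLI flags supply defaults, non-empty environment variables override them,
// and the receive loops stop when a Done channel is closed on SIGINT/SIGTERM.
// This is a hypothetical, self-contained illustration, not the code being
// merged: only LOG_CFG is handled here, whereas the real clients also resolve
// METRIC_CFG, METRIC_FILTER, SERVER_ADDR, and SERVER_PORT via config.LoadEnvVars.
package main

import (
	"flag"
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// A CLI flag provides the default value...
	logCfg := flag.String("logCfg", "stdout", "Output location for API logs, {stdout|file|none}")
	flag.Parse()

	// ...but a non-empty environment variable takes precedence, mirroring the
	// `if cfg.LogCfg != "" { *logCfgPtr = cfg.LogCfg }` checks in main.go.
	if env := os.Getenv("LOG_CFG"); env != "" {
		*logCfg = env
	}

	done := make(chan struct{})

	// Worker loop in the style of the Feeder routines: poll until Done closes.
	go func() {
		for {
			select {
			case <-done:
				return
			default:
				// a real client would call stream.Recv() and handle data here
			}
		}
	}()

	// Closing Done on SIGINT/SIGTERM lets every routine return cleanly.
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
	<-sigChan
	close(done)

	fmt.Println("stopped; logCfg was", *logCfg)
}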
+ // Connect to the gRPC server of SentryFlow conn, err := grpc.Dial(addr, grpc.WithInsecure()) if err != nil { - log.Fatalf("could not connect: %v", err) + log.Fatalf("[gRPC] Failed to connect: %v", err) + return } defer conn.Close() - // Start serving gRPC server - log.Printf("[gRPC] Successfully connected to %s for AccessLog", addr) - - // Create a client for the SentryFlow service - sfClient := protobuf.NewSentryFlowClient(conn) - - hostname, err := os.Hostname() - if err != nil { - log.Fatalf("could not find hostname: %v", err) - } + // Connected to the gRPC server + log.Printf("[gRPC] Started to collect Logs from %s", addr) - // Define the client information + // Define clientInfo clientInfo := &protobuf.ClientInfo{ - HostName: hostname, + HostName: cfg.Hostname, } - logClient := client.NewClient(sfClient, *logCfgPtr, *metricCfgPtr, *metricFilterPtr, clientInfo) + // Create a gRPC client for the SentryFlow service + sfClient := protobuf.NewSentryFlowClient(conn) + + // Create a log client with the gRPC client + logClient := client.NewClient(sfClient, clientInfo, *logCfgPtr, *metricCfgPtr, *metricFilterPtr) if *logCfgPtr != "none" { - go logClient.LogRoutine(*logCfgPtr) - fmt.Printf("Started to watch logs\n") + go logClient.APILogRoutine(*logCfgPtr) + fmt.Printf("[APILog] Started to watch API logs\n") } if *metricCfgPtr != "none" { - if *metricFilterPtr == "all" || *metricFilterPtr == "envoy" { - go logClient.EnvoyMetricRoutine(*metricCfgPtr) - fmt.Printf("Started to watch envoy metrics\n") + if *metricFilterPtr == "all" || *metricFilterPtr == "api" { + go logClient.APIMetricsRoutine(*metricCfgPtr) + fmt.Printf("[Metric] Started to watch API Metrics\n") } - if *metricFilterPtr == "all" || *metricFilterPtr == "api" { - go logClient.APIMetricRoutine(*metricCfgPtr) - fmt.Printf("Started to watch api metrics\n") + if *metricFilterPtr == "all" || *metricFilterPtr == "envoy" { + go logClient.EnvoyMetricsRoutine(*metricCfgPtr) + fmt.Printf("[Metric] Started to watch Envoy Metrics\n") } } diff --git a/sentryflow-clients/mongo-client/Dockerfile b/clients/mongo-client/Dockerfile similarity index 53% rename from sentryflow-clients/mongo-client/Dockerfile rename to clients/mongo-client/Dockerfile index 359cb31..d71eecc 100644 --- a/sentryflow-clients/mongo-client/Dockerfile +++ b/clients/mongo-client/Dockerfile @@ -7,28 +7,25 @@ FROM golang:1.21-alpine3.17 as builder RUN apk --no-cache update RUN apk add --no-cache git clang llvm make gcc protobuf musl-dev -RUN mkdir /app +RUN mkdir /client RUN mkdir /protobuf WORKDIR /protobuf - COPY /protobuf . -WORKDIR /app - -COPY /sentryflow-clients/mongo-client . +WORKDIR /client +COPY /clients/mongo-client . 
RUN go build -o mongo-client

### Make executable image -FROM alpine:3.18 as client +FROM alpine:3.17 as client -RUN echo "@community http://dl-cdn.alpinelinux.org/alpine/edge/community" | tee -a /etc/apk/repositories - -RUN apk --no-cache update -RUN apk add bash +# RUN echo "@community http://dl-cdn.alpinelinux.org/alpine/edge/community" | tee -a /etc/apk/repositories +# RUN apk --no-cache update +# RUN apk add bash -COPY --from=builder /app/mongo-client / +COPY --from=builder /client/mongo-client / CMD ["/mongo-client"] diff --git a/sentryflow-clients/mongo-client/Makefile b/clients/mongo-client/Makefile similarity index 96% rename from sentryflow-clients/mongo-client/Makefile rename to clients/mongo-client/Makefile index c1ddc60..daab1d1 100644 --- a/sentryflow-clients/mongo-client/Makefile +++ b/clients/mongo-client/Makefile @@ -4,6 +4,15 @@ CLIENT_NAME = sentryflow-mongo-client IMAGE_NAME = 5gsec/$(CLIENT_NAME) TAG = v0.1 +.PHONY: build +build: gofmt golint gosec + go mod tidy + go build -o $(CLIENT_NAME) + +.PHONY: clean +clean: + rm -f $(CLIENT_NAME) + .PHONY: gofmt gofmt: cd $(CURDIR); gofmt -w -s -d $(shell find . -type f -name '*.go' -print) @@ -38,15 +47,6 @@ ifeq (, $(shell which gosec)) endif cd $(CURDIR); gosec -exclude=G402 ./... -.PHONY: build gofmt golint gosec -build: - go mod tidy - go build -o $(CLIENT_NAME) - -.PHONY: clean -clean: - rm -f $(CLIENT_NAME) - .PHONY: build-image build-image: docker build -t $(IMAGE_NAME):$(TAG) -f ./Dockerfile ../../ diff --git a/clients/mongo-client/client/client.go b/clients/mongo-client/client/client.go new file mode 100644 index 0000000..bbef3a5 --- /dev/null +++ b/clients/mongo-client/client/client.go @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: Apache-2.0 + +package client + +import ( + pb "SentryFlow/protobuf" + "context" + "log" + "mongo-client/mongodb" +) + +// Feeder Structure +type Feeder struct { + Running bool + + client pb.SentryFlowClient + logStream pb.SentryFlow_GetAPILogClient + envoyMetricStream pb.SentryFlow_GetEnvoyMetricsClient + apiMetricStream pb.SentryFlow_GetAPIMetricsClient + + dbHandler mongodb.DBHandler + + Done chan struct{} +} + +// NewClient Function +func NewClient(client pb.SentryFlowClient, clientInfo *pb.ClientInfo, logCfg string, metricCfg string, metricFilter string, mongoDBAddr string) *Feeder { + fd := &Feeder{} + + fd.Running = true + fd.client = client + fd.Done = make(chan struct{}) + + if logCfg != "none" { + // Contact the server and print out its response + logStream, err := client.GetAPILog(context.Background(), clientInfo) + if err != nil { + log.Fatalf("[Client] Could not get API log: %v", err) + } + + fd.logStream = logStream + } + + if metricCfg != "none" && (metricFilter == "all" || metricFilter == "api") { + amStream, err := client.GetAPIMetrics(context.Background(), clientInfo) + if err != nil { + log.Fatalf("[Client] Could not get API metrics: %v", err) + } + + fd.apiMetricStream = amStream + } + + if metricCfg != "none" && (metricFilter == "all" || metricFilter == "envoy") { + emStream, err := client.GetEnvoyMetrics(context.Background(), clientInfo) + if err != nil { + log.Fatalf("[Client] Could not get Envoy metrics: %v", err) + } + + fd.envoyMetricStream = emStream + } + + // Initialize DB + dbHandler, err := mongodb.NewMongoDBHandler(mongoDBAddr) + if err != nil { + log.Fatalf("[MongoDB] Unable to initialize DB: %v", err) + } + fd.dbHandler = *dbHandler + + return fd +} + +// APILogRoutine Function +func (fd *Feeder) APILogRoutine(logCfg string) { + for fd.Running { + select { +
default: + data, err := fd.logStream.Recv() + if err != nil { + log.Fatalf("[Client] Failed to receive an API log: %v", err) + break + } + err = fd.dbHandler.InsertAPILog(data) + if err != nil { + log.Fatalf("[MongoDB] Failed to insert an API log: %v", err) + } + case <-fd.Done: + return + } + } +} + +// APIMetricsRoutine Function +func (fd *Feeder) APIMetricsRoutine(metricCfg string) { + for fd.Running { + select { + default: + data, err := fd.apiMetricStream.Recv() + if err != nil { + log.Fatalf("[Client] Failed to receive API metrics: %v", err) + break + } + err = fd.dbHandler.InsertAPIMetrics(data) + if err != nil { + log.Fatalf("[MongoDB] Failed to insert API metrics: %v", err) + } + case <-fd.Done: + return + } + } +} + +// EnvoyMetricsRoutine Function +func (fd *Feeder) EnvoyMetricsRoutine(metricCfg string) { + for fd.Running { + select { + default: + data, err := fd.envoyMetricStream.Recv() + if err != nil { + log.Fatalf("[Client] Failed to receive Envoy metrics: %v", err) + break + } + err = fd.dbHandler.InsertEnvoyMetrics(data) + if err != nil { + log.Fatalf("[MongoDB] Failed to insert Envoy metrics: %v", err) + } + case <-fd.Done: + return + } + } +} diff --git a/clients/mongo-client/config/config.go b/clients/mongo-client/config/config.go new file mode 100644 index 0000000..3fd3d1b --- /dev/null +++ b/clients/mongo-client/config/config.go @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: Apache-2.0 + +package config + +import ( + "errors" + "fmt" + "os" + "strconv" +) + +// Config Structure +type Config struct { + Hostname string + + ServerAddr string + ServerPort int + + LogCfg string + MetricCfg string + MetricFilter string + + MongoDBAddr string +} + +// Cfg for Global Reference +var Cfg Config + +// LoadEnvVars loads environment variables and stores them in Cfg (global variables) +func LoadEnvVars() (Config, error) { + var err error + + // get hostname + Cfg.Hostname, err = os.Hostname() + if err != nil { + msg := fmt.Sprintf("[Config] Could not find hostname: %v", err) + return Cfg, errors.New(msg) + } + + // load listen address and check if valid + Cfg.ServerAddr = os.Getenv("SERVER_ADDR") + + // load listen port and check if valid + Cfg.ServerPort, err = strconv.Atoi(os.Getenv("SERVER_PORT")) + if err != nil { + msg := fmt.Sprintf("[Config] Invalid server port %s: %v", os.Getenv("SERVER_PORT"), err) + return Cfg, errors.New(msg) + } + + Cfg.LogCfg = os.Getenv("LOG_CFG") + Cfg.MetricCfg = os.Getenv("METRIC_CFG") + Cfg.MetricFilter = os.Getenv("METRIC_FILTER") + + // load MongoDB address + Cfg.MongoDBAddr = os.Getenv("MONGODB_ADDR") + + return Cfg, nil +} diff --git a/sentryflow-clients/mongo-client/go.mod b/clients/mongo-client/go.mod similarity index 82% rename from sentryflow-clients/mongo-client/go.mod rename to clients/mongo-client/go.mod index 2fb441e..a8e4091 100644 --- a/sentryflow-clients/mongo-client/go.mod +++ b/clients/mongo-client/go.mod @@ -18,11 +18,11 @@ require ( github.com/xdg-go/scram v1.1.2 // indirect github.com/xdg-go/stringprep v1.0.4 // indirect github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect - golang.org/x/crypto v0.19.0 // indirect - golang.org/x/net v0.21.0 // indirect + golang.org/x/crypto v0.21.0 // indirect + golang.org/x/net v0.23.0 // indirect golang.org/x/sync v0.6.0 // indirect - golang.org/x/sys v0.17.0 // indirect + golang.org/x/sys v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect - google.golang.org/protobuf v1.33.0 // 
indirect + google.golang.org/protobuf v1.34.1 // indirect ) diff --git a/sentryflow-clients/mongo-client/go.sum b/clients/mongo-client/go.sum similarity index 90% rename from sentryflow-clients/mongo-client/go.sum rename to clients/mongo-client/go.sum index 6a8f8ef..f1e8df1 100644 --- a/sentryflow-clients/mongo-client/go.sum +++ b/clients/mongo-client/go.sum @@ -23,15 +23,15 @@ go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwD golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= @@ -42,8 +42,8 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -63,5 +63,5 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY= google.golang.org/grpc v1.63.2 
h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= diff --git a/clients/mongo-client/main.go b/clients/mongo-client/main.go new file mode 100644 index 0000000..95dea38 --- /dev/null +++ b/clients/mongo-client/main.go @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: Apache-2.0 + +package main + +import ( + protobuf "SentryFlow/protobuf" + + "flag" + "fmt" + "log" + "mongo-client/client" + "mongo-client/config" + "os" + "os/signal" + "syscall" + + "google.golang.org/grpc" +) + +// ========== // +// == Main == // +// ========== // + +func main() { + // Load environment variables + cfg, err := config.LoadEnvVars() + if err != nil { + log.Fatalf("[Config] Could not load environment variables: %v", err) + } + + // Get arguments + logCfgPtr := flag.String("logCfg", "mongodb", "Location for storing API logs, {mongodb|none}") + metricCfgPtr := flag.String("metricCfg", "mongodb", "Location for storing API and Envoy metrics, {mongodb|none}") + metricFilterPtr := flag.String("metricFilter", "envoy", "Filter to select specific API or Envoy metrics to receive, {api|envoy}") + mongoDBAddrPtr := flag.String("mongodb", "", "MongoDB Server Address") + flag.Parse() + + if *logCfgPtr == "none" && *metricCfgPtr == "none" { + flag.PrintDefaults() + return + } + + if cfg.LogCfg != "" { + *logCfgPtr = cfg.LogCfg + } + if cfg.MetricCfg != "" { + *metricCfgPtr = cfg.MetricCfg + } + if cfg.MetricFilter != "" { + *metricFilterPtr = cfg.MetricFilter + } + if cfg.MongoDBAddr != "" { + *mongoDBAddrPtr = cfg.MongoDBAddr + } + + if *metricFilterPtr != "all" && *metricFilterPtr != "api" && *metricFilterPtr != "envoy" { + flag.PrintDefaults() + return + } + + // == // + + // Construct a string "ServerAddr:ServerPort" + addr := fmt.Sprintf("%s:%d", cfg.ServerAddr, cfg.ServerPort) + + // Connect to the gRPC server of SentryFlow + conn, err := grpc.Dial(addr, grpc.WithInsecure()) + if err != nil { + log.Fatalf("[gRPC] Failed to connect: %v", err) + return + } + defer conn.Close() + + // Connected to the gRPC server + log.Printf("[gRPC] Started to collect Logs from %s", addr) + + // Define clientInfo + clientInfo := &protobuf.ClientInfo{ + HostName: cfg.Hostname, + } + + // Create a gRPC client for the SentryFlow service + sfClient := protobuf.NewSentryFlowClient(conn) + + // Create a log client with the gRPC client + logClient := client.NewClient(sfClient, clientInfo, *logCfgPtr, *metricCfgPtr, *metricFilterPtr, *mongoDBAddrPtr) + + if *logCfgPtr != "none" { + go logClient.APILogRoutine(*logCfgPtr) + fmt.Printf("[APILog] Started to watch API logs\n") + } + + if *metricCfgPtr != "none" { + if *metricFilterPtr == "all" || *metricFilterPtr == "api" { + go logClient.APIMetricsRoutine(*metricCfgPtr) + fmt.Printf("[Metric] Started to watch API metrics\n") + } + + if *metricFilterPtr == "all" || *metricFilterPtr == "envoy" { + go logClient.EnvoyMetricsRoutine(*metricCfgPtr) + fmt.Printf("[Metric] Started to watch Envoy metrics\n") + } + } + + signalChan := make(chan os.Signal, 1) + signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM) + + <-signalChan + + close(logClient.Done) +} diff --git 
a/clients/mongo-client/mongodb/mongoHandler.go b/clients/mongo-client/mongodb/mongoHandler.go new file mode 100644 index 0000000..eaedb76 --- /dev/null +++ b/clients/mongo-client/mongodb/mongoHandler.go @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: Apache-2.0 + +package mongodb + +import ( + protobuf "SentryFlow/protobuf" + "context" + "errors" + "fmt" + "log" + "time" + + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" +) + +// DBHandler Structure +type DBHandler struct { + client *mongo.Client + cancel context.CancelFunc + + database *mongo.Database + apiLogCol *mongo.Collection + apiMetricsCol *mongo.Collection + evyMetricsCol *mongo.Collection +} + +// dbHandler for Global Reference +var dbHandler DBHandler + +// NewMongoDBHandler Function +func NewMongoDBHandler(mongoDBAddr string) (*DBHandler, error) { + var err error + + // Create a MongoDB client + dbHandler.client, err = mongo.NewClient(options.Client().ApplyURI(mongoDBAddr)) + if err != nil { + msg := fmt.Sprintf("[MongoDB] Unable to initialize a MongoDB client (%s): %v", mongoDBAddr, err) + return nil, errors.New(msg) + } + + // Set timeout (10 sec) + var ctx context.Context + ctx, dbHandler.cancel = context.WithTimeout(context.Background(), 10*time.Second) + + // Connect to the MongoDB server + err = dbHandler.client.Connect(ctx) + if err != nil { + msg := fmt.Sprintf("[MongoDB] Unable to connect to the MongoDB server (%s): %v", mongoDBAddr, err) + return nil, errors.New(msg) + } + + // Create 'SentryFlow' database + dbHandler.database = dbHandler.client.Database("SentryFlow") + + // Create APILogs and Metrics collections + dbHandler.apiLogCol = dbHandler.database.Collection("APILogs") + dbHandler.apiMetricsCol = dbHandler.database.Collection("APIMetrics") + dbHandler.evyMetricsCol = dbHandler.database.Collection("EnvoyMetrics") + + return &dbHandler, nil +} + +// Disconnect Function +func (handler *DBHandler) Disconnect() { + err := handler.client.Disconnect(context.Background()) + if err != nil { + log.Printf("[MongoDB] Unable to properly disconnect: %v", err) + } +} + +// InsertAPILog Function +func (handler *DBHandler) InsertAPILog(data *protobuf.APILog) error { + _, err := handler.apiLogCol.InsertOne(context.Background(), data) + if err != nil { + return err + } + + return nil +} + +// InsertAPIMetrics Function +func (handler *DBHandler) InsertAPIMetrics(data *protobuf.APIMetrics) error { + _, err := handler.apiMetricsCol.InsertOne(context.Background(), data) + if err != nil { + return err + } + + return nil +} + +// InsertEnvoyMetrics Function +func (handler *DBHandler) InsertEnvoyMetrics(data *protobuf.EnvoyMetrics) error { + _, err := handler.evyMetricsCol.InsertOne(context.Background(), data) + if err != nil { + return err + } + + return nil +} diff --git a/contribution/README.md b/contribution/README.md index 605a1c7..44c01f7 100644 --- a/contribution/README.md +++ b/contribution/README.md @@ -1,26 +1,18 @@ -# Development Guide +# Contribution Guide -SentryFlow operates within an Istio environment on Kubernetes, indicating that contributors to our project will need to have an Istio environment set up. +SentryFlow operates within Istio on Kubernetes. This means project participants will need an Istio environment. -To minimize the hassle of installing and uninstalling Kubernetes and configuring Istio solely for our project, we have provided a straightforward Vagrantfile.
This Vagrantfile initializes an Ubuntu virtual machine equipped with a fully operational Kubernetes and Istio environment. +To minimize the hassle of installing (uninstalling) Kubernetes and configuring Istio, we have prepared a Vagrantfile that initializes an Ubuntu VM with fully functional Kubernetes and Istio. ## 1. Prerequisites -We employ Vagrant to provision VirtualBox virtual machines, creating a Kubernetes environment. As such, it is highly recommended to install the following package versions in your local environment: +The provided Vagrantfile is tested on the following environment (i.e., Vagrant with VirtualBox). - **[Vagrant](https://www.vagrantup.com/)** - v2.2.9 - **[VirtualBox](https://www.virtualbox.org/)** - v6.1 ## 2. Starting up a VM -We have configured a Vagrantfile that initiates an Ubuntu 22.04 machine with Kubernetes pre-installed. The setup for Kubernetes is as described below: - -> **Note:** Although Kubernetes officially advises the use of containerd over Docker as the Container Runtime Interface (CRI), we have chosen to use Docker as the CRI within our Kubernetes setup. This decision facilitates the building and testing of SentryFlow and its client images. - -- Kubernetes: 1.23 -- [CRI] Docker: 24.0.7 -- [CNI] Calico: 0.3.1 - To proceed, execute the following command within the `contribution/` directory: ```bash @@ -42,15 +34,15 @@ This command will initiate the installation of the necessary development environ ### Development -After Vagrant has been successfully initialized, you can access the Istio and Kubernetes environment by executing the following steps: +Once Vagrant has been initialized successfully, you can access the Kubernetes environment by following these steps: ``` $ vagrant ssh ``` -The source code for SentryFlow will be located in `/home/vagrant/sentryflow` within the virtual environment, and this directory will also be synchronized with the current work directory on the host machine. +The source code for SentryFlow will be located at `/home/vagrant/sentryflow` within the virtual environment, and this directory will also be synchronized with the current working directory on the host machine. -After making modifications to the source code of SentryFlow, you can build the changes by moving to the `sentryflow` directory and running the Makefile. +After making modifications to the source code of SentryFlow, you can build the changes by navigating to the `sentryflow` directory and running the Makefile. 
``` make build @@ -65,9 +57,9 @@ To maintain a clean and secure code base for SentryFlow, we conduct several chec To evaluate the quality of your code, navigate to the `sentryflow` directory and execute the following commands: ``` -make golint # will run golint checks -make gofmt # will run gofmt checks -make gosec # will run gosec checks +make golint # run golint checks +make gofmt # run gofmt checks +make gosec # run gosec checks ``` ### Pull Request diff --git a/contribution/vagrant/Vagrantfile b/contribution/vagrant/Vagrantfile index a4e51a8..08f0eca 100644 --- a/contribution/vagrant/Vagrantfile +++ b/contribution/vagrant/Vagrantfile @@ -46,6 +46,6 @@ Vagrant.configure("2") do |config| # copy git config config.vm.provision :file, source: "~/.gitconfig", destination: "$HOME/.gitconfig" - # setup k8s and IStio - config.vm.provision "shell", path: "setup.sh" + # setup env + config.vm.provision "shell", path: "env-setup.sh" end diff --git a/contribution/vagrant/env-setup.sh b/contribution/vagrant/env-setup.sh new file mode 100755 index 0000000..4c9e348 --- /dev/null +++ b/contribution/vagrant/env-setup.sh @@ -0,0 +1,130 @@ +#!/bin/bash + +# == Build Essential == # + +# update repo +sudo apt-get update + +# install build-essential +sudo apt-get install -y build-essential + +# == Containerd == # + +# add GPG key +sudo apt-get install -y curl ca-certificates gnupg +sudo install -m 0755 -d /etc/apt/keyrings +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg +sudo chmod a+r /etc/apt/keyrings/docker.gpg + +# add docker repository +echo \ + "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \ + "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + +# update the docker repo +sudo apt-get update + +# install containerd +sudo apt-get install -y containerd.io + +# set up the default config file +sudo mkdir -p /etc/containerd +sudo containerd config default | sudo tee /etc/containerd/config.toml +sudo sed -i "s/SystemdCgroup = false/SystemdCgroup = true/g" /etc/containerd/config.toml +sudo systemctl restart containerd + +# # == Kubernetes == # + +# install k3s +curl -sfL https://get.k3s.io | K3S_KUBECONFIG_MODE="644" INSTALL_K3S_EXEC="--disable=traefik" sh - + +echo "wait for initialization" +sleep 15 + +runtime="15 minute" +endtime=$(date -ud "$runtime" +%s) + +while [[ $(date -u +%s) -le $endtime ]] +do + status=$(kubectl get pods -A -o jsonpath={.items[*].status.phase}) + [[ $(echo $status | grep -v Running | wc -l) -eq 0 ]] && break + echo "wait for initialization" + sleep 1 +done + +# make kubectl accessible for vagrant user +mkdir -p /home/vagrant/.kube +sudo cp /etc/rancher/k3s/k3s.yaml /home/vagrant/.kube/config +sudo chown -R vagrant:vagrant /home/vagrant/.kube +echo "export KUBECONFIG=/home/vagrant/.kube/config" | tee -a /home/vagrant/.bashrc +PATH=$PATH:/bin:/usr/bin:/usr/local/bin + +# == Istio == # + +# move to home +cd /home/vagrant + +# download istio +curl -L https://istio.io/downloadIstio | sh - + +# copy istioctl to /usr/local/bin +sudo cp /home/vagrant/istio-*/bin/istioctl /usr/local/bin + +# change permissions +sudo chown -R vagrant:vagrant /home/vagrant/istio-* + +# install istio +su - vagrant -c "istioctl install --set profile=default -y" + +# == Docker == # + +# install Docker +sudo apt-get install -y docker-ce && sleep 5 + +# configure daemon.json +sudo mkdir -p /etc/docker +cat
<> /home/vagrant/.bashrc +echo "export GOPATH=\$HOME/go" >> /home/vagrant/.bashrc +echo "export GOROOT=/usr/local/go" >> /home/vagrant/.bashrc +echo "export PATH=\$PATH:/usr/local/go/bin:\$HOME/go/bin" >> /home/vagrant/.bashrc +echo >> /home/vagrant/.bashrc + +# create a directory for Go +mkdir -p /home/vagrant/go +chown -R vagrant:vagrant /home/vagrant/go diff --git a/contribution/vagrant/install-scripts/install-kvm.sh b/contribution/vagrant/install-scripts/install-kvm.sh deleted file mode 100755 index 2ba5307..0000000 --- a/contribution/vagrant/install-scripts/install-kvm.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -. /etc/os-release - -if [ "$NAME" != "Ubuntu" ]; then - echo "This script is for Ubuntu." - exit -fi - -# install kvm and dependencies -sudo apt-get install -y bridge-utils libguestfs-tools \ - libvirt-daemon-system libvirt-clients libvirt-daemon libvirt-dev \ - qemu-system qemu-kvm virt-manager diff --git a/contribution/vagrant/install-scripts/install-vagrant-libvirt.sh b/contribution/vagrant/install-scripts/install-vagrant-libvirt.sh deleted file mode 100755 index 3678258..0000000 --- a/contribution/vagrant/install-scripts/install-vagrant-libvirt.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -. /etc/os-release - -if [ "$NAME" != "Ubuntu" ]; then - echo "This script is for Ubuntu." - exit -fi - -if [ ! -x "$(command -v vagrant)" ]; then - echo "Please install Vagrant first." -else - # update repo - sudo apt-get update - - # install build-essential - sudo apt-get install -y build-essential - - # install vagrant-libvirt - vagrant plugin install vagrant-libvirt -fi diff --git a/contribution/vagrant/install-scripts/install-virtualbox.sh b/contribution/vagrant/install-scripts/install-virtualbox.sh index b7768ca..8e84ae4 100755 --- a/contribution/vagrant/install-scripts/install-virtualbox.sh +++ b/contribution/vagrant/install-scripts/install-virtualbox.sh @@ -16,7 +16,7 @@ if [ ! -x "$(command -v vboxmanage)" ]; then # install vbox sudo apt-get update - sudo apt-get install virtualbox-6.1 + sudo apt-get install virtualbox echo "Please reboot the machine." 
else diff --git a/contribution/vagrant/setup.sh b/contribution/vagrant/setup.sh deleted file mode 100755 index 43a4ed7..0000000 --- a/contribution/vagrant/setup.sh +++ /dev/null @@ -1,55 +0,0 @@ -#!/bin/bash - -# From BoanLab's tools scripts -git clone https://github.com/boanlab/tools.git - -# Install Docker -bash tools/containers/install-containerd.sh - -# Install Kubeadm -sudo apt-get update -sudo apt-get install -y apt-transport-https ca-certificates curl gpg -curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.24/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg -echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.24/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list -sudo sysctl -w net.ipv4.ip_forward=1 -sudo swapoff -a -sudo apt-get update -sudo apt-get install -y kubelet kubeadm kubectl -sudo apt-mark hold kubelet kubeadm kubectl - -# Disable Swap -sudo swapoff -a - -# Initialize Kubernetes for single node -export MULTI=false -bash tools/kubernetes/initialize-kubeadm.sh - -# Deploy Calico -export CNI=calico -bash tools/kubernetes/deploy-cni.sh - -# Make kubectl related commands accessable for vagrant user -sudo mkdir -p /home/vagrant/.kube -sudo cp -i /etc/kubernetes/admin.conf /home/vagrant/.kube/config -sudo chown $(id -u vagrant):$(id -g vagrant) /home/vagrant/.kube/config - -# Now install Istio -sudo apt-get install make -curl -L https://istio.io/downloadIstio | ISTIO_VERSION=1.20.3 sh - -export PATH="$PATH:/home/vagrant/istio-1.20.3/bin" -istioctl install --set profile=default -y -sudo chown -R vagrant /home/vagrant/istio-1.20.3/ - -# Now install golang, this is for golint, gosec, gofmt -wget https://go.dev/dl/go1.22.0.linux-amd64.tar.gz -sudo rm -rf /usr/local/go -sudo tar -C /usr/local -xzf go1.22.0.linux-amd64.tar.gz -export PATH=$PATH:/usr/local/go/bin - -# Setup bashrc -echo export GOPATH="/home/vagrant/go" >> /home/vagrant/.bashrc -echo export PATH="$PATH:/usr/local/go/bin:/home/vagrant/istio-1.20.3/bin:/home/vagrant/go/bin/" >> /home/vagrant/.bashrc - -# Install protoc-gen-go and protoc-gen-go-grpc -RUN go install github.com/golang/protobuf/protoc-gen-go@latest -RUN go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest \ No newline at end of file diff --git a/deployments/ai-engine.yaml b/deployments/ai-engine.yaml index dfafe09..cce9e45 100644 --- a/deployments/ai-engine.yaml +++ b/deployments/ai-engine.yaml @@ -1,8 +1,8 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: ai-engine namespace: sentryflow + name: ai-engine spec: replicas: 1 selector: @@ -17,9 +17,9 @@ spec: - name: sentryflow image: 5gsec/sentryflow-ai-engine:v0.1 ports: - - containerPort: 5000 + - name: ai-engine protocol: TCP - name: grpc-sentryflow + containerPort: 5000 --- apiVersion: v1 kind: Service @@ -30,7 +30,7 @@ spec: selector: app: ai-engine ports: - - protocol: TCP - port: 5000 - targetPort: 5000 - name: grpc-sentryflow + - name: sentryflow-ai-engine + protocol: TCP + port: 5000 + targetPort: 5000 diff --git a/deployments/log-client.yaml b/deployments/log-client.yaml index 1a5dbf9..35b311f 100644 --- a/deployments/log-client.yaml +++ b/deployments/log-client.yaml @@ -1,8 +1,8 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: log-client namespace: sentryflow + name: log-client spec: replicas: 1 selector: @@ -16,18 +16,14 @@ spec: containers: - name: log-client image: 5gsec/sentryflow-log-client:v0.1 - ports: - - containerPort: 8080 - protocol: TCP - name: grpc env: - name: 
SERVER_ADDR value: "sentryflow.sentryflow.svc.cluster.local" - name: SERVER_PORT value: "8080" - - name: METRIC_FILTER - value: "envoy" - name: LOG_CFG value: "stdout" - name: METRIC_CFG value: "stdout" + - name: METRIC_FILTER + value: "api" diff --git a/deployments/mongo-client.yaml b/deployments/mongo-client.yaml index 10e1fbe..698ed46 100644 --- a/deployments/mongo-client.yaml +++ b/deployments/mongo-client.yaml @@ -1,8 +1,8 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: mongodb namespace: sentryflow + name: mongodb spec: replicas: 1 selector: @@ -14,29 +14,29 @@ spec: app: mongodb spec: containers: - - name: mongodb - image: mongo:latest - ports: - - containerPort: 27017 + - name: mongodb + image: mongo:latest + ports: + - containerPort: 27017 --- apiVersion: v1 kind: Service metadata: - name: mongodb namespace: sentryflow + name: mongodb spec: selector: app: mongodb ports: - - protocol: TCP - port: 27017 - targetPort: 27017 + - protocol: TCP + port: 27017 + targetPort: 27017 --- apiVersion: apps/v1 kind: Deployment metadata: - name: mongo-client namespace: sentryflow + name: mongo-client spec: replicas: 1 selector: @@ -48,7 +48,7 @@ spec: app: mongo-client spec: imagePullSecrets: - - name: regcred + - name: regcred containers: - name: mongo-client image: 5gsec/sentryflow-mongo-client:v0.1 @@ -57,5 +57,5 @@ spec: value: "sentryflow.sentryflow.svc.cluster.local" - name: SERVER_PORT value: "8080" - - name: MONGODB_HOST + - name: MONGODB_ADDR value: "mongodb://mongodb:27017" diff --git a/deployments/sentryflow.yaml b/deployments/sentryflow.yaml index 2af720f..d38da78 100644 --- a/deployments/sentryflow.yaml +++ b/deployments/sentryflow.yaml @@ -3,7 +3,7 @@ kind: Namespace metadata: name: sentryflow labels: - istio-injection: disabled # avoid Istio sidecar injection + istio-injection: disabled # avoid Istio sidecar-injection pod-security.kubernetes.io/audit: privileged pod-security.kubernetes.io/enforce: privileged pod-security.kubernetes.io/warn: privileged @@ -11,13 +11,13 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: sa-sentryflow namespace: sentryflow + name: sentryflow-sa --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: cr-sentryflow + name: sentryflow-cr rules: - apiGroups: ["*"] verbs: ["*"] @@ -26,21 +26,21 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: rb-sentryflow + name: sentryflow-rb roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: cr-sentryflow + name: sentryflow-cr subjects: - kind: ServiceAccount - name: sa-sentryflow namespace: sentryflow + name: sentryflow-sa --- apiVersion: apps/v1 kind: Deployment metadata: - name: sentryflow namespace: sentryflow + name: sentryflow spec: replicas: 1 selector: @@ -51,32 +51,32 @@ spec: labels: app: sentryflow spec: - serviceAccountName: sa-sentryflow + serviceAccountName: sentryflow-sa containers: - name: sentryflow image: 5gsec/sentryflow:v0.1 ports: - - containerPort: 4317 + - name: otel-grpc protocol: TCP - name: grpc-otlp - - containerPort: 8080 + containerPort: 4317 + - name: sentryflow-grpc protocol: TCP - name: grpc-export + containerPort: 8080 --- apiVersion: v1 kind: Service metadata: - name: sentryflow namespace: sentryflow + name: sentryflow spec: selector: app: sentryflow ports: - - protocol: TCP - port: 4317 - targetPort: 4317 - name: grpc-otlp - - protocol: TCP - port: 8080 - targetPort: 8080 - name: grpc-export + - name: otel-grpc + protocol: TCP + port: 4317 + targetPort: 4317 + - name: 
sentryflow-grpc + protocol: TCP + port: 8080 + targetPort: 8080 diff --git a/examples/README.md b/examples/README.md index 5e0978e..f162656 100644 --- a/examples/README.md +++ b/examples/README.md @@ -1,8 +1,4 @@ # Examples -The links below are organized by their level of complexity, starting from basic and progressing to more complex. - - [Single HTTP Requests](httpbin/README.md) - [RobotShop Demo Microservice](robotshop/README.md) -- [Nephio Free5gc Workload](nephio/free5gc/README.md) -- [Nephio OAI Workload](nephio/oai/README.md) diff --git a/examples/bookinfo/README.md b/examples/bookinfo/README.md index 4e78fa3..298fd13 100644 --- a/examples/bookinfo/README.md +++ b/examples/bookinfo/README.md @@ -8,6 +8,6 @@ https://istio.io/latest/docs/examples/bookinfo/ ```kubectl create -f telemetry.yaml``` -## Api request Generation +## API Request Generation ```curl http://bookinfo_Address:9080/productpage``` diff --git a/examples/bookinfo/telemetry.yaml b/examples/bookinfo/telemetry.yaml index 815fed0..7c434eb 100644 --- a/examples/bookinfo/telemetry.yaml +++ b/examples/bookinfo/telemetry.yaml @@ -1,8 +1,8 @@ apiVersion: telemetry.istio.io/v1alpha1 kind: Telemetry metadata: - name: bookinfo-logging namespace: bookinfo + name: bookinfo-logging spec: accessLogging: - providers: diff --git a/examples/httpbin/README.md b/examples/httpbin/README.md index 780360d..6b5e03e 100644 --- a/examples/httpbin/README.md +++ b/examples/httpbin/README.md @@ -1,6 +1,6 @@ # Single HTTP Requests -This document showcases how SentryFlow is capable of capturing API logs for straightforward HTTP requests. The demonstration employs Istio's `sleep` and `httpbin` examples for illustration. +This document demonstrates how SentryFlow effectively captures API logs for simple HTTP requests, using Istio's `sleep` and `httpbin` examples for illustration. It is essential to ensure that the `sleep` and `httpbin` deployments are correctly configured and that the default namespace has [Istio injection enabled](https://istio.io/latest/docs/setup/additional-setup/sidecar-injection/#automatic-sidecar-injection) for the setup to function properly. @@ -8,17 +8,18 @@ It is essential to ensure that the `sleep` and `httpbin` deployments are correct To confirm that Istio is set up correctly, start by verifying if the `default` namespace has Istio injection enabled. This can be done using the following command: -``` -$ kubectl describe namespace default +```bash +kubectl describe namespace default + Name: default Labels: istio-injection=enabled -... ``` If the `default` namespace carries the `istio-injection=enabled` label, injection has been set up properly.
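The same check can also be done programmatically. Below is a hypothetical client-go sketch (not part of SentryFlow; it assumes a kubeconfig at the default `~/.kube/config` location) that inspects the `istio-injection` label on the `default` namespace:

```go
package main

import (
	"context"
	"fmt"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a client from the default kubeconfig location (~/.kube/config).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatalf("could not load kubeconfig: %v", err)
	}

	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatalf("could not create clientset: %v", err)
	}

	// Fetch the default namespace and inspect its labels.
	ns, err := clientset.CoreV1().Namespaces().Get(context.Background(), "default", metav1.GetOptions{})
	if err != nil {
		log.Fatalf("could not get namespace: %v", err)
	}

	// Automatic sidecar injection only happens when this label is set.
	if ns.Labels["istio-injection"] == "enabled" {
		fmt.Println("istio-injection is enabled on the default namespace")
	} else {
		fmt.Println("istio-injection is not enabled; label the namespace first")
	}
}
```

Either way, the `istio-injection=enabled` label is what makes the sidecars, and therefore the access logs, appear.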
Now, apply the `telemetry.yaml` in this directory by running the following command: -``` -$ kubectl create -f telemetry.yaml +```bash +kubectl create -f telemetry.yaml + telemetry.telemetry.istio.io/sleep-logging created ``` @@ -28,8 +29,9 @@ Executing this command will configure `telemetry` for Istio, instructing Envoy p To ensure that the pods in the `default` namespace are operational, execute the following command: -``` -$ kubectl get pods -n default +```bash +kubectl get pods -n default + NAME READY STATUS RESTARTS AGE httpbin-545f698b64-ncvq9 2/2 Running 0 44s sleep-75bbc86479-fmf4p 2/2 Running 0 35s @@ -39,35 +41,36 @@ sleep-75bbc86479-fmf4p 2/2 Running 0 35s Going forward, the `sleep` pod will initiate API requests to the `httpbin` service, which can be done using the following command: -``` -$ export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name}) -$ kubectl exec "$SOURCE_POD" -c sleep -- curl -sS -v httpbin:8000/status/418 +```bash +export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name}) +kubectl exec "$SOURCE_POD" -c sleep -- curl -sS -v httpbin:8000/status/418 ``` ## Step 3. Checking Logs There are two methods of checking logs with SentryFlow clients. -### 1. Logger +### 1. Log Client To examine the logs exported by SentryFlow, you can use the following command: -``` -$ kubectl logs -n sentryflow -l app=log-client -2024/02/14 17:03:37 [gRPC] Successfully connected to sentryflow.sentryflow.svc.cluster.local:8080 -2024/02/14 17:40:28 [Client] Received log: timeStamp:"[2024-02-14T17:40:27.225Z]" id:1707929670787152 srcNamespace:"default" srcName:"sleep-75bbc86479-fmf4p" srcLabel:{key:"app" value:"sleep"} srcLabel:{key:"pod-template-hash" value:"75bbc86479"} srcLabel:{key:"security.istio.io/tlsMode" value:"istio"} srcLabel:{key:"service.istio.io/canonical-name" value:"sleep"} srcLabel:{key:"service.istio.io/canonical-revision" value:"latest"} srcIP:"10.244.140.11" srcPort:"44126" srcType:"Pod" dstNamespace:"default" dstName:"httpbin" dstLabel:{key:"app" value:"httpbin"} dstLabel:{key:"service" value:"httpbin"} dstIP:"10.105.103.198" dstPort:"8000" dstType:"Service" protocol:"HTTP/1.1" method:"GET" path:"/status/418" responseCode:418 -2024/02/14 17:40:29 [Client] Received log: timeStamp:"[2024-02-14T17:40:28.845Z]" id:1707929670787154 srcNamespace:"default" srcName:"sleep-75bbc86479-fmf4p" srcLabel:{key:"app" value:"sleep"} srcLabel:{key:"pod-template-hash" value:"75bbc86479"} srcLabel:{key:"security.istio.io/tlsMode" value:"istio"} srcLabel:{key:"service.istio.io/canonical-name" value:"sleep"} srcLabel:{key:"service.istio.io/canonical-revision" value:"latest"} srcIP:"10.244.140.11" srcPort:"44158" srcType:"Pod" dstNamespace:"default" dstName:"httpbin" dstLabel:{key:"app" value:"httpbin"} dstLabel:{key:"service" value:"httpbin"} dstIP:"10.105.103.198" dstPort:"8000" dstType:"Service" protocol:"HTTP/1.1" method:"GET" path:"/status/418" responseCode:418 +```bash +kubectl logs -n sentryflow -l app=log-client + +YYYY/MM/DD 17:03:37 [gRPC] Successfully connected to sentryflow.sentryflow.svc.cluster.local:8080 +YYYY/MM/DD 17:40:28 [Client] Received log: timeStamp:"[YYYY/MM/DDT17:40:27.225Z]" id:1707929670787152 srcNamespace:"default" srcName:"sleep-75bbc86479-fmf4p" srcLabel:{key:"app" value:"sleep"} srcLabel:{key:"pod-template-hash" value:"75bbc86479"} srcLabel:{key:"security.istio.io/tlsMode" value:"istio"} srcLabel:{key:"service.istio.io/canonical-name" value:"sleep"} srcLabel:{key:"service.istio.io/canonical-revision" value:"latest"}
srcIP:"10.244.140.11" srcPort:"44126" srcType:"Pod" dstNamespace:"default" dstName:"httpbin" dstLabel:{key:"app" value:"httpbin"} dstLabel:{key:"service" value:"httpbin"} dstIP:"10.105.103.198" dstPort:"8000" dstType:"Service" protocol:"HTTP/1.1" method:"GET" path:"/status/418" responseCode:418 +YYYY/MM/DD 17:40:29 [Client] Received log: timeStamp:"[YYYY/MM/DDT17:40:28.845Z]" id:1707929670787154 srcNamespace:"default" srcName:"sleep-75bbc86479-fmf4p" srcLabel:{key:"app" value:"sleep"} srcLabel:{key:"pod-template-hash" value:"75bbc86479"} srcLabel:{key:"security.istio.io/tlsMode" value:"istio"} srcLabel:{key:"service.istio.io/canonical-name" value:"sleep"} srcLabel:{key:"service.istio.io/canonical-revision" value:"latest"} srcIP:"10.244.140.11" srcPort:"44158" srcType:"Pod" dstNamespace:"default" dstName:"httpbin" dstLabel:{key:"app" value:"httpbin"} dstLabel:{key:"service" value:"httpbin"} dstIP:"10.105.103.198" dstPort:"8000" dstType:"Service" protocol:"HTTP/1.1" method:"GET" path:"/status/418" responseCode:418 ``` -As anticipated, we should be able to observe the `/status/418` API request being made from the `sleep` pod to the `httpbin` service. +As expected, we should be able to observe the `/status/418` API request being made from the `sleep` pod to the `httpbin` service. -### 2. MongoDB +### 2. MongoDB Client To inspect the data stored in MongoDB by SentryFlow, you can use the following command: -``` -$ export MONGODB_POD=$(kubectl get pod -n sentryflow -l app=mongodb -o jsonpath='{.items[0].metadata.name}') -$ kubectl exec -it $MONGODB_POD -n sentryflow mongosh +```bash +export MONGODB_POD=$(kubectl get pod -n sentryflow -l app=mongodb -o jsonpath='{.items[0].metadata.name}') +kubectl exec -it $MONGODB_POD -n sentryflow mongosh ``` Initiating this command will launch an interactive shell that can be used to explore the contents stored within the database. To examine the data in the database, refer to the subsequent commands provided. @@ -75,11 +78,11 @@ Initiating this command will launch an interactive shell that can be used to exp ``` test> use sentryflow; switched to db sentryflow -sentryflow> db["api-logs"].find() +sentryflow> db["APILogs"].find() [ { _id: ObjectId('65ccfa872b80bf0cec7dab83'), - timestamp: '[2024-02-14T17:38:14.330Z]', + timestamp: '[YYYY-MM-DDT17:38:14.330Z]', id: Long('1707929670787151'), srcnamespace: 'default', srcname: 'sleep-75bbc86479-fmf4p', diff --git a/examples/nephio/free5gc/README.md b/examples/nephio/free5gc/README.md deleted file mode 100644 index d49f0ce..0000000 --- a/examples/nephio/free5gc/README.md +++ /dev/null @@ -1,299 +0,0 @@ -# Nephio - Free5GC - -This example demonstrates capturing access logs from [Nephio](https://github.com/nephio-project/nephio), which operates on top of Istio using SentryFlow for log collection. - -> **Note**: The information about Nephio provided in this document may be outdated, as Nephio is currently in the early stages of development. - -## Step 1. Setting Up Nephio and Istio - -In this document, we will discuss monitoring `free5gc-cp` from the `regional` cluster to observe API activities within the control plane. - -> **Note**: To configure Nephio, please consult their official documentation available [here](https://github.com/nephio-project/docs/blob/main/content/en/docs/guides/user-guides/exercise-1-free5gc.md). Additionally, for the purpose of this document, it will be assumed that all steps up to and including **Step 6** have been executed correctly. 
- -Ensure that the Nephio `regional` cluster is functioning correctly, as well as the `free5gc-cp` namespaces within it. - -```bash -$ kubectl get pods --context regional-admin@regional -n free5gc-cp -NAME READY STATUS RESTARTS AGE -free5gc-ausf-69569f564b-7ttn5 1/1 Running 0 16s -free5gc-nrf-5978f8f797-xkhnl 1/1 Running 0 16s -free5gc-nssf-697b486564-gtpm5 1/1 Running 0 16s -free5gc-pcf-55d6c758bb-rhsm5 1/1 Running 0 16s -free5gc-udm-78464dcd7b-j6s7n 1/1 Running 0 16s -free5gc-udr-565445b596-7c6zw 1/1 Running 0 16s -free5gc-webui-ddd948585-nzkrf 1/1 Running 0 16s -mongodb-0 1/1 Running 0 7d9h -``` - -To gather access logs from within the namespace, Istio must be installed in the cluster. - -``` -$ istioctl install --set profile=default --context regional-admin@regional -This will install the Istio 1.20.2 "default" profile (with components: Istio core, Istiod, and Ingress gateways) into the cluster. Proceed? (y/N) y -✔ Istio core installed -✔ Istiod installed -✔ Ingress gateways installed -✔ Installation complete -Made this installation the default for injection and validation. -``` - -After successfully installing Istio in the cluster, you can verify that the Istio system is operational and running correctly by executing the following command: - -``` -$ kubectl get pods -n istio-system --context regional-admin@regional -``` - -## Step 2. Injecting Sidecars into Nephio - -Up to this point, Istio has been installed in the cluster where the `regional` cluster is operational. However, this does not necessarily mean that sidecar proxies are running alongside each pod. To ensure proper injection of sidecars into Nephio, the following steps need to be undertaken: - -### 2.1 Lowering Restriction: podSecurityStandard - -Nephio creates clusters for each type (e.g., `regional`, `edge01`, `edge02`) using **podSecurityContext**. By default, Nephio adheres to the following standards: - -- `enforce`: `baseline` -- `audit` and `warn`: `restricted` - -The security contexts employed by Nephio intentionally exclude the `NET_ADMIN` and `NET_RAW` capabilities, which are [required](https://istio.io/latest/docs/ops/deployment/requirements/) for the correct injection of the `istio-init` sidecar. Consequently, it is essential to explicitly designate these profiles as `privileged` across all namespaces to ensure Istio is injected properly. - -We can achieve this by: - -``` -$ kubectl label --overwrite ns --all pod-security.kubernetes.io/audit=privileged --context regional-admin@regional -$ kubectl label --overwrite ns --all pod-security.kubernetes.io/enforce=privileged --context regional-admin@regional -$ kubectl label --overwrite ns --all pod-security.kubernetes.io/warn=privileged --context regional-admin@regional -``` - -> **Note**: Modifying `podSecurityStandard` via `kubectl edit cluster regional-admin@regional` will reset the settings to their defaults. Therefore, it's recommended to directly alter the namespace configuration instead. - -Now, verify if those labels were set properly by: - -``` -$ kubectl describe ns free5gc-cp --context regional-admin@regional -Name: free5gc-cp -Labels: app.kubernetes.io/managed-by=configmanagement.gke.io - configsync.gke.io/declared-version=v1 - kubernetes.io/metadata.name=free5gc-cp - pod-security.kubernetes.io/audit=privileged - pod-security.kubernetes.io/enforce=privileged - pod-security.kubernetes.io/warn=privileged -... -``` - -### 2.2 Preparing Sidecars - -To inject sidecars using Istio, we will label the namespaces accordingly. 
For the purposes of this demonstration, we will specifically label the `free5gc-cp` namespaces. - -``` -$ kubectl label namespace free5gc-cp istio-injection=enabled --overwrite --context regional-admin@regional -namespace/free5gc-cp labeled -``` - -## Step 3. Deploying SentryFlow - -Now is the moment to deploy SentryFlow. This can be accomplished by executing the following steps: - -``` -$ kubectl create -f ../../../deployments/sentryflow.yaml --context regional-admin@regional -namespace/sentryflow created -serviceaccount/sa-sentryflow created -clusterrole.rbac.authorization.k8s.io/cr-sentryflow created -clusterrolebinding.rbac.authorization.k8s.io/rb-sentryflow created -deployment.apps/sentryflow created -service/sentryflow created -``` - -Also, we can deploy exporters for SentryFlow by following these additional steps: - -``` -$ kubectl create -f ../../../deployments/log-client.yaml --context regional-admin@regional -deployment.apps/log-client created - -$ kubectl create -f ../../../deployments/mongo-client.yaml --context regional-admin@regional -deployment.apps/mongodb created -service/mongodb created -deployment.apps/mongo-client created -``` - -Verify if Pods in SentryFlow are properly by: - -``` -$ kubectl get pods -n sentryflow --context regional-admin@regional -NAME READY STATUS RESTARTS AGE -log-client-75695cd4d4-z6rns 1/1 Running 0 37s -mongo-client-67dfb6ffbb-4psdh 1/1 Running 0 37s -mongodb-575549748d-9n6lx 1/1 Running 0 37s -sentryflow-5bf9f6987c-kmpgx 1/1 Running 0 60s -``` - -> **Note**: -The `sentryflow` namespace will not have `istio-injection=enabled`. Enabling this would result in each OpenTelemetry export being logged as an access log, leading to an excessive number of logs being captured. - -> **Note**: Deploying `sentryflow` will automatically modify the Istio mesh configuration (`istio-system/istio`) to direct the export of access logs to it. - -## Step 4. Restarting Deployments - -Till now we have: -- Setup SentryFlow -- Prepared Istio injection -- Lowered podSecurityStandard - -However, this action alone will not yet produce any logs. To enable Numbat to collect access logs, it is necessary to add `telemetry` configurations and also restart the deployments under `free5gc-cp`. - -> **Note**: Restarting deployments before implementing telemetry will result in the sidecars not transmitting access logs to our collector. Hence, it is important to apply telemetry configurations prior to restarting the deployments. - -Telemetry can be configured to monitor the `free5gc-cp` namespace by executing the following steps: - -``` -$ kubectl create -f telemetry.yaml --context regional-admin@regional -telemetry.telemetry.istio.io/free5gc-logging created -``` - -To restart all deployments within the `free5gc-cp` namespace, you can proceed with the following command: - -> **Note**: Restarting deployments within the `free5gc-cp` namespace is necessary. If there are any jobs currently running, additional steps may be needed to manage those jobs during the restart process. 
- -``` -$ kubectl rollout restart deployment -n free5gc-cp --context regional-admin@regional -deployment.apps/free5gc-ausf restarted -deployment.apps/free5gc-nrf restarted -deployment.apps/free5gc-nssf restarted -deployment.apps/free5gc-pcf restarted -deployment.apps/free5gc-udm restarted -deployment.apps/free5gc-udr restarted -deployment.apps/free5gc-webui restarted -``` - -After issuing the rollout restart command, you can verify whether the Pods now include sidecars by executing the following command: - -``` -$ kubectl get pods --context regional-admin@regional -n free5gc-cp -NAME READY STATUS RESTARTS AGE -free5gc-ausf-7d56c5f8db-bk54f 2/2 Running 0 21s -free5gc-nrf-7f7db5c645-kxfrc 2/2 Running 0 21s -free5gc-nssf-5477f65b9b-kfmbt 2/2 Running 0 21s -free5gc-pcf-c7b8ff6bb-t2zrq 2/2 Running 0 21s -free5gc-udm-65947bb776-xs6vf 2/2 Running 0 21s -free5gc-udr-67f5fdf44d-4ckwd 2/2 Running 0 21s -free5gc-webui-cf788755c-9bwzd 2/2 Running 0 21s -mongodb-0 1/1 Running 0 7d10h -``` - -Observing that each Pod now contains 2 containers instead of just 1 indicates the presence of sidecars. To confirm that the additional container is indeed the `istio-proxy`, you can use the `kubectl describe` command for further verification. - -## Step 5. Checking Logs - -Starting from this point, `sentryflow` will begin receiving logs from each deployment. To examine how deployments within the `free5gc-cp` namespace are communicating, there are two methods available: using a log client and a mongo client. - -### 5.1 Checking Logger - -The `log-client` deployment is configured to receive logs from `sentryflow` in our specified export format and output these logs as stdout. To view live logs, you can use the following command: - -``` -$ kubectl logs -n sentryflow -l app=log-client -f --context regional-admin@regional -``` - -This will show live logs such as: - -``` -2024/02/12 20:37:19 [Client] Received log: timeStamp:"[2024-02-12T20:37:19.318Z]" id:1707769691204491 srcNamespace:"free5gc-cp" srcName:"free5gc-pcf-c7b8ff6bb-t2zrq" srcLabel:{key:"nf" value:"pcf"} srcLabel:{key:"pod-template-hash" value:"c7b8ff6bb"} srcLabel:{key:"project" value:"free5gc"} srcLabel:{key:"security.istio.io/tlsMode" value:"istio"} srcLabel:{key:"service.istio.io/canonical-name" value:"free5gc-pcf"} srcLabel:{key:"service.istio.io/canonical-revision" value:"latest"} srcIP:"192.168.1.122" srcPort:"45542" srcType:"Pod" dstNamespace:"free5gc-cp" dstName:"nrf-nnrf" dstLabel:{key:"app.kubernetes.io/managed-by" value:"configmanagement.gke.io"} dstLabel:{key:"app.kubernetes.io/version" value:"v3.1.1"} dstLabel:{key:"configsync.gke.io/declared-version" value:"v1"} dstLabel:{key:"nf" value:"nrf"} dstLabel:{key:"project" value:"free5gc"} dstIP:"10.141.104.225" dstPort:"8000" dstType:"Service" protocol:"HTTP/2" method:"GET" path:"/nnrf-disc/v1/nf-instances?requester-nf-type=PCF&service-names=nudr-dr&target-nf-type=UDR" responseCode:200 -2024/02/12 20:37:20 [Client] Received log: timeStamp:"[2024-02-12T20:37:20.292Z]" id:1707769691204493 srcNamespace:"free5gc-cp" srcName:"free5gc-udm-65947bb776-xs6vf" srcLabel:{key:"nf" value:"udm"} srcLabel:{key:"pod-template-hash" value:"65947bb776"} srcLabel:{key:"project" value:"free5gc"} srcLabel:{key:"security.istio.io/tlsMode" value:"istio"} srcLabel:{key:"service.istio.io/canonical-name" value:"free5gc-udm"} srcLabel:{key:"service.istio.io/canonical-revision" value:"latest"} srcIP:"192.168.1.124" srcPort:"36488" srcType:"Pod" dstNamespace:"free5gc-cp" dstName:"nrf-nnrf" 
dstLabel:{key:"app.kubernetes.io/managed-by" value:"configmanagement.gke.io"} dstLabel:{key:"app.kubernetes.io/version" value:"v3.1.1"} dstLabel:{key:"configsync.gke.io/declared-version" value:"v1"} dstLabel:{key:"nf" value:"nrf"} dstLabel:{key:"project" value:"free5gc"} dstIP:"10.141.104.225" dstPort:"8000" dstType:"Service" protocol:"HTTP/2" method:"PUT" path:"/nnrf-nfm/v1/nf-instances/8ac564d2-e5cc-421c-96cc-8c57b9c85ded" responseCode:201 -2024/02/12 20:37:23 [Client] Received log: timeStamp:"[2024-02-12T20:37:23.594Z]" id:1707769691204495 srcNamespace:"free5gc-cp" srcName:"free5gc-ausf-7d56c5f8db-bk54f" srcLabel:{key:"nf" value:"ausf"} srcLabel:{key:"pod-template-hash" value:"7d56c5f8db"} srcLabel:{key:"project" value:"free5gc"} srcLabel:{key:"security.istio.io/tlsMode" value:"istio"} srcLabel:{key:"service.istio.io/canonical-name" value:"free5gc-ausf"} srcLabel:{key:"service.istio.io/canonical-revision" value:"latest"} srcIP:"192.168.1.126" srcPort:"35258" srcType:"Pod" dstNamespace:"free5gc-cp" dstName:"nrf-nnrf" dstLabel:{key:"app.kubernetes.io/managed-by" value:"configmanagement.gke.io"} dstLabel:{key:"app.kubernetes.io/version" value:"v3.1.1"} dstLabel:{key:"configsync.gke.io/declared-version" value:"v1"} dstLabel:{key:"nf" value:"nrf"} dstLabel:{key:"project" value:"free5gc"} dstIP:"10.141.104.225" dstPort:"8000" dstType:"Service" protocol:"HTTP/2" method:"PUT" path:"/nnrf-nfm/v1/nf-instances/9e1ddaeb-898f-4504-a247-b4a78b329a74" responseCode:201 -``` - -### 5.2 Checking MongoDB - -We have another client (`mongo-client`) that stores all data received from the `sentryflow` into the MongoDB deployment. You can use `mongosh` to inspect the contents stored in MongoDB by executing the following command: - -``` -$ export MONGODB_POD=$(kubectl get pod -n sentryflow -l app=mongodb --context regional-admin@regional -o jsonpath='{.items[0].metadata.name}') -$ kubectl exec -it $MONGODB_POD -n sentryflow --context regional-admin@regional mongosh -``` - -Once we have entered `mongosh` we can check entries stored in the DB. SentryFlow uses DB named `sentryflow` and collection `access-logs` for storing access logs. - -An example command of checking all access logs stored in DB would be: - -``` -test> use sentryflow -use sentryflow -sentryflow> db["api-logs"].find() -... - { - _id: ObjectId('65ca77e4ef0f86784e2fa544'), - timestamp: '[2024-02-12T19:56:19.298Z]', - id: Long('1707767512691239'), - srcnamespace: 'free5gc-cp', - srcname: 'free5gc-nssf-566df8589f-4wwt9', - srclabel: { - 'pod-template-hash': '566df8589f', - project: 'free5gc', - 'security.istio.io/tlsMode': 'istio', - 'service.istio.io/canonical-name': 'free5gc-nssf', - 'service.istio.io/canonical-revision': 'latest', - nf: 'nssf' - }, - srcip: '192.168.1.105', - srcport: '53008', - srctype: 'Pod', - dstnamespace: 'free5gc-cp', - dstname: 'nrf-nnrf', - dstlabel: { - 'app.kubernetes.io/managed-by': 'configmanagement.gke.io', - 'app.kubernetes.io/version': 'v3.1.1', - 'configsync.gke.io/declared-version': 'v1', - nf: 'nrf', - project: 'free5gc' - }, - dstip: '10.141.104.225', - dstport: '8000', - dsttype: 'Service', - protocol: 'HTTP/2', - method: 'PUT', - path: '/nnrf-nfm/v1/nf-instances/99608079-71a4-48cd-9e0c-be0837655d2f', - responsecode: Long('201') - }, -... -``` - -Another example would involve filtering out only logs with `protocol":"HTTP/1.1` to specifically examine API calls: - -``` -sentryflow> db["access-logs"].find({"protocol":"HTTP/1.1"}) -... 
-
-Another example would involve filtering for logs with `"protocol": "HTTP/1.1"` to specifically examine API calls:
-
-```
-sentryflow> db["access-logs"].find({"protocol":"HTTP/1.1"})
-...
- {
-    _id: ObjectId('65ca77e4ef0f86784e2fa545'),
-    timestamp: '[2024-02-12T19:56:19.350Z]',
-    id: Long('1707767512691241'),
-    srcnamespace: 'free5gc-cp',
-    srcname: 'free5gc-nssf-566df8589f-4wwt9',
-    srclabel: {
-      'security.istio.io/tlsMode': 'istio',
-      'service.istio.io/canonical-name': 'free5gc-nssf',
-      'service.istio.io/canonical-revision': 'latest',
-      nf: 'nssf',
-      'pod-template-hash': '566df8589f',
-      project: 'free5gc'
-    },
-    srcip: '192.168.1.105',
-    srcport: '45888',
-    srctype: 'Pod',
-    dstnamespace: 'free5gc-cp',
-    dstname: 'free5gc-nrf-6f6484c6cb-cpnzk',
-    dstlabel: {
-      nf: 'nrf',
-      'pod-template-hash': '6f6484c6cb',
-      project: 'free5gc',
-      'security.istio.io/tlsMode': 'istio',
-      'service.istio.io/canonical-name': 'free5gc-nrf',
-      'service.istio.io/canonical-revision': 'latest'
-    },
-    dstip: '192.168.1.94',
-    dstport: '8000',
-    dsttype: 'Pod',
-    protocol: 'HTTP/1.1',
-    method: 'PUT',
-    path: '/nnrf-nfm/v1/nf-instances/99608079-71a4-48cd-9e0c-be0837655d2f',
-    responsecode: Long('201')
-...
-```
diff --git a/examples/nephio/free5gc/telemetry.yaml b/examples/nephio/free5gc/telemetry.yaml
deleted file mode 100644
index 4e40892..0000000
--- a/examples/nephio/free5gc/telemetry.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-apiVersion: telemetry.istio.io/v1alpha1
-kind: Telemetry
-metadata:
-  name: free5gc-logging
-  namespace: istio-system
-spec:
-  accessLogging:
-    - providers:
-        - name: sentryflow
diff --git a/examples/nephio/oai/README.md b/examples/nephio/oai/README.md
deleted file mode 100644
index 18bdc38..0000000
--- a/examples/nephio/oai/README.md
+++ /dev/null
@@ -1,292 +0,0 @@
-# Nephio - OAI
-
-This example demonstrates how to use SentryFlow to capture access logs from [Nephio](https://github.com/nephio-project/nephio)'s OAI demo, which runs on top of Istio.
-
-> **Note**: The information about Nephio provided in this document may be outdated, as Nephio is currently in the early stages of development.
-
-## Step 1. Setting Up Nephio and Istio
-
-In this document, we will discuss how to monitor the `oai-core` component within the `core` cluster to observe API activities in the control plane.
-
-> **Note**: To set up Nephio, please consult the official Nephio OAI guide available [here](https://github.com/nephio-project/docs/blob/main/content/en/docs/guides/user-guides/exercise-2-oai.md). For the purposes of this document, it will be assumed that all steps up to and including **Step 5** have been executed correctly.
-
-Ensure that the Nephio `core` cluster is functioning correctly, as well as the `oai-core` namespace within it.
-
-```bash
-$ kubectl get pods -n oai-core --context core-admin@core
-NAME                        READY   STATUS    RESTARTS   AGE
-amf-core-56c68b7487-g2clh   1/1     Running   0          10h
-ausf-core-7885cb865-hd9pz   1/1     Running   0          10h
-mysql-7dd4cc6945-pj6xz      1/1     Running   0          10h
-nrf-core-d4f69557d-wptds    1/1     Running   0          10h
-smf-core-59bcf4576c-t6rwr   1/1     Running   0          10h
-udm-core-c7d67cb4d-r4zwn    1/1     Running   0          10h
-udr-core-69c56bcbd5-whjb9   1/1     Running   0          10h
-```
-
-To gather access logs from within the namespace, Istio must be installed in the cluster.
-
-```
-$ istioctl install --set profile=default --context core-admin@core
-This will install the Istio 1.20.2 "default" profile (with components: Istio core, Istiod, and Ingress gateways) into the cluster. Proceed? (y/N) y
-✔ Istio core installed
-✔ Istiod installed
-✔ Ingress gateways installed
-✔ Installation complete
-Made this installation the default for injection and validation.
-```
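-
-Optionally, `istioctl verify-install` can serve as an extra check that the installation matches the applied configuration (a sketch; depending on your Istio version it may need the original manifest via `-f`):
-
-```
-$ istioctl verify-install --context core-admin@core
-```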
-
-After successfully installing Istio in the cluster, you can verify that the Istio system is operational and running correctly by executing the following command:
-
-```
-$ kubectl get pods -n istio-system --context core-admin@core
-```
-
-## Step 2. Injecting Sidecars into Nephio
-
-Up to this point, Istio has been installed in the `core` cluster. However, this does not necessarily mean that sidecar proxies are running alongside each Pod. To ensure proper injection of sidecars into Nephio, the following steps need to be undertaken:
-
-### 2.1 Lowering Restriction: podSecurityStandard
-
-Nephio creates clusters for each type (e.g., `core`, `edge`, `regional`) using **podSecurityContext**. By default, Nephio adheres to the following standards:
-
-- `enforce`: `baseline`
-- `audit` and `warn`: `restricted`
-
-The security contexts employed by Nephio intentionally exclude the `NET_ADMIN` and `NET_RAW` capabilities, which are [required](https://istio.io/latest/docs/ops/deployment/requirements/) for the correct injection of the `istio-init` sidecar. Consequently, it is essential to explicitly designate these profiles as `privileged` across all namespaces to ensure Istio is injected properly.
-
-We can achieve this by:
-
-```
-$ kubectl label --overwrite ns --all pod-security.kubernetes.io/audit=privileged --context core-admin@core
-$ kubectl label --overwrite ns --all pod-security.kubernetes.io/enforce=privileged --context core-admin@core
-$ kubectl label --overwrite ns --all pod-security.kubernetes.io/warn=privileged --context core-admin@core
-```
-
-> **Note**: Modifying `podSecurityStandard` via `kubectl edit cluster core-admin@core` will reset the settings to their defaults. Therefore, it's recommended to directly alter the namespace configuration instead.
-
-Now, verify that those labels were set properly:
-
-```
-$ kubectl describe ns oai-core --context core-admin@core
-Name:     oai-core
-Labels:   app.kubernetes.io/managed-by=configmanagement.gke.io
-          configsync.gke.io/declared-version=v1
-          kubernetes.io/metadata.name=oai-core
-          pod-security.kubernetes.io/audit=privileged
-          pod-security.kubernetes.io/enforce=privileged
-          pod-security.kubernetes.io/warn=privileged
-...
-```
-
-### 2.2 Preparing Sidecars
-
-To inject sidecars using Istio, we will label the namespaces accordingly. For the purposes of this demonstration, we will specifically label the `oai-core` namespace.
-
-```
-$ kubectl label namespace oai-core istio-injection=enabled --overwrite --context core-admin@core
-namespace/oai-core labeled
-```
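-
-You can optionally confirm that the injection label is in place before moving on (a quick check; output abbreviated):
-
-```
-$ kubectl get namespace oai-core --show-labels --context core-admin@core
-```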
-
-## Step 3. Deploying SentryFlow
-
-Now it is time to deploy SentryFlow. This can be accomplished by executing the following steps:
-
-```
-$ kubectl create -f ../../../deployments/sentryflow.yaml --context core-admin@core
-namespace/sentryflow created
-serviceaccount/sa-sentryflow created
-clusterrole.rbac.authorization.k8s.io/cr-sentryflow created
-clusterrolebinding.rbac.authorization.k8s.io/rb-sentryflow created
-deployment.apps/sentryflow created
-service/sentryflow created
-```
-
-We can also deploy exporters for SentryFlow with these additional steps:
-
-```
-$ kubectl create -f ../../../deployments/log-client.yaml --context core-admin@core
-deployment.apps/log-client created
-
-$ kubectl create -f ../../../deployments/mongo-client.yaml --context core-admin@core
-deployment.apps/mongodb created
-service/mongodb created
-deployment.apps/mongo-client created
-```
-
-Verify that the SentryFlow Pods are running properly:
-
-```
-$ kubectl get pods -n sentryflow --context core-admin@core
-NAME                            READY   STATUS    RESTARTS   AGE
-log-client-75695cd4d4-z6rns     1/1     Running   0          37s
-mongo-client-67dfb6ffbb-4psdh   1/1     Running   0          37s
-mongodb-575549748d-9n6lx        1/1     Running   0          37s
-sentryflow-5bf9f6987c-kmpgx     1/1     Running   0          60s
-```
-
-> **Note**: The `sentryflow` namespace will not have `istio-injection=enabled`. Enabling this would result in each OpenTelemetry export being logged as an access log, leading to an excessive number of logs being captured.
-
-> **Note**: Deploying `sentryflow` will automatically modify the Istio mesh configuration (`istio-system/istio`) to direct the export of access logs to it.
-
-## Step 4. Restarting Deployments
-
-So far we have:
-- Set up SentryFlow
-- Prepared Istio injection
-- Lowered the podSecurityStandard
-
-However, this alone will not yet produce any logs. To enable SentryFlow to collect access logs, it is necessary to add a `telemetry` configuration and also restart the deployments under `oai-core`.
-
-> **Note**: Restarting deployments before applying telemetry will result in the sidecars not transmitting access logs to our collector. Hence, it is important to apply the telemetry configuration prior to restarting the deployments.
-
-Telemetry can be configured to monitor the `oai-core` namespace by executing the following steps:
-
-```
-$ kubectl create -f ./telemetry.yaml --context core-admin@core
-telemetry.telemetry.istio.io/oai-logging created
-```
-
-To restart all deployments within the `oai-core` namespace, you can proceed with the following command:
-
-> **Note**: Restarting deployments within the `oai-core` namespace is necessary. If there are any jobs currently running, additional steps may be needed to manage those jobs during the restart process.
-
-```
-$ kubectl rollout restart deployments -n oai-core --context core-admin@core
-deployment.apps/amf-core restarted
-deployment.apps/ausf-core restarted
-deployment.apps/mysql restarted
-deployment.apps/nrf-core restarted
-deployment.apps/smf-core restarted
-deployment.apps/udm-core restarted
-deployment.apps/udr-core restarted
-```
-
-After issuing the rollout restart command, you can verify whether the Pods now include sidecars by executing the following command:
-
-```
-$ kubectl get pods -n oai-core --context core-admin@core
-NAME                         READY   STATUS    RESTARTS   AGE
-amf-core-76967858c4-w4mlt    2/2     Running   0          8m3s
-ausf-core-6bfd5576c5-sprb4   2/2     Running   0          8m10s
-mysql-764b8f5ff5-7hgcv       2/2     Running   0          8m2s
-nrf-core-5c74f7cdb4-mrk4w    2/2     Running   0          8m10s
-smf-core-57bbdf59c4-x4jnk    2/2     Running   0          8m5s
-udm-core-85c5478b94-bm4mv    2/2     Running   0          8m10s
-...
-``` - -Observing that each Pod now contains 2 containers instead of just 1 indicates the presence of sidecars. To confirm that the additional container is indeed the `istio-proxy`, you can use the `kubectl describe` command for further verification. - -## Step 5. Checking Logs - -Starting from this point, `sentryflow` will begin receiving logs from each deployment. To examine how deployments within the `oai-core` namespace are communicating, there are two methods available: using a log client and a mongo client. - -### 5.1 Checking Logs using a Log Client - -The `log-client` deployment is configured to receive logs from `sentryflow` in our specified export format and output these logs as stdout. To view live logs, you can use the following command: - -``` -$ kubectl logs -n sentryflow -l app=log-client -f --context core-admin@core -``` - -This will show live logs such as: - -``` -2024/02/15 03:45:30 [Client] Received log: timeStamp:"[2024-02-15T03:45:30.153Z]" id:1707968675718909 srcNamespace:"oai-core" srcName:"smf-core-57bbdf59c4-x4jnk" srcLabel:{key:"app.kubernetes.io/managed-by" value:"configmanagement.gke.io"} srcLabel:{key:"configsync.gke.io/declared-version" value:"v1alpha1"} srcLabel:{key:"pod-template-hash" value:"57bbdf59c4"} srcLabel:{key:"security.istio.io/tlsMode" value:"istio"} srcLabel:{key:"service.istio.io/canonical-name" value:"smf-core"} srcLabel:{key:"service.istio.io/canonical-revision" value:"latest"} srcLabel:{key:"workload.nephio.org/oai" value:"smf"} srcIP:"192.168.1.57" srcPort:"42954" srcType:"Pod" dstNamespace:"oai-core" dstName:"nrf-core-5c74f7cdb4-mrk4w" dstLabel:{key:"app.kubernetes.io/managed-by" value:"configmanagement.gke.io"} dstLabel:{key:"configsync.gke.io/declared-version" value:"v1alpha1"} dstLabel:{key:"pod-template-hash" value:"5c74f7cdb4"} dstLabel:{key:"security.istio.io/tlsMode" value:"istio"} dstLabel:{key:"service.istio.io/canonical-name" value:"nrf-core"} dstLabel:{key:"service.istio.io/canonical-revision" value:"latest"} dstLabel:{key:"workload.nephio.org/oai" value:"nrf"} dstIP:"192.168.1.55" dstPort:"80" dstType:"Pod" protocol:"HTTP/2" method:"GET" path:"/nnrf-nfm/v1/nf-instances?nf-type=NRF" responseCode:503 -2024/02/15 03:45:30 [Client] Received log: timeStamp:"[2024-02-15T03:45:30.732Z]" id:1707968675718911 srcNamespace:"oai-core" srcName:"udm-core-85c5478b94-bm4mv" srcLabel:{key:"app.kubernetes.io/managed-by" value:"configmanagement.gke.io"} srcLabel:{key:"configsync.gke.io/declared-version" value:"v1alpha1"} srcLabel:{key:"pod-template-hash" value:"85c5478b94"} srcLabel:{key:"security.istio.io/tlsMode" value:"istio"} srcLabel:{key:"service.istio.io/canonical-name" value:"udm-core"} srcLabel:{key:"service.istio.io/canonical-revision" value:"latest"} srcLabel:{key:"workload.nephio.org/oai" value:"udm"} srcIP:"192.168.1.54" srcPort:"48406" srcType:"Pod" dstNamespace:"oai-core" dstName:"nrf-core-5c74f7cdb4-mrk4w" dstLabel:{key:"app.kubernetes.io/managed-by" value:"configmanagement.gke.io"} dstLabel:{key:"configsync.gke.io/declared-version" value:"v1alpha1"} dstLabel:{key:"pod-template-hash" value:"5c74f7cdb4"} dstLabel:{key:"security.istio.io/tlsMode" value:"istio"} dstLabel:{key:"service.istio.io/canonical-name" value:"nrf-core"} dstLabel:{key:"service.istio.io/canonical-revision" value:"latest"} dstLabel:{key:"workload.nephio.org/oai" value:"nrf"} dstIP:"192.168.1.55" dstPort:"80" dstType:"Pod" protocol:"HTTP/2" method:"GET" path:"/nnrf-nfm/v1/nf-instances?nf-type=NRF" responseCode:503 -``` - -### 5.2 Checking Logs in MongoDB - - -We 
have another client (`mongo-client`) that stores all data received from `sentryflow` in the MongoDB deployment. You can use `mongosh` to inspect the contents stored in MongoDB by executing the following commands:
-
-```
-$ export MONGODB_POD=$(kubectl get pod -n sentryflow -l app=mongodb --context core-admin@core -o jsonpath='{.items[0].metadata.name}')
-$ kubectl exec -it $MONGODB_POD -n sentryflow --context core-admin@core mongosh
-```
-
-Once we have entered `mongosh`, we can check the entries stored in the DB. SentryFlow uses a DB named `sentryflow` and the collection `api-logs` for storing access logs.
-
-An example command for retrieving all access logs stored in the database would be:
-
-```
-test> use sentryflow
-use sentryflow
-sentryflow> db["api-logs"].find()
-...
- {
-    _id: ObjectId('65ca77e4ef0f86784e2fa544'),
-    timestamp: '[2024-02-12T19:56:19.298Z]',
-    id: Long('1707767512691239'),
-    srcnamespace: 'free5gc-cp',
-    srcname: 'free5gc-nssf-566df8589f-4wwt9',
-    srclabel: {
-      'pod-template-hash': '566df8589f',
-      project: 'free5gc',
-      'security.istio.io/tlsMode': 'istio',
-      'service.istio.io/canonical-name': 'free5gc-nssf',
-      'service.istio.io/canonical-revision': 'latest',
-      nf: 'nssf'
-    },
-    srcip: '192.168.1.105',
-    srcport: '53008',
-    srctype: 'Pod',
-    dstnamespace: 'free5gc-cp',
-    dstname: 'nrf-nnrf',
-    dstlabel: {
-      'app.kubernetes.io/managed-by': 'configmanagement.gke.io',
-      'app.kubernetes.io/version': 'v3.1.1',
-      'configsync.gke.io/declared-version': 'v1',
-      nf: 'nrf',
-      project: 'free5gc'
-    },
-    dstip: '10.141.104.225',
-    dstport: '8000',
-    dsttype: 'Service',
-    protocol: 'HTTP/2',
-    method: 'PUT',
-    path: '/nnrf-nfm/v1/nf-instances/99608079-71a4-48cd-9e0c-be0837655d2f',
-    responsecode: Long('201')
-  },
-...
-```
-
-Another example would involve filtering for logs with `"protocol": "HTTP/2"` to specifically examine API calls:
-
-```
-sentryflow> db["api-logs"].find({"protocol":"HTTP/2"})
-...
- {
-    _id: ObjectId('65cd871bb9e996068ab49250'),
-    timestamp: '[2024-02-15T03:38:02.636Z]',
-    id: Long('1707968025200999'),
-    srcnamespace: 'kube-system',
-    srcname: 'kube-scheduler-core-cxfgb-gt8tr',
-    srclabel: { component: 'kube-scheduler', tier: 'control-plane' },
-    srcip: '172.18.0.5',
-    srcport: '3479',
-    srctype: 'Pod',
-    dstnamespace: 'oai-core',
-    dstname: 'nrf-core-696b59c448-4dn52',
-    dstlabel: {
-      'pod-template-hash': '696b59c448',
-      'security.istio.io/tlsMode': 'istio',
-      'service.istio.io/canonical-name': 'nrf-core',
-      'service.istio.io/canonical-revision': 'latest',
-      'workload.nephio.org/oai': 'nrf',
-      'app.kubernetes.io/managed-by': 'configmanagement.gke.io',
-      'configsync.gke.io/declared-version': 'v1alpha1'
-    },
-    dstip: '192.168.1.35',
-    dstport: '80',
-    dsttype: 'Pod',
-    protocol: 'HTTP/2',
-    method: 'PATCH',
-    path: '/nnrf-nfm/v1/nf-instances/863bdd79-b36b-4c85-b6ed-61bfc1cb5de3',
-    responsecode: Long('503')
-  },
-...
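-
-sentryflow> // hypothetical follow-up, assuming the same document shape: count HTTP/2 calls per path
-sentryflow> db["api-logs"].aggregate([{ $match: { protocol: "HTTP/2" } }, { $group: { _id: "$path", count: { $sum: 1 } } }])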
-``` diff --git a/examples/nephio/oai/telemetry.yaml b/examples/nephio/oai/telemetry.yaml deleted file mode 100644 index d7df065..0000000 --- a/examples/nephio/oai/telemetry.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: telemetry.istio.io/v1alpha1 -kind: Telemetry -metadata: - name: oai-logging - namespace: istio-system -spec: - accessLogging: - - providers: - - name: sentryflow diff --git a/examples/robotshop/telemetry.yaml b/examples/robotshop/telemetry.yaml index 9504c0c..5cc4e8f 100644 --- a/examples/robotshop/telemetry.yaml +++ b/examples/robotshop/telemetry.yaml @@ -1,8 +1,8 @@ apiVersion: telemetry.istio.io/v1alpha1 kind: Telemetry metadata: - name: robot-shop-logging namespace: robot-shop + name: robot-shop-logging spec: accessLogging: - providers: diff --git a/protobuf/Makefile b/protobuf/Makefile index 7f037d1..fb48b51 100644 --- a/protobuf/Makefile +++ b/protobuf/Makefile @@ -5,6 +5,15 @@ PB_GO:=$(PROTO:.proto=.pb.go) build: $(PB_GO) go.sum %.pb.go: %.proto +ifeq (, $(shell which protoc)) + sudo cp bin/protoc /usr/local/bin/protoc +endif +ifeq (, $(shell which protoc-gen-go)) + go install google.golang.org/protobuf/cmd/protoc-gen-go@latest +endif +ifeq (, $(shell which protoc-gen-go-grpc)) + go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest +endif go mod tidy protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative,require_unimplemented_servers=false $< diff --git a/protobuf/go.mod b/protobuf/go.mod index 50886ac..2038b4d 100644 --- a/protobuf/go.mod +++ b/protobuf/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( google.golang.org/grpc v1.63.2 - google.golang.org/protobuf v1.33.0 + google.golang.org/protobuf v1.34.1 ) require ( diff --git a/protobuf/go.sum b/protobuf/go.sum index e1723cb..c475bdd 100644 --- a/protobuf/go.sum +++ b/protobuf/go.sum @@ -10,5 +10,5 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY= google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= diff --git a/protobuf/sentryflow.pb.go b/protobuf/sentryflow.pb.go index f70c206..d664634 100644 --- a/protobuf/sentryflow.pb.go +++ b/protobuf/sentryflow.pb.go @@ -85,19 +85,19 @@ type APILog struct { SrcNamespace string `protobuf:"bytes,11,opt,name=srcNamespace,proto3" json:"srcNamespace,omitempty"` SrcName string `protobuf:"bytes,12,opt,name=srcName,proto3" json:"srcName,omitempty"` SrcLabel map[string]string `protobuf:"bytes,13,rep,name=srcLabel,proto3" json:"srcLabel,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - SrcType string `protobuf:"bytes,16,opt,name=srcType,proto3" json:"srcType,omitempty"` - SrcIP string `protobuf:"bytes,17,opt,name=srcIP,proto3" json:"srcIP,omitempty"` - SrcPort string `protobuf:"bytes,18,opt,name=srcPort,proto3" json:"srcPort,omitempty"` - DstNamespace string `protobuf:"bytes,21,opt,name=dstNamespace,proto3" json:"dstNamespace,omitempty"` - DstName string 
`protobuf:"bytes,22,opt,name=dstName,proto3" json:"dstName,omitempty"` - DstLabel map[string]string `protobuf:"bytes,23,rep,name=dstLabel,proto3" json:"dstLabel,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - DstType string `protobuf:"bytes,26,opt,name=dstType,proto3" json:"dstType,omitempty"` - DstIP string `protobuf:"bytes,27,opt,name=dstIP,proto3" json:"dstIP,omitempty"` - DstPort string `protobuf:"bytes,28,opt,name=dstPort,proto3" json:"dstPort,omitempty"` - Protocol string `protobuf:"bytes,31,opt,name=protocol,proto3" json:"protocol,omitempty"` - Method string `protobuf:"bytes,32,opt,name=method,proto3" json:"method,omitempty"` - Path string `protobuf:"bytes,33,opt,name=path,proto3" json:"path,omitempty"` - ResponseCode int32 `protobuf:"varint,34,opt,name=responseCode,proto3" json:"responseCode,omitempty"` + SrcType string `protobuf:"bytes,21,opt,name=srcType,proto3" json:"srcType,omitempty"` + SrcIP string `protobuf:"bytes,22,opt,name=srcIP,proto3" json:"srcIP,omitempty"` + SrcPort string `protobuf:"bytes,23,opt,name=srcPort,proto3" json:"srcPort,omitempty"` + DstNamespace string `protobuf:"bytes,31,opt,name=dstNamespace,proto3" json:"dstNamespace,omitempty"` + DstName string `protobuf:"bytes,32,opt,name=dstName,proto3" json:"dstName,omitempty"` + DstLabel map[string]string `protobuf:"bytes,33,rep,name=dstLabel,proto3" json:"dstLabel,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + DstType string `protobuf:"bytes,41,opt,name=dstType,proto3" json:"dstType,omitempty"` + DstIP string `protobuf:"bytes,42,opt,name=dstIP,proto3" json:"dstIP,omitempty"` + DstPort string `protobuf:"bytes,43,opt,name=dstPort,proto3" json:"dstPort,omitempty"` + Protocol string `protobuf:"bytes,51,opt,name=protocol,proto3" json:"protocol,omitempty"` + Method string `protobuf:"bytes,52,opt,name=method,proto3" json:"method,omitempty"` + Path string `protobuf:"bytes,53,opt,name=path,proto3" json:"path,omitempty"` + ResponseCode int32 `protobuf:"varint,54,opt,name=responseCode,proto3" json:"responseCode,omitempty"` } func (x *APILog) Reset() { @@ -258,7 +258,7 @@ func (x *APILog) GetResponseCode() int32 { return 0 } -type APIMetric struct { +type APIMetrics struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields @@ -266,8 +266,8 @@ type APIMetric struct { PerAPICounts map[string]uint64 `protobuf:"bytes,1,rep,name=perAPICounts,proto3" json:"perAPICounts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` // @todo: add some more metrics here } -func (x *APIMetric) Reset() { - *x = APIMetric{} +func (x *APIMetrics) Reset() { + *x = APIMetrics{} if protoimpl.UnsafeEnabled { mi := &file_sentryflow_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -275,13 +275,13 @@ func (x *APIMetric) Reset() { } } -func (x *APIMetric) String() string { +func (x *APIMetrics) String() string { return protoimpl.X.MessageStringOf(x) } -func (*APIMetric) ProtoMessage() {} +func (*APIMetrics) ProtoMessage() {} -func (x *APIMetric) ProtoReflect() protoreflect.Message { +func (x *APIMetrics) ProtoReflect() protoreflect.Message { mi := &file_sentryflow_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -293,12 +293,12 @@ func (x *APIMetric) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use 
APIMetric.ProtoReflect.Descriptor instead. -func (*APIMetric) Descriptor() ([]byte, []int) { +// Deprecated: Use APIMetrics.ProtoReflect.Descriptor instead. +func (*APIMetrics) Descriptor() ([]byte, []int) { return file_sentryflow_proto_rawDescGZIP(), []int{2} } -func (x *APIMetric) GetPerAPICounts() map[string]uint64 { +func (x *APIMetrics) GetPerAPICounts() map[string]uint64 { if x != nil { return x.PerAPICounts } @@ -352,21 +352,21 @@ func (x *MetricValue) GetValue() map[string]string { return nil } -type EnvoyMetric struct { +type EnvoyMetrics struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Namespace string `protobuf:"bytes,1,opt,name=Namespace,proto3" json:"Namespace,omitempty"` - Name string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"` - Labels map[string]string `protobuf:"bytes,7,rep,name=Labels,proto3" json:"Labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - PodIP string `protobuf:"bytes,3,opt,name=podIP,proto3" json:"podIP,omitempty"` - TimeStamp string `protobuf:"bytes,5,opt,name=timeStamp,proto3" json:"timeStamp,omitempty"` - Metric map[string]*MetricValue `protobuf:"bytes,6,rep,name=metric,proto3" json:"metric,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + TimeStamp string `protobuf:"bytes,1,opt,name=timeStamp,proto3" json:"timeStamp,omitempty"` + Namespace string `protobuf:"bytes,11,opt,name=namespace,proto3" json:"namespace,omitempty"` + Name string `protobuf:"bytes,12,opt,name=name,proto3" json:"name,omitempty"` + IPAddress string `protobuf:"bytes,13,opt,name=IPAddress,proto3" json:"IPAddress,omitempty"` + Labels map[string]string `protobuf:"bytes,14,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Metrics map[string]*MetricValue `protobuf:"bytes,21,rep,name=metrics,proto3" json:"metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (x *EnvoyMetric) Reset() { - *x = EnvoyMetric{} +func (x *EnvoyMetrics) Reset() { + *x = EnvoyMetrics{} if protoimpl.UnsafeEnabled { mi := &file_sentryflow_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -374,13 +374,13 @@ func (x *EnvoyMetric) Reset() { } } -func (x *EnvoyMetric) String() string { +func (x *EnvoyMetrics) String() string { return protoimpl.X.MessageStringOf(x) } -func (*EnvoyMetric) ProtoMessage() {} +func (*EnvoyMetrics) ProtoMessage() {} -func (x *EnvoyMetric) ProtoReflect() protoreflect.Message { +func (x *EnvoyMetrics) ProtoReflect() protoreflect.Message { mi := &file_sentryflow_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -392,49 +392,49 @@ func (x *EnvoyMetric) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use EnvoyMetric.ProtoReflect.Descriptor instead. -func (*EnvoyMetric) Descriptor() ([]byte, []int) { +// Deprecated: Use EnvoyMetrics.ProtoReflect.Descriptor instead. 
+func (*EnvoyMetrics) Descriptor() ([]byte, []int) { return file_sentryflow_proto_rawDescGZIP(), []int{4} } -func (x *EnvoyMetric) GetNamespace() string { +func (x *EnvoyMetrics) GetTimeStamp() string { if x != nil { - return x.Namespace + return x.TimeStamp } return "" } -func (x *EnvoyMetric) GetName() string { +func (x *EnvoyMetrics) GetNamespace() string { if x != nil { - return x.Name + return x.Namespace } return "" } -func (x *EnvoyMetric) GetLabels() map[string]string { +func (x *EnvoyMetrics) GetName() string { if x != nil { - return x.Labels + return x.Name } - return nil + return "" } -func (x *EnvoyMetric) GetPodIP() string { +func (x *EnvoyMetrics) GetIPAddress() string { if x != nil { - return x.PodIP + return x.IPAddress } return "" } -func (x *EnvoyMetric) GetTimeStamp() string { +func (x *EnvoyMetrics) GetLabels() map[string]string { if x != nil { - return x.TimeStamp + return x.Labels } - return "" + return nil } -func (x *EnvoyMetric) GetMetric() map[string]*MetricValue { +func (x *EnvoyMetrics) GetMetrics() map[string]*MetricValue { if x != nil { - return x.Metric + return x.Metrics } return nil } @@ -460,28 +460,28 @@ var file_sentryflow_proto_rawDesc = []byte{ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x50, 0x49, 0x4c, 0x6f, 0x67, 0x2e, 0x53, 0x72, 0x63, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x73, 0x72, 0x63, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x72, 0x63, 0x54, 0x79, - 0x70, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x72, 0x63, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x72, 0x63, 0x49, 0x50, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, + 0x70, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x72, 0x63, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x72, 0x63, 0x49, 0x50, 0x18, 0x16, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x72, 0x63, 0x49, 0x50, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x72, 0x63, 0x50, 0x6f, - 0x72, 0x74, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x72, 0x63, 0x50, 0x6f, 0x72, + 0x72, 0x74, 0x18, 0x17, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x72, 0x63, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x64, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, + 0x65, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, - 0x18, 0x16, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 0x3a, 0x0a, 0x08, 0x64, 0x73, 0x74, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x17, 0x20, 0x03, 0x28, + 0x18, 0x20, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x3a, 0x0a, 0x08, 0x64, 0x73, 0x74, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x21, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x50, 0x49, 0x4c, 0x6f, 0x67, 0x2e, 0x44, 0x73, 0x74, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x64, 0x73, 0x74, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x64, - 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x73, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x64, 0x73, 0x74, 0x49, 0x50, 0x18, 0x1b, + 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x73, + 0x74, 0x54, 0x79, 
0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x64, 0x73, 0x74, 0x49, 0x50, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x64, 0x73, 0x74, 0x49, 0x50, 0x12, 0x18, 0x0a, 0x07, 0x64, - 0x73, 0x74, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x73, + 0x73, 0x74, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x2b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x73, 0x74, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x20, 0x20, 0x01, 0x28, + 0x6c, 0x18, 0x33, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x34, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x18, 0x21, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x22, 0x0a, - 0x0c, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x18, 0x22, 0x20, + 0x68, 0x18, 0x35, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x22, 0x0a, + 0x0c, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x18, 0x36, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x1a, 0x3b, 0x0a, 0x0d, 0x53, 0x72, 0x63, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, @@ -490,63 +490,64 @@ var file_sentryflow_proto_rawDesc = []byte{ 0x0a, 0x0d, 0x44, 0x73, 0x74, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x97, 0x01, 0x0a, 0x09, - 0x41, 0x50, 0x49, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x49, 0x0a, 0x0c, 0x70, 0x65, 0x72, - 0x41, 0x50, 0x49, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x25, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x50, 0x49, 0x4d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x2e, 0x50, 0x65, 0x72, 0x41, 0x50, 0x49, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x70, 0x65, 0x72, 0x41, 0x50, 0x49, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x73, 0x1a, 0x3f, 0x0a, 0x11, 0x50, 0x65, 0x72, 0x41, 0x50, 0x49, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x7f, 0x0a, 0x0b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x38, 0x0a, 0x0a, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 
0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf6, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x76, 0x6f, 0x79, - 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x1c, 0x0a, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x06, 0x4c, 0x61, 0x62, 0x65, - 0x6c, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2e, - 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x4c, 0x61, 0x62, - 0x65, 0x6c, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x6f, 0x64, 0x49, 0x50, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x70, 0x6f, 0x64, 0x49, 0x50, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, - 0x65, 0x53, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x69, - 0x6d, 0x65, 0x53, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x39, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2e, 0x4d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x99, 0x01, 0x0a, 0x0a, + 0x41, 0x50, 0x49, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x4a, 0x0a, 0x0c, 0x70, 0x65, + 0x72, 0x41, 0x50, 0x49, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x26, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x50, 0x49, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x50, 0x65, 0x72, 0x41, 0x50, 0x49, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x70, 0x65, 0x72, 0x41, 0x50, 0x49, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x1a, 0x3f, 0x0a, 0x11, 0x50, 0x65, 0x72, 0x41, 0x50, 0x49, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x7f, 0x0a, 0x0b, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x38, + 0x0a, 0x0a, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 
0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x85, 0x03, 0x0a, 0x0c, 0x45, 0x6e, 0x76, + 0x6f, 0x79, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, + 0x65, 0x53, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x69, + 0x6d, 0x65, 0x53, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x49, 0x50, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x49, 0x50, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x15, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x50, 0x0a, - 0x0b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2b, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x32, - 0xc0, 0x01, 0x0a, 0x0a, 0x53, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x32, - 0x0a, 0x06, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x12, 0x14, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x1a, 0x10, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x50, 0x49, 0x4c, 0x6f, 0x67, - 0x30, 0x01, 0x12, 0x3c, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x41, 0x50, 0x49, 0x4d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x73, 0x12, 0x14, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x1a, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x50, 0x49, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x30, 0x01, - 0x12, 0x40, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x4d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x73, 0x12, 0x14, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x1a, 0x15, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x76, 
0x6f, 0x79, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, - 0x30, 0x01, 0x42, 0x15, 0x5a, 0x13, 0x53, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x46, 0x6c, 0x6f, 0x77, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x51, 0x0a, + 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x2b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x32, 0xc5, 0x01, 0x0a, 0x0a, 0x53, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x46, 0x6c, 0x6f, 0x77, 0x12, + 0x35, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x41, 0x50, 0x49, 0x4c, 0x6f, 0x67, 0x12, 0x14, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x6e, + 0x66, 0x6f, 0x1a, 0x10, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x50, + 0x49, 0x4c, 0x6f, 0x67, 0x30, 0x01, 0x12, 0x3d, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x41, 0x50, 0x49, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x14, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x1a, 0x14, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x50, 0x49, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x30, 0x01, 0x12, 0x41, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x76, 0x6f, + 0x79, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x14, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x1a, 0x16, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x30, 0x01, 0x42, 0x15, 0x5a, 0x13, 0x53, 0x65, 0x6e, 0x74, + 0x72, 0x79, 0x46, 0x6c, 0x6f, 0x77, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -563,32 +564,32 @@ func file_sentryflow_proto_rawDescGZIP() []byte { var file_sentryflow_proto_msgTypes = make([]protoimpl.MessageInfo, 11) var file_sentryflow_proto_goTypes = []interface{}{ - (*ClientInfo)(nil), // 0: protobuf.ClientInfo - (*APILog)(nil), // 1: protobuf.APILog - (*APIMetric)(nil), // 2: protobuf.APIMetric - (*MetricValue)(nil), // 3: protobuf.MetricValue - (*EnvoyMetric)(nil), // 4: protobuf.EnvoyMetric - nil, // 5: protobuf.APILog.SrcLabelEntry - nil, // 6: protobuf.APILog.DstLabelEntry - nil, // 7: protobuf.APIMetric.PerAPICountsEntry - nil, // 8: protobuf.MetricValue.ValueEntry - nil, // 9: protobuf.EnvoyMetric.LabelsEntry - nil, // 10: protobuf.EnvoyMetric.MetricEntry + (*ClientInfo)(nil), // 0: protobuf.ClientInfo + (*APILog)(nil), // 1: protobuf.APILog + (*APIMetrics)(nil), // 2: protobuf.APIMetrics + (*MetricValue)(nil), // 3: protobuf.MetricValue + (*EnvoyMetrics)(nil), // 4: protobuf.EnvoyMetrics + nil, // 5: protobuf.APILog.SrcLabelEntry + nil, // 6: protobuf.APILog.DstLabelEntry + nil, // 7: protobuf.APIMetrics.PerAPICountsEntry + nil, // 8: protobuf.MetricValue.ValueEntry + nil, // 9: protobuf.EnvoyMetrics.LabelsEntry + nil, // 10: protobuf.EnvoyMetrics.MetricsEntry } var file_sentryflow_proto_depIdxs = []int32{ 5, // 0: 
protobuf.APILog.srcLabel:type_name -> protobuf.APILog.SrcLabelEntry
 	6, // 1: protobuf.APILog.dstLabel:type_name -> protobuf.APILog.DstLabelEntry
-	7, // 2: protobuf.APIMetric.perAPICounts:type_name -> protobuf.APIMetric.PerAPICountsEntry
+	7, // 2: protobuf.APIMetrics.perAPICounts:type_name -> protobuf.APIMetrics.PerAPICountsEntry
 	8, // 3: protobuf.MetricValue.value:type_name -> protobuf.MetricValue.ValueEntry
-	9, // 4: protobuf.EnvoyMetric.Labels:type_name -> protobuf.EnvoyMetric.LabelsEntry
-	10, // 5: protobuf.EnvoyMetric.metric:type_name -> protobuf.EnvoyMetric.MetricEntry
-	3, // 6: protobuf.EnvoyMetric.MetricEntry.value:type_name -> protobuf.MetricValue
-	0, // 7: protobuf.SentryFlow.GetLog:input_type -> protobuf.ClientInfo
+	9, // 4: protobuf.EnvoyMetrics.labels:type_name -> protobuf.EnvoyMetrics.LabelsEntry
+	10, // 5: protobuf.EnvoyMetrics.metrics:type_name -> protobuf.EnvoyMetrics.MetricsEntry
+	3, // 6: protobuf.EnvoyMetrics.MetricsEntry.value:type_name -> protobuf.MetricValue
+	0, // 7: protobuf.SentryFlow.GetAPILog:input_type -> protobuf.ClientInfo
 	0, // 8: protobuf.SentryFlow.GetAPIMetrics:input_type -> protobuf.ClientInfo
 	0, // 9: protobuf.SentryFlow.GetEnvoyMetrics:input_type -> protobuf.ClientInfo
-	1, // 10: protobuf.SentryFlow.GetLog:output_type -> protobuf.APILog
-	2, // 11: protobuf.SentryFlow.GetAPIMetrics:output_type -> protobuf.APIMetric
-	4, // 12: protobuf.SentryFlow.GetEnvoyMetrics:output_type -> protobuf.EnvoyMetric
+	1, // 10: protobuf.SentryFlow.GetAPILog:output_type -> protobuf.APILog
+	2, // 11: protobuf.SentryFlow.GetAPIMetrics:output_type -> protobuf.APIMetrics
+	4, // 12: protobuf.SentryFlow.GetEnvoyMetrics:output_type -> protobuf.EnvoyMetrics
 	10, // [10:13] is the sub-list for method output_type
 	7, // [7:10] is the sub-list for method input_type
 	7, // [7:7] is the sub-list for extension type_name
@@ -627,7 +628,7 @@ func file_sentryflow_proto_init() {
 		}
 	}
 	file_sentryflow_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
-		switch v := v.(*APIMetric); i {
+		switch v := v.(*APIMetrics); i {
 		case 0:
 			return &v.state
 		case 1:
@@ -651,7 +652,7 @@ func file_sentryflow_proto_init() {
 		}
 	}
 	file_sentryflow_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
-		switch v := v.(*EnvoyMetric); i {
+		switch v := v.(*EnvoyMetrics); i {
 		case 0:
 			return &v.state
 		case 1:
diff --git a/protobuf/sentryflow.proto b/protobuf/sentryflow.proto
index a1cb1ea..eaed177 100644
--- a/protobuf/sentryflow.proto
+++ b/protobuf/sentryflow.proto
@@ -16,24 +16,26 @@ message APILog {
   string srcNamespace = 11;
   string srcName = 12;
   map<string, string> srcLabel = 13;
-  string srcType = 16;
-  string srcIP = 17;
-  string srcPort = 18;
-
-  string dstNamespace = 21;
-  string dstName = 22;
-  map<string, string> dstLabel = 23;
-  string dstType = 26;
-  string dstIP = 27;
-  string dstPort = 28;
-
-  string protocol = 31;
-  string method = 32;
-  string path = 33;
-  int32 responseCode = 34;
+
+  string srcType = 21;
+  string srcIP = 22;
+  string srcPort = 23;
+
+  string dstNamespace = 31;
+  string dstName = 32;
+  map<string, string> dstLabel = 33;
+
+  string dstType = 41;
+  string dstIP = 42;
+  string dstPort = 43;
+
+  string protocol = 51;
+  string method = 52;
+  string path = 53;
+  int32 responseCode = 54;
 }

-message APIMetric {
+message APIMetrics {
   map<string, uint64> perAPICounts = 1;
   // @todo: add some more metrics here
 }

@@ -42,18 +44,20 @@ message MetricValue {
   map<string, string> value = 1;
 }

-message EnvoyMetric {
-  string Namespace = 1;
-  string Name = 2;
-  map<string, string> Labels = 7;
-  string podIP = 3;
-  string timeStamp = 5;
-  map<string, MetricValue> metric = 6;
+message EnvoyMetrics {
+  string timeStamp = 1;
+
+  string namespace = 11;
+  string name = 12;
+  string IPAddress = 13;
+  map<string, string> labels = 14;
+
+  map<string, MetricValue> metrics = 21;
 }

 service SentryFlow {
-  rpc GetLog(ClientInfo) returns (stream APILog);
-  rpc GetAPIMetrics(ClientInfo) returns (stream APIMetric);
-  rpc GetEnvoyMetrics(ClientInfo) returns (stream EnvoyMetric);
+  rpc GetAPILog(ClientInfo) returns (stream APILog);
+  rpc GetAPIMetrics(ClientInfo) returns (stream APIMetrics);
+  rpc GetEnvoyMetrics(ClientInfo) returns (stream EnvoyMetrics);
 }
diff --git a/protobuf/sentryflow_grpc.pb.go b/protobuf/sentryflow_grpc.pb.go
index 03a57b3..11f431c 100644
--- a/protobuf/sentryflow_grpc.pb.go
+++ b/protobuf/sentryflow_grpc.pb.go
@@ -19,7 +19,7 @@ import (
 const _ = grpc.SupportPackageIsVersion7

 const (
-	SentryFlow_GetLog_FullMethodName         = "/protobuf.SentryFlow/GetLog"
+	SentryFlow_GetAPILog_FullMethodName       = "/protobuf.SentryFlow/GetAPILog"
 	SentryFlow_GetAPIMetrics_FullMethodName   = "/protobuf.SentryFlow/GetAPIMetrics"
 	SentryFlow_GetEnvoyMetrics_FullMethodName = "/protobuf.SentryFlow/GetEnvoyMetrics"
 )
@@ -28,7 +28,7 @@ const (
 //
 // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
 type SentryFlowClient interface {
-	GetLog(ctx context.Context, in *ClientInfo, opts ...grpc.CallOption) (SentryFlow_GetLogClient, error)
+	GetAPILog(ctx context.Context, in *ClientInfo, opts ...grpc.CallOption) (SentryFlow_GetAPILogClient, error)
 	GetAPIMetrics(ctx context.Context, in *ClientInfo, opts ...grpc.CallOption) (SentryFlow_GetAPIMetricsClient, error)
 	GetEnvoyMetrics(ctx context.Context, in *ClientInfo, opts ...grpc.CallOption) (SentryFlow_GetEnvoyMetricsClient, error)
 }
@@ -41,12 +41,12 @@ func NewSentryFlowClient(cc grpc.ClientConnInterface) SentryFlowClient {
 	return &sentryFlowClient{cc}
 }

-func (c *sentryFlowClient) GetLog(ctx context.Context, in *ClientInfo, opts ...grpc.CallOption) (SentryFlow_GetLogClient, error) {
-	stream, err := c.cc.NewStream(ctx, &SentryFlow_ServiceDesc.Streams[0], SentryFlow_GetLog_FullMethodName, opts...)
+func (c *sentryFlowClient) GetAPILog(ctx context.Context, in *ClientInfo, opts ...grpc.CallOption) (SentryFlow_GetAPILogClient, error) {
+	stream, err := c.cc.NewStream(ctx, &SentryFlow_ServiceDesc.Streams[0], SentryFlow_GetAPILog_FullMethodName, opts...)
if err != nil { return nil, err } - x := &sentryFlowGetLogClient{stream} + x := &sentryFlowGetAPILogClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -56,16 +56,16 @@ func (c *sentryFlowClient) GetLog(ctx context.Context, in *ClientInfo, opts ...g return x, nil } -type SentryFlow_GetLogClient interface { +type SentryFlow_GetAPILogClient interface { Recv() (*APILog, error) grpc.ClientStream } -type sentryFlowGetLogClient struct { +type sentryFlowGetAPILogClient struct { grpc.ClientStream } -func (x *sentryFlowGetLogClient) Recv() (*APILog, error) { +func (x *sentryFlowGetAPILogClient) Recv() (*APILog, error) { m := new(APILog) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err @@ -89,7 +89,7 @@ func (c *sentryFlowClient) GetAPIMetrics(ctx context.Context, in *ClientInfo, op } type SentryFlow_GetAPIMetricsClient interface { - Recv() (*APIMetric, error) + Recv() (*APIMetrics, error) grpc.ClientStream } @@ -97,8 +97,8 @@ type sentryFlowGetAPIMetricsClient struct { grpc.ClientStream } -func (x *sentryFlowGetAPIMetricsClient) Recv() (*APIMetric, error) { - m := new(APIMetric) +func (x *sentryFlowGetAPIMetricsClient) Recv() (*APIMetrics, error) { + m := new(APIMetrics) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } @@ -121,7 +121,7 @@ func (c *sentryFlowClient) GetEnvoyMetrics(ctx context.Context, in *ClientInfo, } type SentryFlow_GetEnvoyMetricsClient interface { - Recv() (*EnvoyMetric, error) + Recv() (*EnvoyMetrics, error) grpc.ClientStream } @@ -129,8 +129,8 @@ type sentryFlowGetEnvoyMetricsClient struct { grpc.ClientStream } -func (x *sentryFlowGetEnvoyMetricsClient) Recv() (*EnvoyMetric, error) { - m := new(EnvoyMetric) +func (x *sentryFlowGetEnvoyMetricsClient) Recv() (*EnvoyMetrics, error) { + m := new(EnvoyMetrics) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } @@ -141,7 +141,7 @@ func (x *sentryFlowGetEnvoyMetricsClient) Recv() (*EnvoyMetric, error) { // All implementations should embed UnimplementedSentryFlowServer // for forward compatibility type SentryFlowServer interface { - GetLog(*ClientInfo, SentryFlow_GetLogServer) error + GetAPILog(*ClientInfo, SentryFlow_GetAPILogServer) error GetAPIMetrics(*ClientInfo, SentryFlow_GetAPIMetricsServer) error GetEnvoyMetrics(*ClientInfo, SentryFlow_GetEnvoyMetricsServer) error } @@ -150,8 +150,8 @@ type SentryFlowServer interface { type UnimplementedSentryFlowServer struct { } -func (UnimplementedSentryFlowServer) GetLog(*ClientInfo, SentryFlow_GetLogServer) error { - return status.Errorf(codes.Unimplemented, "method GetLog not implemented") +func (UnimplementedSentryFlowServer) GetAPILog(*ClientInfo, SentryFlow_GetAPILogServer) error { + return status.Errorf(codes.Unimplemented, "method GetAPILog not implemented") } func (UnimplementedSentryFlowServer) GetAPIMetrics(*ClientInfo, SentryFlow_GetAPIMetricsServer) error { return status.Errorf(codes.Unimplemented, "method GetAPIMetrics not implemented") @@ -171,24 +171,24 @@ func RegisterSentryFlowServer(s grpc.ServiceRegistrar, srv SentryFlowServer) { s.RegisterService(&SentryFlow_ServiceDesc, srv) } -func _SentryFlow_GetLog_Handler(srv interface{}, stream grpc.ServerStream) error { +func _SentryFlow_GetAPILog_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(ClientInfo) if err := stream.RecvMsg(m); err != nil { return err } - return srv.(SentryFlowServer).GetLog(m, &sentryFlowGetLogServer{stream}) + return srv.(SentryFlowServer).GetAPILog(m, &sentryFlowGetAPILogServer{stream}) } -type 
SentryFlow_GetLogServer interface { +type SentryFlow_GetAPILogServer interface { Send(*APILog) error grpc.ServerStream } -type sentryFlowGetLogServer struct { +type sentryFlowGetAPILogServer struct { grpc.ServerStream } -func (x *sentryFlowGetLogServer) Send(m *APILog) error { +func (x *sentryFlowGetAPILogServer) Send(m *APILog) error { return x.ServerStream.SendMsg(m) } @@ -201,7 +201,7 @@ func _SentryFlow_GetAPIMetrics_Handler(srv interface{}, stream grpc.ServerStream } type SentryFlow_GetAPIMetricsServer interface { - Send(*APIMetric) error + Send(*APIMetrics) error grpc.ServerStream } @@ -209,7 +209,7 @@ type sentryFlowGetAPIMetricsServer struct { grpc.ServerStream } -func (x *sentryFlowGetAPIMetricsServer) Send(m *APIMetric) error { +func (x *sentryFlowGetAPIMetricsServer) Send(m *APIMetrics) error { return x.ServerStream.SendMsg(m) } @@ -222,7 +222,7 @@ func _SentryFlow_GetEnvoyMetrics_Handler(srv interface{}, stream grpc.ServerStre } type SentryFlow_GetEnvoyMetricsServer interface { - Send(*EnvoyMetric) error + Send(*EnvoyMetrics) error grpc.ServerStream } @@ -230,7 +230,7 @@ type sentryFlowGetEnvoyMetricsServer struct { grpc.ServerStream } -func (x *sentryFlowGetEnvoyMetricsServer) Send(m *EnvoyMetric) error { +func (x *sentryFlowGetEnvoyMetricsServer) Send(m *EnvoyMetrics) error { return x.ServerStream.SendMsg(m) } @@ -243,8 +243,8 @@ var SentryFlow_ServiceDesc = grpc.ServiceDesc{ Methods: []grpc.MethodDesc{}, Streams: []grpc.StreamDesc{ { - StreamName: "GetLog", - Handler: _SentryFlow_GetLog_Handler, + StreamName: "GetAPILog", + Handler: _SentryFlow_GetAPILog_Handler, ServerStreams: true, }, { diff --git a/protobuf/sentryflow_metrics.pb.go b/protobuf/sentryflow_metrics.pb.go index d5ce6fc..1ed7b04 100644 --- a/protobuf/sentryflow_metrics.pb.go +++ b/protobuf/sentryflow_metrics.pb.go @@ -20,16 +20,16 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -type APIClassificationRequest struct { +type APIClassifierRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Path []string `protobuf:"bytes,1,rep,name=path,proto3" json:"path,omitempty"` + API []string `protobuf:"bytes,1,rep,name=API,proto3" json:"API,omitempty"` } -func (x *APIClassificationRequest) Reset() { - *x = APIClassificationRequest{} +func (x *APIClassifierRequest) Reset() { + *x = APIClassifierRequest{} if protoimpl.UnsafeEnabled { mi := &file_sentryflow_metrics_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -37,13 +37,13 @@ func (x *APIClassificationRequest) Reset() { } } -func (x *APIClassificationRequest) String() string { +func (x *APIClassifierRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*APIClassificationRequest) ProtoMessage() {} +func (*APIClassifierRequest) ProtoMessage() {} -func (x *APIClassificationRequest) ProtoReflect() protoreflect.Message { +func (x *APIClassifierRequest) ProtoReflect() protoreflect.Message { mi := &file_sentryflow_metrics_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -55,28 +55,28 @@ func (x *APIClassificationRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use APIClassificationRequest.ProtoReflect.Descriptor instead. -func (*APIClassificationRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use APIClassifierRequest.ProtoReflect.Descriptor instead. 
+func (*APIClassifierRequest) Descriptor() ([]byte, []int) { return file_sentryflow_metrics_proto_rawDescGZIP(), []int{0} } -func (x *APIClassificationRequest) GetPath() []string { +func (x *APIClassifierRequest) GetAPI() []string { if x != nil { - return x.Path + return x.API } return nil } -type APIClassificationResponse struct { +type APIClassifierResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Fields map[string]uint64 `protobuf:"bytes,2,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + APIs map[string]uint64 `protobuf:"bytes,1,rep,name=APIs,proto3" json:"APIs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` } -func (x *APIClassificationResponse) Reset() { - *x = APIClassificationResponse{} +func (x *APIClassifierResponse) Reset() { + *x = APIClassifierResponse{} if protoimpl.UnsafeEnabled { mi := &file_sentryflow_metrics_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -84,13 +84,13 @@ func (x *APIClassificationResponse) Reset() { } } -func (x *APIClassificationResponse) String() string { +func (x *APIClassifierResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*APIClassificationResponse) ProtoMessage() {} +func (*APIClassifierResponse) ProtoMessage() {} -func (x *APIClassificationResponse) ProtoReflect() protoreflect.Message { +func (x *APIClassifierResponse) ProtoReflect() protoreflect.Message { mi := &file_sentryflow_metrics_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -102,14 +102,14 @@ func (x *APIClassificationResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use APIClassificationResponse.ProtoReflect.Descriptor instead. -func (*APIClassificationResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use APIClassifierResponse.ProtoReflect.Descriptor instead. 
+func (*APIClassifierResponse) Descriptor() ([]byte, []int) { return file_sentryflow_metrics_proto_rawDescGZIP(), []int{1} } -func (x *APIClassificationResponse) GetFields() map[string]uint64 { +func (x *APIClassifierResponse) GetAPIs() map[string]uint64 { if x != nil { - return x.Fields + return x.APIs } return nil } @@ -119,29 +119,27 @@ var File_sentryflow_metrics_proto protoreflect.FileDescriptor var file_sentryflow_metrics_proto_rawDesc = []byte{ 0x0a, 0x18, 0x73, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x22, 0x2e, 0x0a, 0x18, 0x41, 0x50, 0x49, 0x43, 0x6c, 0x61, 0x73, 0x73, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, - 0x70, 0x61, 0x74, 0x68, 0x22, 0x9f, 0x01, 0x0a, 0x19, 0x41, 0x50, 0x49, 0x43, 0x6c, 0x61, 0x73, - 0x73, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x47, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x50, - 0x49, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x32, 0x78, 0x0a, 0x11, 0x53, 0x65, 0x6e, 0x74, 0x72, 0x79, - 0x46, 0x6c, 0x6f, 0x77, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x63, 0x0a, 0x14, 0x47, - 0x65, 0x74, 0x41, 0x50, 0x49, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, - 0x50, 0x49, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x41, 0x50, 0x49, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, - 0x42, 0x15, 0x5a, 0x13, 0x53, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x46, 0x6c, 0x6f, 0x77, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6f, 0x62, 0x75, 0x66, 0x22, 0x28, 0x0a, 0x14, 0x41, 0x50, 0x49, 0x43, 0x6c, 0x61, 0x73, 0x73, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, + 0x41, 0x50, 0x49, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x03, 0x41, 0x50, 0x49, 0x22, 0x8f, + 0x01, 0x0a, 0x15, 0x41, 0x50, 0x49, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x04, 0x41, 0x50, 0x49, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x41, 0x50, 0x49, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x69, 
0x66, 0x69, 0x65, 0x72, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x41, 0x50, 0x49, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x04, 0x41, 0x50, 0x49, 0x73, 0x1a, 0x37, 0x0a, 0x09, 0x41, 0x50, 0x49, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x32, 0x64, 0x0a, 0x0d, 0x41, 0x50, 0x49, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x12, 0x53, 0x0a, 0x0c, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x69, 0x66, 0x79, 0x41, 0x50, 0x49, + 0x73, 0x12, 0x1e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x50, 0x49, + 0x43, 0x6c, 0x61, 0x73, 0x73, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x50, 0x49, + 0x43, 0x6c, 0x61, 0x73, 0x73, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x15, 0x5a, 0x13, 0x53, 0x65, 0x6e, 0x74, 0x72, 0x79, + 0x46, 0x6c, 0x6f, 0x77, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -158,14 +156,14 @@ func file_sentryflow_metrics_proto_rawDescGZIP() []byte { var file_sentryflow_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 3) var file_sentryflow_metrics_proto_goTypes = []interface{}{ - (*APIClassificationRequest)(nil), // 0: protobuf.APIClassificationRequest - (*APIClassificationResponse)(nil), // 1: protobuf.APIClassificationResponse - nil, // 2: protobuf.APIClassificationResponse.FieldsEntry + (*APIClassifierRequest)(nil), // 0: protobuf.APIClassifierRequest + (*APIClassifierResponse)(nil), // 1: protobuf.APIClassifierResponse + nil, // 2: protobuf.APIClassifierResponse.APIsEntry } var file_sentryflow_metrics_proto_depIdxs = []int32{ - 2, // 0: protobuf.APIClassificationResponse.fields:type_name -> protobuf.APIClassificationResponse.FieldsEntry - 0, // 1: protobuf.SentryFlowMetrics.GetAPIClassification:input_type -> protobuf.APIClassificationRequest - 1, // 2: protobuf.SentryFlowMetrics.GetAPIClassification:output_type -> protobuf.APIClassificationResponse + 2, // 0: protobuf.APIClassifierResponse.APIs:type_name -> protobuf.APIClassifierResponse.APIsEntry + 0, // 1: protobuf.APIClassifier.ClassifyAPIs:input_type -> protobuf.APIClassifierRequest + 1, // 2: protobuf.APIClassifier.ClassifyAPIs:output_type -> protobuf.APIClassifierResponse 2, // [2:3] is the sub-list for method output_type 1, // [1:2] is the sub-list for method input_type 1, // [1:1] is the sub-list for extension type_name @@ -180,7 +178,7 @@ func file_sentryflow_metrics_proto_init() { } if !protoimpl.UnsafeEnabled { file_sentryflow_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*APIClassificationRequest); i { + switch v := v.(*APIClassifierRequest); i { case 0: return &v.state case 1: @@ -192,7 +190,7 @@ func file_sentryflow_metrics_proto_init() { } } file_sentryflow_metrics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*APIClassificationResponse); i { + switch v := v.(*APIClassifierResponse); i { case 0: return &v.state case 1: diff --git a/protobuf/sentryflow_metrics.proto b/protobuf/sentryflow_metrics.proto index 7577b3f..931c7b0 100644 --- a/protobuf/sentryflow_metrics.proto +++ 
b/protobuf/sentryflow_metrics.proto @@ -4,14 +4,14 @@ package protobuf; option go_package = "SentryFlow/protobuf"; -message APIClassificationRequest { - repeated string path = 1; +message APIClassifierRequest { + repeated string API = 1; } -message APIClassificationResponse { - map<string, uint64> fields = 2; +message APIClassifierResponse { + map<string, uint64> APIs = 1; } -service SentryFlowMetrics { - rpc GetAPIClassification(stream APIClassificationRequest) returns (stream APIClassificationResponse); +service APIClassifier { + rpc ClassifyAPIs(stream APIClassifierRequest) returns (stream APIClassifierResponse); } diff --git a/protobuf/sentryflow_metrics_grpc.pb.go b/protobuf/sentryflow_metrics_grpc.pb.go index 33add51..0f3c100 100644 --- a/protobuf/sentryflow_metrics_grpc.pb.go +++ b/protobuf/sentryflow_metrics_grpc.pb.go @@ -19,118 +19,118 @@ import ( const _ = grpc.SupportPackageIsVersion7 const ( - SentryFlowMetrics_GetAPIClassification_FullMethodName = "/protobuf.SentryFlowMetrics/GetAPIClassification" + APIClassifier_ClassifyAPIs_FullMethodName = "/protobuf.APIClassifier/ClassifyAPIs" ) -// SentryFlowMetricsClient is the client API for SentryFlowMetrics service. +// APIClassifierClient is the client API for APIClassifier service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type SentryFlowMetricsClient interface { - GetAPIClassification(ctx context.Context, opts ...grpc.CallOption) (SentryFlowMetrics_GetAPIClassificationClient, error) +type APIClassifierClient interface { + ClassifyAPIs(ctx context.Context, opts ...grpc.CallOption) (APIClassifier_ClassifyAPIsClient, error) } -type sentryFlowMetricsClient struct { +type aPIClassifierClient struct { cc grpc.ClientConnInterface } -func NewSentryFlowMetricsClient(cc grpc.ClientConnInterface) SentryFlowMetricsClient { - return &sentryFlowMetricsClient{cc} +func NewAPIClassifierClient(cc grpc.ClientConnInterface) APIClassifierClient { + return &aPIClassifierClient{cc} } -func (c *sentryFlowMetricsClient) GetAPIClassification(ctx context.Context, opts ...grpc.CallOption) (SentryFlowMetrics_GetAPIClassificationClient, error) { - stream, err := c.cc.NewStream(ctx, &SentryFlowMetrics_ServiceDesc.Streams[0], SentryFlowMetrics_GetAPIClassification_FullMethodName, opts...) +func (c *aPIClassifierClient) ClassifyAPIs(ctx context.Context, opts ...grpc.CallOption) (APIClassifier_ClassifyAPIsClient, error) { + stream, err := c.cc.NewStream(ctx, &APIClassifier_ServiceDesc.Streams[0], APIClassifier_ClassifyAPIs_FullMethodName, opts...)
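// ClassifyAPIs is bidirectional: a caller interleaves Send() of API batches
// with Recv() of classification maps. A rough sketch (hypothetical connection;
// the example paths are illustrative only):
//
//	classifier := NewAPIClassifierClient(conn)
//	clStream, _ := classifier.ClassifyAPIs(context.Background())
//	_ = clStream.Send(&APIClassifierRequest{API: []string{"/v1/users", "/v1/orders"}})
//	resp, _ := clStream.Recv()
//	for api, count := range resp.GetAPIs() {
//		log.Printf("%s -> %d", api, count)
//	}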
if err != nil { return nil, err } - x := &sentryFlowMetricsGetAPIClassificationClient{stream} + x := &aPIClassifierClassifyAPIsClient{stream} return x, nil } -type SentryFlowMetrics_GetAPIClassificationClient interface { - Send(*APIClassificationRequest) error - Recv() (*APIClassificationResponse, error) +type APIClassifier_ClassifyAPIsClient interface { + Send(*APIClassifierRequest) error + Recv() (*APIClassifierResponse, error) grpc.ClientStream } -type sentryFlowMetricsGetAPIClassificationClient struct { +type aPIClassifierClassifyAPIsClient struct { grpc.ClientStream } -func (x *sentryFlowMetricsGetAPIClassificationClient) Send(m *APIClassificationRequest) error { +func (x *aPIClassifierClassifyAPIsClient) Send(m *APIClassifierRequest) error { return x.ClientStream.SendMsg(m) } -func (x *sentryFlowMetricsGetAPIClassificationClient) Recv() (*APIClassificationResponse, error) { - m := new(APIClassificationResponse) +func (x *aPIClassifierClassifyAPIsClient) Recv() (*APIClassifierResponse, error) { + m := new(APIClassifierResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } -// SentryFlowMetricsServer is the server API for SentryFlowMetrics service. -// All implementations should embed UnimplementedSentryFlowMetricsServer +// APIClassifierServer is the server API for APIClassifier service. +// All implementations should embed UnimplementedAPIClassifierServer // for forward compatibility -type SentryFlowMetricsServer interface { - GetAPIClassification(SentryFlowMetrics_GetAPIClassificationServer) error +type APIClassifierServer interface { + ClassifyAPIs(APIClassifier_ClassifyAPIsServer) error } -// UnimplementedSentryFlowMetricsServer should be embedded to have forward compatible implementations. -type UnimplementedSentryFlowMetricsServer struct { +// UnimplementedAPIClassifierServer should be embedded to have forward compatible implementations. +type UnimplementedAPIClassifierServer struct { } -func (UnimplementedSentryFlowMetricsServer) GetAPIClassification(SentryFlowMetrics_GetAPIClassificationServer) error { - return status.Errorf(codes.Unimplemented, "method GetAPIClassification not implemented") +func (UnimplementedAPIClassifierServer) ClassifyAPIs(APIClassifier_ClassifyAPIsServer) error { + return status.Errorf(codes.Unimplemented, "method ClassifyAPIs not implemented") } -// UnsafeSentryFlowMetricsServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to SentryFlowMetricsServer will +// UnsafeAPIClassifierServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to APIClassifierServer will // result in compilation errors. 
-type UnsafeSentryFlowMetricsServer interface { - mustEmbedUnimplementedSentryFlowMetricsServer() +type UnsafeAPIClassifierServer interface { + mustEmbedUnimplementedAPIClassifierServer() } -func RegisterSentryFlowMetricsServer(s grpc.ServiceRegistrar, srv SentryFlowMetricsServer) { - s.RegisterService(&SentryFlowMetrics_ServiceDesc, srv) +func RegisterAPIClassifierServer(s grpc.ServiceRegistrar, srv APIClassifierServer) { + s.RegisterService(&APIClassifier_ServiceDesc, srv) } -func _SentryFlowMetrics_GetAPIClassification_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(SentryFlowMetricsServer).GetAPIClassification(&sentryFlowMetricsGetAPIClassificationServer{stream}) +func _APIClassifier_ClassifyAPIs_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(APIClassifierServer).ClassifyAPIs(&aPIClassifierClassifyAPIsServer{stream}) } -type SentryFlowMetrics_GetAPIClassificationServer interface { - Send(*APIClassificationResponse) error - Recv() (*APIClassificationRequest, error) +type APIClassifier_ClassifyAPIsServer interface { + Send(*APIClassifierResponse) error + Recv() (*APIClassifierRequest, error) grpc.ServerStream } -type sentryFlowMetricsGetAPIClassificationServer struct { +type aPIClassifierClassifyAPIsServer struct { grpc.ServerStream } -func (x *sentryFlowMetricsGetAPIClassificationServer) Send(m *APIClassificationResponse) error { +func (x *aPIClassifierClassifyAPIsServer) Send(m *APIClassifierResponse) error { return x.ServerStream.SendMsg(m) } -func (x *sentryFlowMetricsGetAPIClassificationServer) Recv() (*APIClassificationRequest, error) { - m := new(APIClassificationRequest) +func (x *aPIClassifierClassifyAPIsServer) Recv() (*APIClassifierRequest, error) { + m := new(APIClassifierRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } -// SentryFlowMetrics_ServiceDesc is the grpc.ServiceDesc for SentryFlowMetrics service. +// APIClassifier_ServiceDesc is the grpc.ServiceDesc for APIClassifier service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) -var SentryFlowMetrics_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "protobuf.SentryFlowMetrics", - HandlerType: (*SentryFlowMetricsServer)(nil), +var APIClassifier_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "protobuf.APIClassifier", + HandlerType: (*APIClassifierServer)(nil), Methods: []grpc.MethodDesc{}, Streams: []grpc.StreamDesc{ { - StreamName: "GetAPIClassification", - Handler: _SentryFlowMetrics_GetAPIClassification_Handler, + StreamName: "ClassifyAPIs", + Handler: _APIClassifier_ClassifyAPIs_Handler, ServerStreams: true, ClientStreams: true, }, diff --git a/protobuf/sentryflow_metrics_pb2.py b/protobuf/sentryflow_metrics_pb2.py new file mode 100644 index 0000000..e6cc29c --- /dev/null +++ b/protobuf/sentryflow_metrics_pb2.py @@ -0,0 +1,35 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: sentryflow_metrics.proto +# Protobuf Python Version: 5.26.1 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x18sentryflow_metrics.proto\x12\x08protobuf\"#\n\x14\x41PIClassifierRequest\x12\x0b\n\x03\x41PI\x18\x01 \x03(\t\"}\n\x15\x41PIClassifierResponse\x12\x37\n\x04\x41PIs\x18\x01 \x03(\x0b\x32).protobuf.APIClassifierResponse.APIsEntry\x1a+\n\tAPIsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x04:\x02\x38\x01\x32\x64\n\rAPIClassifier\x12S\n\x0c\x43lassifyAPIs\x12\x1e.protobuf.APIClassifierRequest\x1a\x1f.protobuf.APIClassifierResponse(\x01\x30\x01\x42\x15Z\x13SentryFlow/protobufb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentryflow_metrics_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'Z\023SentryFlow/protobuf' + _globals['_APICLASSIFIERRESPONSE_APISENTRY']._loaded_options = None + _globals['_APICLASSIFIERRESPONSE_APISENTRY']._serialized_options = b'8\001' + _globals['_APICLASSIFIERREQUEST']._serialized_start=38 + _globals['_APICLASSIFIERREQUEST']._serialized_end=73 + _globals['_APICLASSIFIERRESPONSE']._serialized_start=75 + _globals['_APICLASSIFIERRESPONSE']._serialized_end=200 + _globals['_APICLASSIFIERRESPONSE_APISENTRY']._serialized_start=157 + _globals['_APICLASSIFIERRESPONSE_APISENTRY']._serialized_end=200 + _globals['_APICLASSIFIER']._serialized_start=202 + _globals['_APICLASSIFIER']._serialized_end=302 +# @@protoc_insertion_point(module_scope) diff --git a/protobuf/sentryflow_metrics_pb2.pyi b/protobuf/sentryflow_metrics_pb2.pyi new file mode 100644 index 0000000..ec4ea24 --- /dev/null +++ b/protobuf/sentryflow_metrics_pb2.pyi @@ -0,0 +1,25 @@ +from google.protobuf.internal import containers as _containers +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional + +DESCRIPTOR: _descriptor.FileDescriptor + +class APIClassifierRequest(_message.Message): + __slots__ = ("API",) + API_FIELD_NUMBER: _ClassVar[int] + API: _containers.RepeatedScalarFieldContainer[str] + def __init__(self, API: _Optional[_Iterable[str]] = ...) -> None: ... + +class APIClassifierResponse(_message.Message): + __slots__ = ("APIs",) + class APIsEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: int + def __init__(self, key: _Optional[str] = ..., value: _Optional[int] = ...) -> None: ... + APIS_FIELD_NUMBER: _ClassVar[int] + APIs: _containers.ScalarMap[str, int] + def __init__(self, APIs: _Optional[_Mapping[str, int]] = ...) -> None: ... diff --git a/protobuf/sentryflow_metrics_pb2_grpc.py b/protobuf/sentryflow_metrics_pb2_grpc.py new file mode 100644 index 0000000..87671ed --- /dev/null +++ b/protobuf/sentryflow_metrics_pb2_grpc.py @@ -0,0 +1,101 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
+"""Client and server classes corresponding to protobuf-defined services.""" +import grpc +import warnings + +import sentryflow_metrics_pb2 as sentryflow__metrics__pb2 + +GRPC_GENERATED_VERSION = '1.63.0' +GRPC_VERSION = grpc.__version__ +EXPECTED_ERROR_RELEASE = '1.65.0' +SCHEDULED_RELEASE_DATE = 'June 25, 2024' +_version_not_supported = False + +try: + from grpc._utilities import first_version_is_lower + _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) +except ImportError: + _version_not_supported = True + +if _version_not_supported: + warnings.warn( + f'The grpc package installed is at version {GRPC_VERSION},' + + f' but the generated code in sentryflow_metrics_pb2_grpc.py depends on' + + f' grpcio>={GRPC_GENERATED_VERSION}.' + + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' + + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' + + f' This warning will become an error in {EXPECTED_ERROR_RELEASE},' + + f' scheduled for release on {SCHEDULED_RELEASE_DATE}.', + RuntimeWarning + ) + + +class APIClassifierStub(object): + """Missing associated documentation comment in .proto file.""" + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.ClassifyAPIs = channel.stream_stream( + '/protobuf.APIClassifier/ClassifyAPIs', + request_serializer=sentryflow__metrics__pb2.APIClassifierRequest.SerializeToString, + response_deserializer=sentryflow__metrics__pb2.APIClassifierResponse.FromString, + _registered_method=True) + + +class APIClassifierServicer(object): + """Missing associated documentation comment in .proto file.""" + + def ClassifyAPIs(self, request_iterator, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_APIClassifierServicer_to_server(servicer, server): + rpc_method_handlers = { + 'ClassifyAPIs': grpc.stream_stream_rpc_method_handler( + servicer.ClassifyAPIs, + request_deserializer=sentryflow__metrics__pb2.APIClassifierRequest.FromString, + response_serializer=sentryflow__metrics__pb2.APIClassifierResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'protobuf.APIClassifier', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + # This class is part of an EXPERIMENTAL API. 
+class APIClassifier(object): + """Missing associated documentation comment in .proto file.""" + + @staticmethod + def ClassifyAPIs(request_iterator, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.stream_stream( + request_iterator, + target, + '/protobuf.APIClassifier/ClassifyAPIs', + sentryflow__metrics__pb2.APIClassifierRequest.SerializeToString, + sentryflow__metrics__pb2.APIClassifierResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) diff --git a/sentryflow-clients/README.md b/sentryflow-clients/README.md deleted file mode 100644 index 999453c..0000000 --- a/sentryflow-clients/README.md +++ /dev/null @@ -1,2 +0,0 @@ -# SentryFlow Clients - diff --git a/sentryflow-clients/mongo-client/common/config.go b/sentryflow-clients/mongo-client/common/config.go deleted file mode 100644 index 6a9e25f..0000000 --- a/sentryflow-clients/mongo-client/common/config.go +++ /dev/null @@ -1,36 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package common - -import ( - "errors" - "fmt" - "os" - "strconv" -) - -// Config structure -type Config struct { - ServerAddr string - ServerPort int -} - -// Cfg is for global reference -var Cfg Config - -// LoadEnvVars loads environment variables and stores them as global variable -func LoadEnvVars() (Config, error) { - var err error - - // load listen address and check if valid - Cfg.ServerAddr = os.Getenv("SERVER_ADDR") - - // load listen port and check if valid - Cfg.ServerPort, err = strconv.Atoi(os.Getenv("SERVER_PORT")) - if err != nil { - msg := fmt.Sprintf("invalid server port %s: %v", os.Getenv("SERVER_PORT"), err) - return Cfg, errors.New(msg) - } - - return Cfg, nil -} diff --git a/sentryflow-clients/mongo-client/db/dbHandler.go b/sentryflow-clients/mongo-client/db/dbHandler.go deleted file mode 100644 index ea0127f..0000000 --- a/sentryflow-clients/mongo-client/db/dbHandler.go +++ /dev/null @@ -1,96 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package db - -import ( - protobuf "SentryFlow/protobuf" - "context" - "errors" - "fmt" - "go.mongodb.org/mongo-driver/mongo" - "go.mongodb.org/mongo-driver/mongo/options" - "log" - "os" - "time" -) - -// Handler structure -type Handler struct { - client *mongo.Client - database *mongo.Database - alCollection *mongo.Collection - metricsCollection *mongo.Collection - cancel context.CancelFunc - dbURL string -} - -// Manager structure -var Manager *Handler - -// New creates a new mongoDB handler -func New() (*Handler, error) { - dbHost := os.Getenv("MONGODB_HOST") - h := Handler{} - var err error - - // Environment variable was not set - if dbHost == "" { - return nil, errors.New("$MONGODB_HOST not set") - } - - // Create a MongoDB client - h.client, err = mongo.NewClient(options.Client().ApplyURI(dbHost)) - if err != nil { - msg := fmt.Sprintf("unable to initialize monogoDB client for %s: %v", dbHost, err) - return nil, errors.New(msg) - } - - // Set timeout (10 sec) - var ctx context.Context - ctx, h.cancel = context.WithTimeout(context.Background(), 10*time.Second) - - // Try connecting the server - err = h.client.Connect(ctx) - if err != nil { - msg := fmt.Sprintf("unable to connect mongoDB server %s: %v", dbHost, err) - return nil, errors.New(msg) - } - - // Create 'sentryflow' database and 'api-logs' collection - h.database = 
h.client.Database("sentryflow") - h.alCollection = h.database.Collection("api-logs") - h.metricsCollection = h.database.Collection("metrics") - - Manager = &h - return &h, nil -} - -// Disconnect function -func (h *Handler) Disconnect() { - err := h.client.Disconnect(context.Background()) - if err != nil { - log.Printf("unable to properly disconnect: %v", err) - } - - return -} - -// InsertAl function -func (h *Handler) InsertAl(data *protobuf.APILog) error { - _, err := h.alCollection.InsertOne(context.Background(), data) - if err != nil { - return err - } - - return nil -} - -// InsertMetrics function -func (h *Handler) InsertMetrics(data *protobuf.EnvoyMetric) error { - _, err := h.metricsCollection.InsertOne(context.Background(), data) - if err != nil { - return err - } - - return nil -} diff --git a/sentryflow-clients/mongo-client/main.go b/sentryflow-clients/mongo-client/main.go deleted file mode 100644 index 8dce36c..0000000 --- a/sentryflow-clients/mongo-client/main.go +++ /dev/null @@ -1,120 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package main - -import ( - protobuf "SentryFlow/protobuf" - "context" - "fmt" - "google.golang.org/grpc" - "io" - "log" - "mongo-client/common" - "mongo-client/db" - "os" - "os/signal" - "syscall" -) - -func accessLogRoutine(stream protobuf.SentryFlow_GetLogClient, done chan struct{}) { - for { - select { - default: - data, err := stream.Recv() - if err == io.EOF { - break - } - if err != nil { - log.Fatalf("failed to receive log: %v", err) - } - log.Printf("[Client] Inserting log") - err = db.Manager.InsertAl(data) - if err != nil { - log.Printf("[Client] Failed to insert log: %v", err) - } - case <-done: - return - } - } -} - -func metricRoutine(stream protobuf.SentryFlow_GetEnvoyMetricsClient, done chan struct{}) { - for { - select { - default: - data, err := stream.Recv() - if err == io.EOF { - break - } - if err != nil { - log.Fatalf("failed to receive metric: %v", err) - } - log.Printf("[Client] Inserting metric") - err = db.Manager.InsertMetrics(data) - if err != nil { - log.Printf("[Client] Failed to insert metric: %v", err) - } - case <-done: - return - } - } -} - -func main() { - // Init DB - _, err := db.New() - if err != nil { - log.Fatalf("Unable to intialize DB: %v", err) - } - - // Load environment variables - cfg, err := common.LoadEnvVars() - if err != nil { - log.Fatalf("Could not load environment variables: %v", err) - } - - // Construct address and start listening - addr := fmt.Sprintf("%s:%d", cfg.ServerAddr, cfg.ServerPort) - - // Set up a connection to the server. - conn, err := grpc.Dial(addr, grpc.WithInsecure()) - if err != nil { - log.Fatalf("could not connect: %v", err) - } - defer conn.Close() - - // Start serving gRPC server - log.Printf("[gRPC] Successfully connected to %s", addr) - - // Create a client for the SentryFlow service. - client := protobuf.NewSentryFlowClient(conn) - - hostname, err := os.Hostname() - if err != nil { - log.Fatalf("could not find hostname: %v", err) - } - - // Define the client information. 
- clientInfo := &protobuf.ClientInfo{ - HostName: hostname, - } - - // Contact the server and print out its response - accessLogStream, err := client.GetLog(context.Background(), clientInfo) - metricStream, err := client.GetEnvoyMetrics(context.Background(), clientInfo) - if err != nil { - log.Fatalf("could not get log: %v", err) - } - - done := make(chan struct{}) - - go accessLogRoutine(accessLogStream, done) - go metricRoutine(metricStream, done) - - signalChan := make(chan os.Signal, 1) - signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM) - - <-signalChan - - close(done) -} diff --git a/sentryflow/Dockerfile b/sentryflow/Dockerfile index 196fcc7..4c1df63 100644 --- a/sentryflow/Dockerfile +++ b/sentryflow/Dockerfile @@ -5,35 +5,32 @@ FROM golang:1.21-alpine3.17 as builder RUN apk --no-cache update -RUN apk add --no-cache git clang llvm make gcc protobuf make +RUN apk add --no-cache git clang llvm make gcc protobuf musl-dev RUN apk add --update alpine-sdk + RUN go install github.com/golang/protobuf/protoc-gen-go@latest RUN go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest -RUN mkdir /app +RUN mkdir /sentryflow RUN mkdir /protobuf WORKDIR /protobuf COPY /protobuf . -RUN make build -WORKDIR /app +WORKDIR /sentryflow COPY /sentryflow . -RUN go install github.com/golang/protobuf/protoc-gen-go@latest -RUN go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest RUN export CGO_ENABLED=1; export CC=gcc; RUN go build -o sentryflow ### Make executable image -FROM alpine:3.18 as sentryflow +FROM alpine:3.17 as sentryflow -RUN echo "@community http://dl-cdn.alpinelinux.org/alpine/edge/community" | tee -a /etc/apk/repositories - -RUN apk --no-cache update -RUN apk add bash +# RUN echo "@community http://dl-cdn.alpinelinux.org/alpine/edge/community" | tee -a /etc/apk/repositories +# RUN apk --no-cache update +# RUN apk add bash -COPY --from=builder /app/sentryflow . +COPY --from=builder /sentryflow/sentryflow / -CMD ["./sentryflow"] +CMD ["/sentryflow"] diff --git a/sentryflow/Makefile b/sentryflow/Makefile index 2ee88fb..91e228c 100644 --- a/sentryflow/Makefile +++ b/sentryflow/Makefile @@ -4,6 +4,15 @@ PROG_NAME = sentryflow IMAGE_NAME = 5gsec/$(PROG_NAME) TAG = v0.1 +.PHONY: build +build: gofmt golint gosec + go mod tidy + go build -o $(PROG_NAME) + +.PHONY: clean +clean: + rm -f $(PROG_NAME) + .PHONY: gofmt gofmt: cd $(CURDIR); gofmt -w -s -d $(shell find . -type f -name '*.go' -print) @@ -38,15 +47,6 @@ ifeq (, $(shell which gosec)) endif cd $(CURDIR); gosec -exclude=G402 ./... 
-.PHONY: build -build: - go mod tidy - go build -o $(PROG_NAME) - -.PHONY: clean -clean: - rm -f $(PROG_NAME) - .PHONY: build-image build-image: docker build -t $(IMAGE_NAME):$(TAG) -f ./Dockerfile ../ diff --git a/sentryflow/collector/collectorHandler.go b/sentryflow/collector/collectorHandler.go index 85f885f..2d2e92d 100644 --- a/sentryflow/collector/collectorHandler.go +++ b/sentryflow/collector/collectorHandler.go @@ -3,98 +3,89 @@ package collector import ( - "errors" "fmt" - cfg "github.com/5GSEC/SentryFlow/config" - "google.golang.org/grpc" "log" "net" - "sync" + + "github.com/5gsec/SentryFlow/config" + "google.golang.org/grpc" ) -// Ch global reference for Collector Handler -var Ch *Handler +// == // + +// ColH global reference for Collector Handler +var ColH *ColHandler // init Function func init() { - Ch = NewCollectorHandler() + ColH = NewCollectorHandler() } -// Handler Structure -type Handler struct { - collectors []collectorInterface - - listener net.Listener +// ColHandler Structure +type ColHandler struct { + colService net.Listener grpcServer *grpc.Server - - wg sync.WaitGroup + collectors []collectorInterface } // NewCollectorHandler Function -func NewCollectorHandler() *Handler { - ch := &Handler{ +func NewCollectorHandler() *ColHandler { + ch := &ColHandler{ collectors: make([]collectorInterface, 0), } - return ch } -// InitGRPCServer Function -func (h *Handler) InitGRPCServer() error { - listenAddr := fmt.Sprintf("%s:%s", cfg.GlobalCfg.OtelGRPCListenAddr, cfg.GlobalCfg.OtelGRPCListenPort) +// == // + +// StartCollector Function +func StartCollector() bool { + // Make a string with the given collector address and port + collectorService := fmt.Sprintf("%s:%s", config.GlobalConfig.CollectorAddr, config.GlobalConfig.CollectorPort) - // Start listening - lis, err := net.Listen("tcp", listenAddr) + // Start listening gRPC port + colService, err := net.Listen("tcp", collectorService) if err != nil { - msg := fmt.Sprintf("unable to listen at %s: %v", listenAddr, err) - return errors.New(msg) + log.Fatalf("[Collector] Failed to listen at %s: %v", collectorService, err) + return false } + ColH.colService = colService - // Create gRPC Server, register services - server := grpc.NewServer() + log.Printf("[Collector] Listening Collector gRPC services (%s)", collectorService) - h.listener = lis - h.grpcServer = server + // Create gRPC Service + gRPCServer := grpc.NewServer() + ColH.grpcServer = gRPCServer - // initialize collectors - err = h.initCollectors() - if err != nil { - log.Printf("[Collector] Unable to initialize collector: %v", err) - } + // initialize OpenTelemetry collector + ColH.collectors = append(ColH.collectors, newOpenTelemetryLogsServer()) + + // initialize Envoy collectors for AccessLogs and Metrics + ColH.collectors = append(ColH.collectors, newEnvoyAccessLogsServer()) + ColH.collectors = append(ColH.collectors, newEnvoyMetricsServer()) // register services - h.registerServices() + for _, col := range ColH.collectors { + col.registerService(ColH.grpcServer) + } - log.Printf("[Collector] Server listening at %s", listenAddr) - return nil -} + log.Print("[Collector] Initialized Collector gRPC services") -// initCollectors Function -func (h *Handler) initCollectors() error { - // @todo make configuration determine which collector to start or not - h.collectors = append(h.collectors, newOtelLogServer()) - h.collectors = append(h.collectors, newEnvoyMetricsServer()) - h.collectors = append(h.collectors, newEnvoyAccessLogsServer()) + // Serve gRPC Service + go 
ColH.grpcServer.Serve(ColH.colService) - return nil -} + log.Print("[Collector] Serving Collector gRPC services") -// registerServices Function -func (h *Handler) registerServices() { - for _, col := range h.collectors { - col.registerService(h.grpcServer) - log.Printf("[Collector] Successfully registered services") - } + return true } -// Serve Function -func (h *Handler) Serve() error { - log.Printf("[Collector] Starting gRPC server") - return h.grpcServer.Serve(h.listener) -} +// StopCollector Function +func StopCollector() bool { + ColH.grpcServer.GracefulStop() -// Stop Function -func (h *Handler) Stop() { - log.Printf("[Collector] Stopped gRPC server") - h.grpcServer.GracefulStop() + log.Print("[Collector] Gracefully stopped Collector gRPC services") + + return true } + +// == // diff --git a/sentryflow/collector/envoy.go b/sentryflow/collector/envoy.go index f5e7caf..7fbccee 100644 --- a/sentryflow/collector/envoy.go +++ b/sentryflow/collector/envoy.go @@ -5,13 +5,41 @@ package collector import ( "io" "log" + "strconv" - "github.com/5GSEC/SentryFlow/core" - envoyAls "github.com/envoyproxy/go-control-plane/envoy/service/accesslog/v3" + "github.com/5gsec/SentryFlow/k8s" + "github.com/5gsec/SentryFlow/processor" + "github.com/5gsec/SentryFlow/protobuf" + "github.com/5gsec/SentryFlow/types" + + envoyAccLogsData "github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3" + envoyAccLogs "github.com/envoyproxy/go-control-plane/envoy/service/accesslog/v3" envoyMetrics "github.com/envoyproxy/go-control-plane/envoy/service/metrics/v3" + "google.golang.org/grpc" ) +// == // + +// EnvoyAccessLogsServer Structure +type EnvoyAccessLogsServer struct { + envoyAccLogs.UnimplementedAccessLogServiceServer + collectorInterface +} + +// newEnvoyAccessLogsServer Function +func newEnvoyAccessLogsServer() *EnvoyAccessLogsServer { + ret := &EnvoyAccessLogsServer{} + return ret +} + +// registerService Function +func (evyAccLogs *EnvoyAccessLogsServer) registerService(server *grpc.Server) { + envoyAccLogs.RegisterAccessLogServiceServer(server, evyAccLogs) +} + +// == // + // EnvoyMetricsServer Structure type EnvoyMetricsServer struct { envoyMetrics.UnimplementedMetricsServiceServer @@ -25,78 +53,174 @@ func newEnvoyMetricsServer() *EnvoyMetricsServer { } // registerService Function -func (ems *EnvoyMetricsServer) registerService(server *grpc.Server) { - envoyMetrics.RegisterMetricsServiceServer(server, ems) +func (evyMetrics *EnvoyMetricsServer) registerService(server *grpc.Server) { + envoyMetrics.RegisterMetricsServiceServer(server, evyMetrics) } -// StreamMetrics Function -func (ems *EnvoyMetricsServer) StreamMetrics(stream envoyMetrics.MetricsService_StreamMetricsServer) error { - event, err := stream.Recv() - if err == io.EOF { - return nil - } - if err != nil { - log.Printf("[Envoy] Something went on wrong when receiving event: %v", err) - return err +// == // + +// generateAPILogsFromEnvoy Function +func generateAPILogsFromEnvoy(entry *envoyAccLogsData.HTTPAccessLogEntry) *protobuf.APILog { + comm := entry.GetCommonProperties() + timeStamp := comm.GetStartTime().Seconds + + srcInform := entry.GetCommonProperties().GetDownstreamRemoteAddress().GetSocketAddress() + srcIP := srcInform.GetAddress() + srcPort := strconv.Itoa(int(srcInform.GetPortValue())) + src := k8s.LookupK8sResource(srcIP) + + dstInform := entry.GetCommonProperties().GetUpstreamRemoteAddress().GetSocketAddress() + dstIP := dstInform.GetAddress() + dstPort := strconv.Itoa(int(dstInform.GetPortValue())) + dst := 
k8s.LookupK8sResource(dstIP) + + request := entry.GetRequest() + response := entry.GetResponse() + + protocol := entry.GetProtocolVersion().String() + method := request.GetRequestMethod().String() + path := request.GetPath() + resCode := response.GetResponseCode().GetValue() + + envoyAPILog := &protobuf.APILog{ + Id: 0, // @todo zero for now + TimeStamp: strconv.FormatInt(timeStamp, 10), + + SrcNamespace: src.Namespace, + SrcName: src.Name, + SrcLabel: src.Labels, + SrcIP: srcIP, + SrcPort: srcPort, + SrcType: types.K8sResourceTypeToString(src.Type), + + DstNamespace: dst.Namespace, + DstName: dst.Name, + DstLabel: dst.Labels, + DstIP: dstIP, + DstPort: dstPort, + DstType: types.K8sResourceTypeToString(dst.Type), + + Protocol: protocol, + Method: method, + Path: path, + ResponseCode: int32(resCode), } - err = event.ValidateAll() - if err != nil { - log.Printf("[Envoy] Failed to validate stream: %v", err) + return envoyAPILog +} + +// StreamAccessLogs Function +func (evyAccLogs *EnvoyAccessLogsServer) StreamAccessLogs(stream envoyAccLogs.AccessLogService_StreamAccessLogsServer) error { + for { + event, err := stream.Recv() + if err == io.EOF { + return nil + } else if err != nil { + log.Printf("[EnvoyAPILogs] Failed to receive an event: %v", err) + return err + } + + if event.GetHttpLogs() != nil { + for _, entry := range event.GetHttpLogs().LogEntry { + envoyAPILog := generateAPILogsFromEnvoy(entry) + processor.InsertAPILog(envoyAPILog) + } + } } +} - // @todo parse this event entry into our format - identifier := event.GetIdentifier() - identifier.GetNode().GetMetadata() +// == // - if identifier != nil { - log.Printf("[Envoy] Received EnvoyMetric - ID: %s, %s", identifier.GetNode().GetId(), identifier.GetNode().GetCluster()) - metaData := identifier.GetNode().GetMetadata().AsMap() +// generateMetricsFromEnvoy Function +func generateMetricsFromEnvoy(event *envoyMetrics.StreamMetricsMessage, metaData map[string]interface{}) *protobuf.EnvoyMetrics { + envoyMetrics := &protobuf.EnvoyMetrics{ + TimeStamp: "", - envoyMetric := core.GenerateMetricFromEnvoy(event, metaData) + Namespace: metaData["NAMESPACE"].(string), + Name: metaData["NAME"].(string), + IPAddress: metaData["INSTANCE_IPS"].(string), + Labels: k8s.LookupK8sResource(metaData["INSTANCE_IPS"].(string)).Labels, - core.Lh.InsertLog(envoyMetric) + Metrics: make(map[string]*protobuf.MetricValue), } - return nil -} + envoyMetrics.Metrics["GAUGE"] = &protobuf.MetricValue{ + Value: make(map[string]string), + } -// EnvoyAccessLogsServer Structure -type EnvoyAccessLogsServer struct { - envoyAls.UnimplementedAccessLogServiceServer - collectorInterface -} + envoyMetrics.Metrics["COUNTER"] = &protobuf.MetricValue{ + Value: make(map[string]string), + } -// newEnvoyAccessLogsServer Function -func newEnvoyAccessLogsServer() *EnvoyAccessLogsServer { - ret := &EnvoyAccessLogsServer{} - return ret -} + envoyMetrics.Metrics["HISTOGRAM"] = &protobuf.MetricValue{ + Value: make(map[string]string), + } -// registerService Function -func (eas *EnvoyAccessLogsServer) registerService(server *grpc.Server) { - envoyAls.RegisterAccessLogServiceServer(server, eas) -} + envoyMetrics.Metrics["SUMMARY"] = &protobuf.MetricValue{ + Value: make(map[string]string), + } -// StreamAccessLogs Function -func (eas *EnvoyAccessLogsServer) StreamAccessLogs(stream envoyAls.AccessLogService_StreamAccessLogsServer) error { - for { - event, err := stream.Recv() - if err == io.EOF { - return nil - } + for _, metric := range event.GetEnvoyMetrics() { + metricType := 
metric.GetType().String() + metricName := metric.GetName() - if err != nil { - log.Printf("[Envoy] Something went on wrong when receiving event: %v", err) - return err + if envoyMetrics.Metrics[metricType].Value == nil { + continue } - // Check HTTP logs - if event.GetHttpLogs() != nil { - for _, entry := range event.GetHttpLogs().LogEntry { - envoyAccessLog := core.GenerateAccessLogsFromEnvoy(entry) - core.Lh.InsertLog(envoyAccessLog) + for _, metricDetail := range metric.GetMetric() { + var metricValue string + + if envoyMetrics.TimeStamp == "" { + envoyMetrics.TimeStamp = strconv.FormatInt(metricDetail.GetTimestampMs(), 10) + } + + if metricType == "GAUGE" { + metricValue = strconv.FormatFloat(metricDetail.GetGauge().GetValue(), 'f', -1, 64) } + + if metricType == "COUNTER" { + metricValue = strconv.FormatFloat(metricDetail.GetCounter().GetValue(), 'f', -1, 64) + } + + if metricType == "HISTOGRAM" { + metricValue = strconv.FormatUint(metricDetail.GetHistogram().GetSampleCount(), 10) + } + + if metricType == "SUMMARY" { + metricValue = strconv.FormatUint(metricDetail.GetSummary().GetSampleCount(), 10) + } + + envoyMetrics.Metrics[metricType].Value[metricName] = metricValue } } + + return envoyMetrics } + +// StreamMetrics Function +func (evyMetrics *EnvoyMetricsServer) StreamMetrics(stream envoyMetrics.MetricsService_StreamMetricsServer) error { + event, err := stream.Recv() + if err == io.EOF { + return nil + } else if err != nil { + log.Printf("[EnvoyMetrics] Failed to receive an event: %v", err) + return err + } + + err = event.ValidateAll() + if err != nil { + log.Printf("[EnvoyMetrics] Failed to validate an event: %v", err) + } + + identifier := event.GetIdentifier() + if identifier != nil { + metaData := identifier.GetNode().GetMetadata().AsMap() + envoyMetrics := generateMetricsFromEnvoy(event, metaData) + processor.InsertMetrics(envoyMetrics) + } + + return nil +} + +// == // diff --git a/sentryflow/collector/interface.go b/sentryflow/collector/interface.go index 154d83c..a610c4f 100644 --- a/sentryflow/collector/interface.go +++ b/sentryflow/collector/interface.go @@ -2,9 +2,15 @@ package collector -import "google.golang.org/grpc" +import ( + "google.golang.org/grpc" +) + +// == // // collectorInterface Interface type collectorInterface interface { registerService(server *grpc.Server) } + +// == // diff --git a/sentryflow/collector/opentelemetry.go b/sentryflow/collector/opentelemetry.go index a69439b..05f9830 100644 --- a/sentryflow/collector/opentelemetry.go +++ b/sentryflow/collector/opentelemetry.go @@ -4,41 +4,140 @@ package collector import ( "context" - "github.com/5GSEC/SentryFlow/core" + "strconv" + "strings" + + "github.com/5gsec/SentryFlow/k8s" + "github.com/5gsec/SentryFlow/processor" + "github.com/5gsec/SentryFlow/protobuf" + "github.com/5gsec/SentryFlow/types" otelLogs "go.opentelemetry.io/proto/otlp/collector/logs/v1" "google.golang.org/grpc" ) -// OtelLogServer structure -type OtelLogServer struct { +// == // + +// OpenTelemetryLogsServer structure +type OpenTelemetryLogsServer struct { otelLogs.UnimplementedLogsServiceServer collectorInterface } -// newOtelLogServer Function -func newOtelLogServer() *OtelLogServer { - ret := &OtelLogServer{} +// newOpenTelemetryLogsServer Function +func newOpenTelemetryLogsServer() *OpenTelemetryLogsServer { + ret := &OpenTelemetryLogsServer{} return ret } // registerService Function -func (ols *OtelLogServer) registerService(server *grpc.Server) { - otelLogs.RegisterLogsServiceServer(server, ols) +func (otlLogs 
*OpenTelemetryLogsServer) registerService(server *grpc.Server) { + otelLogs.RegisterLogsServiceServer(server, otlLogs) } -// Export Function -func (ols *OtelLogServer) Export(_ context.Context, req *otelLogs.ExportLogsServiceRequest) (*otelLogs.ExportLogsServiceResponse, error) { - // This is for Log.Export in OpenTelemetry format - als := core.GenerateAccessLogsFromOtel(req.String()) +// == // + +// generateAPILogsFromOtel Function +func generateAPILogsFromOtel(logText string) []*protobuf.APILog { + apiLogs := make([]*protobuf.APILog, 0) + + // Preprocess redundant chars + logText = strings.ReplaceAll(logText, `\"`, "") + logText = strings.ReplaceAll(logText, `}`, "") + + // Split logs by log_records, this is a single access log instance + parts := strings.Split(logText, "log_records") + if len(parts) == 0 { + return nil + } + + // Ignore the first entry (the metadata "resource_logs:{resource:{ scope_logs:{" part) + for _, accessLog := range parts[1:] { + var srcIP string + var srcPort string + var dstIP string + var dstPort string + + if len(accessLog) == 0 { + continue + } + + index := strings.Index(accessLog, "string_value:\"") + if index == -1 { + continue + } + + words := strings.Fields(accessLog[index+len("string_value:\""):]) + + timeStamp := words[0] + method := words[1] + path := words[2] + protocol := words[3] + resCode, _ := strconv.ParseInt(words[4], 10, 64) + + srcInform := words[21] - for _, al := range als { - core.Lh.InsertLog(al) + // Extract the left and right words based on the colon delimiter (ADDR:PORT) + colonIndex := strings.LastIndex(srcInform, ":") + if colonIndex > 0 && colonIndex < len(srcInform)-1 { + srcIP = strings.TrimSpace(srcInform[:colonIndex]) + srcPort = strings.TrimSpace(srcInform[colonIndex+1:]) + } + src := k8s.LookupK8sResource(srcIP) + + dstInform := words[20] + + // Extract the left and right words based on the colon delimiter (ADDR:PORT) + colonIndex = strings.LastIndex(dstInform, ":") + if colonIndex > 0 && colonIndex < len(dstInform)-1 { + dstIP = strings.TrimSpace(dstInform[:colonIndex]) + dstPort = strings.TrimSpace(dstInform[colonIndex+1:]) + } + dst := k8s.LookupK8sResource(dstIP) + + // Create APILog + apiLog := protobuf.APILog{ + Id: 0, // @todo zero for now + TimeStamp: timeStamp, + + SrcNamespace: src.Namespace, + SrcName: src.Name, + SrcLabel: src.Labels, + SrcIP: srcIP, + SrcPort: srcPort, + SrcType: types.K8sResourceTypeToString(src.Type), + + DstNamespace: dst.Namespace, + DstName: dst.Name, + DstLabel: dst.Labels, + DstIP: dstIP, + DstPort: dstPort, + DstType: types.K8sResourceTypeToString(dst.Type), + + Protocol: protocol, + Method: method, + Path: path, + ResponseCode: int32(resCode), + } + + apiLogs = append(apiLogs, &apiLog) + } + + return apiLogs +} + +// Export Function for Log.Export in OpenTelemetry format +func (otlLogs *OpenTelemetryLogsServer) Export(_ context.Context, req *otelLogs.ExportLogsServiceRequest) (*otelLogs.ExportLogsServiceResponse, error) { + apiLogs := generateAPILogsFromOtel(req.String()) + for _, apiLog := range apiLogs { + processor.InsertAPILog(apiLog) } - // For now, we will not consider partial success + // @todo not consider partial success ret := otelLogs.ExportLogsServiceResponse{ PartialSuccess: nil, } return &ret, nil } + +// == // diff --git a/sentryflow/config/config.go b/sentryflow/config/config.go index 12bb9dd..03fa377 100644 --- a/sentryflow/config/config.go +++ b/sentryflow/config/config.go @@ -6,7 +6,6 @@ import ( "flag" "fmt" "log" - "os" "strings" "github.com/spf13/viper" @@ -14,30 
+13,27 @@ import ( // SentryFlowConfig structure type SentryFlowConfig struct { - OtelGRPCListenAddr string // IP address to use for OTEL gRPC - OtelGRPCListenPort string // Port to use for OTEL gRPC + CollectorAddr string // Address for Collector gRPC + CollectorPort string // Port for Collector gRPC - CustomExportListenAddr string // IP address to use for custom exporter gRPC - CustomExportListenPort string // Port to use for custom exporter gRPC + ExporterAddr string // IP address to use for exporter gRPC + ExporterPort string // Port to use for exporter gRPC - PatchNamespace bool // Enable/Disable patching namespace for Istio injection - PatchRestartDeployments bool // Enable/Disable restarting deployments after patching + PatchingNamespaces bool // Enable/Disable patching namespaces with 'istio-injection' + RestartingPatchedDeployments bool // Enable/Disable restarting deployments after patching - AIEngineService string - AIEngineServicePort string - AIEngineBatchSize int + AggregationPeriod int // Period for aggregating metrics + CleanUpPeriod int // Period for cleaning up outdated metrics - MetricsDBFileName string // String value of MetricsDB file (sqlite3 db file) - MetricsDBAggregationTime int // Value of APILog Aggregation Time - MetricsDBClearTime int // Value of APIMetric DB Clear time - APIMetricsSendTime int // Value of APIMetric send time + AIEngineService string // Address for AI Engine + AIEngineServicePort string // Port for AI Engine + AIEngineBatchSize int // Batch Size to send APIs to AI Engine - CollectorEnableOpenTelemetry bool // Enable/Disable OpenTelemetry Collector - Debug bool // Enable/Disable SentryFlow debug mode + Debug bool // Enable/Disable SentryFlow debug mode } -// GlobalCfg Global configuration for SentryFlow -var GlobalCfg SentryFlowConfig +// GlobalConfig Global configuration for SentryFlow +var GlobalConfig SentryFlowConfig // init Function func init() { @@ -46,39 +42,43 @@ func init() { // Config const const ( - OtelGRPCListenAddr string = "otelGRPCListenAddr" - OtelGRPCListenPort string = "otelGRPCListenPort" - CustomExportListenAddr string = "customExportListenAddr" - CustomExportListenPort string = "customExportListenPort" - PatchNamespace string = "patchNamespace" - PatchRestartDeployments string = "patchRestartDeployments" - AIEngineService string = "aiEngineService" - AIEngineServicePort string = "aiEngineServicePort" - AIEngineBatchSize string = "aiEngineBatchSize" - MetricsDBFileName string = "metricsDBFileName" - MetricsDBAggregationTime string = "metricsDBAggregationTime" - MetricsDBClearTime string = "metricsDBClearTime" - APIMetricsSendTime string = "apiMetricsSendTime" - CollectorEnableOpenTelemetry string = "collectorEnableOpenTelemetry" - Debug string = "debug" + CollectorAddr string = "collectorAddr" + CollectorPort string = "collectorPort" + + ExporterAddr string = "exporterAddr" + ExporterPort string = "exporterPort" + + PatchingNamespaces string = "patchingNamespaces" + RestartingPatchedDeployments string = "restartingPatchedDeployments" + + AggregationPeriod string = "aggregationPeriod" + CleanUpPeriod string = "cleanUpPeriod" + + AIEngineService string = "aiEngineService" + AIEngineServicePort string = "aiEngineServicePort" + AIEngineBatchSize string = "aiEngineBatchSize" + + Debug string = "debug" ) func readCmdLineParams() { - otelGRPCListenAddrStr := flag.String(OtelGRPCListenAddr, "0.0.0.0", "OTEL gRPC server listen address") - otelGRPCListenPortStr := flag.String(OtelGRPCListenPort, "4317", "OTEL gRPC server listen port") 
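// Every key registered in readCmdLineParams() also resolves from an
// upper-cased environment variable once LoadConfig() calls
// viper.AutomaticEnv(), so these two runs are equivalent (hypothetical
// values):
//
//	./sentryflow -collectorPort 4317 -exporterPort 8080 -aggregationPeriod 1
//	COLLECTORPORT=4317 EXPORTERPORT=8080 AGGREGATIONPERIOD=1 ./sentryflow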
-	customExportListenAddrStr := flag.String(CustomExportListenAddr, "0.0.0.0", "Custom export gRPC server listen address")
-	customExportListenPortStr := flag.String(CustomExportListenPort, "8080", "Custom export gRPC server listen port")
-	patchNamespaceB := flag.Bool(PatchNamespace, false, "Enable/Disable patching Istio injection to all namespaces")
-	patchRestartDeploymentsB := flag.Bool(PatchRestartDeployments, false, "Enable/Disable restarting deployments in all namespaces")
-	aiEngineServiceStr := flag.String(AIEngineService, "ai-engine.sentryflow.svc.cluster.local", "Service address for SentryFlow AI Engine")
-	aiEngineServicePortStr := flag.String(AIEngineServicePort, "5000", "Service Port for SentryFlow AI Engine")
-	aiEngineBatchSizeInt := flag.Int(AIEngineBatchSize, 5, "Batch size for SentryFlow AI Engine")
-	metricsDBFileNameStr := flag.String(MetricsDBFileName, "/etc/sentryflow/metrics.db", "File name for local metrics DB")
-	metricsDBAggregationTimeInt := flag.Int(MetricsDBAggregationTime, 10, "Term time between aggregations")
-	metricsDBClearTimeInt := flag.Int(MetricsDBClearTime, 600, "Metrics DB Clear Time")
-	APIMetricsSendTimeInt := flag.Int(APIMetricsSendTime, 10, "APIMetric send term")
-	collectorEnableOpenTelemetryB := flag.Bool(CollectorEnableOpenTelemetry, true, "Enable/Disable OpenTelemetry Collector")
-	configDebugB := flag.Bool(Debug, false, "Enable/Disable debugging mode using logs")
+	collectorAddrStr := flag.String(CollectorAddr, "0.0.0.0", "Address for Collector gRPC")
+	collectorPortStr := flag.String(CollectorPort, "4317", "Port for Collector gRPC")
+
+	exporterAddrStr := flag.String(ExporterAddr, "0.0.0.0", "Address for Exporter gRPC")
+	exporterPortStr := flag.String(ExporterPort, "8080", "Port for Exporter gRPC")
+
+	patchingNamespacesB := flag.Bool(PatchingNamespaces, false, "Enable patching 'istio-injection' to all namespaces")
+	restartingPatchedDeploymentsB := flag.Bool(RestartingPatchedDeployments, false, "Enable restarting the deployments in all patched namespaces")
+
+	aggregationPeriodInt := flag.Int(AggregationPeriod, 1, "Period for aggregating metrics")
+	cleanUpPeriodInt := flag.Int(CleanUpPeriod, 5, "Period for cleaning up outdated metrics")
+
+	aiEngineServiceStr := flag.String(AIEngineService, "ai-engine.sentryflow.svc.cluster.local", "Address for SentryFlow AI Engine")
+	aiEngineServicePortStr := flag.String(AIEngineServicePort, "5000", "Port for SentryFlow AI Engine")
+	aiEngineBatchSizeInt := flag.Int(AIEngineBatchSize, 5, "Batch size to send APIs to SentryFlow AI Engine")
+
+	configDebugB := flag.Bool(Debug, false, "Enable debugging mode")
 	var flags []string
 	flag.VisitAll(func(f *flag.Flag) {
@@ -89,20 +89,22 @@ func readCmdLineParams() {
 	flag.Parse()
-	viper.SetDefault(OtelGRPCListenAddr, *otelGRPCListenAddrStr)
-	viper.SetDefault(OtelGRPCListenPort, *otelGRPCListenPortStr)
-	viper.SetDefault(CustomExportListenAddr, *customExportListenAddrStr)
-	viper.SetDefault(CustomExportListenPort, *customExportListenPortStr)
-	viper.SetDefault(PatchNamespace, *patchNamespaceB)
-	viper.SetDefault(PatchRestartDeployments, *patchRestartDeploymentsB)
+	viper.SetDefault(CollectorAddr, *collectorAddrStr)
+	viper.SetDefault(CollectorPort, *collectorPortStr)
+
+	viper.SetDefault(ExporterAddr, *exporterAddrStr)
+	viper.SetDefault(ExporterPort, *exporterPortStr)
+
+	viper.SetDefault(PatchingNamespaces, *patchingNamespacesB)
+	viper.SetDefault(RestartingPatchedDeployments, *restartingPatchedDeploymentsB)
+
+	viper.SetDefault(AggregationPeriod,
*aggregationPeriodInt) + viper.SetDefault(CleanUpPeriod, *cleanUpPeriodInt) + viper.SetDefault(AIEngineService, *aiEngineServiceStr) viper.SetDefault(AIEngineServicePort, *aiEngineServicePortStr) viper.SetDefault(AIEngineBatchSize, *aiEngineBatchSizeInt) - viper.SetDefault(MetricsDBFileName, *metricsDBFileNameStr) - viper.SetDefault(MetricsDBAggregationTime, *metricsDBAggregationTimeInt) - viper.SetDefault(MetricsDBClearTime, *metricsDBClearTimeInt) - viper.SetDefault(APIMetricsSendTime, *APIMetricsSendTimeInt) - viper.SetDefault(CollectorEnableOpenTelemetry, *collectorEnableOpenTelemetryB) + viper.SetDefault(Debug, *configDebugB) } @@ -114,26 +116,25 @@ func LoadConfig() error { // Read environment variable, those are upper-cased viper.AutomaticEnv() - // todo: read configuration from config file - _ = os.Getenv("SENTRYFLOW_CFG") - - GlobalCfg.OtelGRPCListenAddr = viper.GetString(OtelGRPCListenAddr) - GlobalCfg.OtelGRPCListenPort = viper.GetString(OtelGRPCListenPort) - GlobalCfg.CustomExportListenAddr = viper.GetString(CustomExportListenAddr) - GlobalCfg.CustomExportListenPort = viper.GetString(CustomExportListenPort) - GlobalCfg.PatchNamespace = viper.GetBool(PatchNamespace) - GlobalCfg.PatchRestartDeployments = viper.GetBool(PatchRestartDeployments) - GlobalCfg.AIEngineService = viper.GetString(AIEngineService) - GlobalCfg.AIEngineServicePort = viper.GetString(AIEngineServicePort) - GlobalCfg.AIEngineBatchSize = viper.GetInt(AIEngineBatchSize) - GlobalCfg.MetricsDBFileName = viper.GetString(MetricsDBFileName) - GlobalCfg.MetricsDBAggregationTime = viper.GetInt(MetricsDBAggregationTime) - GlobalCfg.MetricsDBClearTime = viper.GetInt(MetricsDBClearTime) - GlobalCfg.APIMetricsSendTime = viper.GetInt(APIMetricsSendTime) - GlobalCfg.CollectorEnableOpenTelemetry = viper.GetBool(CollectorEnableOpenTelemetry) - GlobalCfg.Debug = viper.GetBool(Debug) - - log.Printf("Configuration [%+v]", GlobalCfg) + GlobalConfig.CollectorAddr = viper.GetString(CollectorAddr) + GlobalConfig.CollectorPort = viper.GetString(CollectorPort) + + GlobalConfig.ExporterAddr = viper.GetString(ExporterAddr) + GlobalConfig.ExporterPort = viper.GetString(ExporterPort) + + GlobalConfig.PatchingNamespaces = viper.GetBool(PatchingNamespaces) + GlobalConfig.RestartingPatchedDeployments = viper.GetBool(RestartingPatchedDeployments) + + GlobalConfig.AggregationPeriod = viper.GetInt(AggregationPeriod) + GlobalConfig.CleanUpPeriod = viper.GetInt(CleanUpPeriod) + + GlobalConfig.AIEngineService = viper.GetString(AIEngineService) + GlobalConfig.AIEngineServicePort = viper.GetString(AIEngineServicePort) + GlobalConfig.AIEngineBatchSize = viper.GetInt(AIEngineBatchSize) + + GlobalConfig.Debug = viper.GetBool(Debug) + + log.Printf("Configuration [%+v]", GlobalConfig) return nil } diff --git a/sentryflow/core/k8sHandler.go b/sentryflow/core/k8sHandler.go deleted file mode 100644 index 193af0d..0000000 --- a/sentryflow/core/k8sHandler.go +++ /dev/null @@ -1,547 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package core - -import ( - "context" - "log" - "sync" - "time" - - "gopkg.in/yaml.v2" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" - - "github.com/5GSEC/SentryFlow/config" - "github.com/5GSEC/SentryFlow/types" -) - -// K8s global reference for Kubernetes Handler -var K8s *K8sHandler - -// init Function -func init() { - K8s = NewK8sHandler() -} - -// K8sHandler Structure -type 
K8sHandler struct { - config *rest.Config - clientSet *kubernetes.Clientset - - listWatchers map[string]*cache.ListWatch - informers map[string]cache.Controller - podMap map[string]*corev1.Pod // This map is NOT thread safe, meaning that race condition might occur - svcMap map[string]*corev1.Service // This map is NOT thread safe, meaning that race condition might occur -} - -// NewK8sHandler Function -func NewK8sHandler() *K8sHandler { - kh := &K8sHandler{ - listWatchers: make(map[string]*cache.ListWatch), - podMap: make(map[string]*corev1.Pod), - svcMap: make(map[string]*corev1.Service), - informers: make(map[string]cache.Controller), - } - - return kh -} - -// InitK8sClient Function -func (kh *K8sHandler) InitK8sClient() bool { - var err error - - // Initialize in cluster config - kh.config, err = rest.InClusterConfig() - if err != nil { - return false - } - - // Initialize Kubernetes clientSet - kh.clientSet, err = kubernetes.NewForConfig(kh.config) - if err != nil { - return false - } - - watchTargets := []string{"pods", "services"} - - // Look for existing resources in the cluster, create map - kh.initExistingResources() - - // Initialize watchers and informers for services and pods - // This will not run the informers yet - kh.initWatchers(watchTargets) - kh.initInformers() - - return true -} - -// initWatchers initializes watchers for pods and services in cluster -func (kh *K8sHandler) initWatchers(watchTargets []string) { - // Initialize watch for pods and services - for _, target := range watchTargets { - watcher := cache.NewListWatchFromClient( - kh.clientSet.CoreV1().RESTClient(), - target, - corev1.NamespaceAll, - fields.Everything(), - ) - kh.listWatchers[target] = watcher - } -} - -// initExistingResources will create a mapping table for existing services and pods into IPs -// This is required since informers are NOT going to see existing resources until they are updated, created or deleted -// Todo: Refactor this function, this is kind of messy -func (kh *K8sHandler) initExistingResources() { - // List existing Pods - podList, err := kh.clientSet.CoreV1().Pods(corev1.NamespaceAll).List(context.TODO(), v1.ListOptions{}) - if err != nil { - log.Print("Error listing Pods:", err.Error()) - return - } - - // Add existing Pods to the podMap - for _, pod := range podList.Items { - currentPod := pod - kh.podMap[pod.Status.PodIP] = ¤tPod - log.Printf("[K8s] Add existing pod %s: %s/%s", pod.Status.PodIP, pod.Namespace, pod.Name) - } - - // List existing Services - serviceList, err := kh.clientSet.CoreV1().Services(corev1.NamespaceAll).List(context.TODO(), v1.ListOptions{}) - if err != nil { - log.Print("Error listing Services:", err.Error()) - return - } - - // Add existing Services to the svcMap - for _, service := range serviceList.Items { - currentService := service // This will solve G601 for gosec - - // Check if the service has a LoadBalancer type - if service.Spec.Type == "LoadBalancer" { - for _, lbIngress := range service.Status.LoadBalancer.Ingress { - lbIP := lbIngress.IP - if lbIP != "" { - kh.svcMap[lbIP] = ¤tService - log.Printf("[K8s] Add existing service (LoadBalancer) %s: %s/%s", lbIP, service.Namespace, service.Name) - } - } - } else { - kh.svcMap[service.Spec.ClusterIP] = ¤tService - if len(service.Spec.ExternalIPs) != 0 { - for _, eIP := range service.Spec.ExternalIPs { - kh.svcMap[eIP] = ¤tService - log.Printf("[K8s] Add existing service %s: %s/%s", eIP, service.Namespace, service.Name) - } - } - } - } -} - -// initInformers initializes informers for services and 
pods in cluster -func (kh *K8sHandler) initInformers() { - // Create Pod controller informer - _, pc := cache.NewInformer( - kh.listWatchers["pods"], - &corev1.Pod{}, - time.Second*0, - cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { // Add pod information - pod := obj.(*corev1.Pod) - kh.podMap[pod.Status.PodIP] = pod - }, - UpdateFunc: func(oldObj, newObj interface{}) { // Update pod information - newPod := newObj.(*corev1.Pod) - kh.podMap[newPod.Status.PodIP] = newPod - }, - DeleteFunc: func(obj interface{}) { // Remove deleted pod information - pod := obj.(*corev1.Pod) - delete(kh.podMap, pod.Status.PodIP) - }, - }, - ) - - kh.informers["pods"] = pc - - // Create Service controller informer - _, sc := cache.NewInformer( - kh.listWatchers["services"], - &corev1.Service{}, - time.Second*0, - cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { // Add service information - service := obj.(*corev1.Service) - - if service.Spec.Type == "LoadBalancer" { - for _, lbIngress := range service.Status.LoadBalancer.Ingress { - lbIP := lbIngress.IP - if lbIP != "" { - kh.svcMap[lbIP] = service - } - } - } else { - kh.svcMap[service.Spec.ClusterIP] = service - if len(service.Spec.ExternalIPs) != 0 { - for _, eIP := range service.Spec.ExternalIPs { - kh.svcMap[eIP] = service - } - } - } - }, - UpdateFunc: func(oldObj, newObj interface{}) { // Update service information - newService := newObj.(*corev1.Service) - if newService.Spec.Type == "LoadBalancer" { - for _, lbIngress := range newService.Status.LoadBalancer.Ingress { - lbIP := lbIngress.IP - if lbIP != "" { - kh.svcMap[lbIP] = newService - } - } - } else { - kh.svcMap[newService.Spec.ClusterIP] = newService - if len(newService.Spec.ExternalIPs) != 0 { - for _, eIP := range newService.Spec.ExternalIPs { - kh.svcMap[eIP] = newService - } - } - } - }, - DeleteFunc: func(obj interface{}) { - service := obj.(*corev1.Service) - if service.Spec.Type == "LoadBalancer" { - for _, lbIngress := range service.Status.LoadBalancer.Ingress { - lbIP := lbIngress.IP - if lbIP != "" { - delete(kh.svcMap, lbIP) - } - } - } else { - delete(kh.svcMap, service.Spec.ClusterIP) // Remove deleted service information - if len(service.Spec.ExternalIPs) != 0 { - for _, eIP := range service.Spec.ExternalIPs { - delete(kh.svcMap, eIP) - } - } - } - }, - }, - ) - - kh.informers["services"] = sc -} - -// RunInformers starts running informers -func (kh *K8sHandler) RunInformers(stopCh chan struct{}, wg *sync.WaitGroup) { - wg.Add(1) - for name, informer := range kh.informers { - name := name - informer := informer - go func() { - log.Printf("[K8s] Started informers for %s", name) - informer.Run(stopCh) - - defer wg.Done() - }() - } - - log.Printf("[K8s] Started all informers") -} - -// lookupIPAddress Function -func (kh *K8sHandler) lookupIPAddress(ipAddr string) interface{} { - // Look for pod map first - pod, ok := kh.podMap[ipAddr] - if ok { - return pod - } - - // Look for service map - service, ok := kh.svcMap[ipAddr] - if ok { - return service - } - - return nil -} - -// LookupNetworkedResource Function -func LookupNetworkedResource(srcIP string) types.K8sNetworkedResource { - ret := types.K8sNetworkedResource{ - Name: "Unknown", - Namespace: "Unknown", - Labels: make(map[string]string), - Type: types.K8sResourceTypeUnknown, - } - - // Find Kubernetes resource from source IP (service or a pod) - raw := K8s.lookupIPAddress(srcIP) - - // Currently supports Service or Pod - switch raw.(type) { - case *corev1.Pod: - pod, ok := raw.(*corev1.Pod) 
- if ok { - ret.Name = pod.Name - ret.Namespace = pod.Namespace - ret.Labels = pod.Labels - ret.Type = types.K8sResourceTypePod - } - case *corev1.Service: - svc, ok := raw.(*corev1.Service) - if ok { - ret.Name = svc.Name - ret.Namespace = svc.Namespace - ret.Labels = svc.Labels - ret.Type = types.K8sResourceTypeService - } - default: - ret.Type = types.K8sResourceTypeUnknown - } - - return ret -} - -// PatchIstioConfigMap patches the Istio's configmap for meshConfig -// This will make istio know that there is an exporter with envoyOtelAls -func (kh *K8sHandler) PatchIstioConfigMap() error { - // Get the ConfigMap istio-system/istio - configMap, err := kh.clientSet.CoreV1(). - ConfigMaps("istio-system"). - Get(context.Background(), "istio", v1.GetOptions{}) - if err != nil { - // Handle error - log.Fatalf("[Patcher] Unable to retrieve configmap istio-system/istio :%v", err) - return err - } - - // Define a map to represent the structure of the mesh configuration - var meshConfig map[string]interface{} - - // Unmarshal the YAML string into the map - meshConfigStr := configMap.Data["mesh"] - err = yaml.Unmarshal([]byte(meshConfigStr), &meshConfig) - if err != nil { - // Handle error - log.Fatalf("[Patcher] Unable to unmarshall configmap istio-system/istio :%v", err) - return err - } - - _, eeaExist := meshConfig["enableEnvoyAccessLogService"] - if eeaExist { - log.Printf("Overwrite the contents of \"enableEnvoyAccessLogService\"") - } - meshConfig["enableEnvoyAccessLogService"] = true - - _, ealExist := meshConfig["defaultConfig"].(map[interface{}]interface{})["envoyAccessLogService"] - if ealExist { - log.Printf("Overwrite the contents of \"defaultConfig.envoyAccessLogService\"") - } - meshConfig["defaultConfig"].(map[interface{}]interface{})["envoyAccessLogService"] = map[string]string{ - "address": "sentryflow.sentryflow.svc.cluster.local:4317", - } - - _, emExist := meshConfig["defaultConfig"].(map[interface{}]interface{})["envoyMetricsService"] - if emExist { - log.Printf("Overwrite the contents of \"defaultConfig.envoyMetricsService\"") - } - meshConfig["defaultConfig"].(map[interface{}]interface{})["envoyMetricsService"] = map[string]string{ - "address": "sentryflow.sentryflow.svc.cluster.local:4317", - } - - // Work with defaultProviders.accessLogs - dp, exists := meshConfig["defaultProviders"].(map[interface{}]interface{})["accessLogs"] - if !exists { // Add defaultProviders.accessLogs if it does not exist - meshConfig["defaultProviders"].(map[interface{}]interface{})["accessLogs"] = []string{"sentryflow"} - } else { // Just add a new entry sentryflow if it exists - dpSlice := dp.([]interface{}) // @todo find better solution for this - duplicate := false - for _, entry := range dpSlice { - if entry == "sentryflow" { - // If "sentryflow" already exists, do nothing - log.Printf("[Patcher] istio-system/istio ConfigMap has " + - "sentryflow under defaultProviders.accessLogs, ignoring... 
") - duplicate = true - break - } - } - - // If "sentryflow" does not exist, append it - if !duplicate { - dpSlice = append(dpSlice, "sentryflow") - meshConfig["defaultProviders"].(map[interface{}]interface{})["accessLogs"] = dpSlice - } - } - - // ExtensionProvider for our service - eps := map[interface{}]interface{}{ - "name": "sentryflow", - "envoyOtelAls": map[interface{}]interface{}{ - "service": "sentryflow.sentryflow.svc.cluster.local", - "port": config.GlobalCfg.OtelGRPCListenPort, - }, - } - - // Work with extensionProviders - ep, exists := meshConfig["extensionProviders"] - if !exists { - // Create extensionProviders as a slice containing only the eps map - meshConfig["extensionProviders"] = []map[interface{}]interface{}{eps} - } else { - // Check if eps already exists in extensionProviders - epSlice, ok := ep.([]interface{}) - if !ok { - // handle the case where ep is not []interface{} - log.Printf("[Patcher] istio-system/istio ConfigMap extensionProviders has unexpected type") - } - - duplicate := false - for _, entry := range epSlice { - entryMap, ok := entry.(map[interface{}]interface{}) - if !ok { - // handle the case where an entry is not map[interface{}]interface{} - log.Printf("[Patcher] istio-system/istio ConfigMap extensionProviders entry has unexpected type") - } - if entryMap["name"] == eps["name"] { - // If "sentryflow" already exists, do nothing - log.Printf("[Patcher] istio-system/istio ConfigMap has sentryflow under extensionProviders, ignoring... ") - duplicate = true - break - } - } - - // Append eps to the existing slice - if !duplicate { - meshConfig["extensionProviders"] = append(ep.([]interface{}), eps) - } - } - - // Update the ConfigMap data with the modified meshConfig - updatedMeshConfig, err := yaml.Marshal(meshConfig) - if err != nil { - // Handle error - log.Fatalf("[Patcher] Unable to marshal updated meshConfig to YAML: %v", err) - return err - } - - // Convert the []byte to string - configMap.Data["mesh"] = string(updatedMeshConfig) - - // Preview changes, for debugging - if config.GlobalCfg.Debug { - log.Printf("[PATCH] Patching istio-system/istio ConfigMap as: \n%v", configMap) - } - - // Patch the ConfigMap back to the cluster - updatedConfigMap, err := kh.clientSet.CoreV1(). - ConfigMaps("istio-system"). 
- Update(context.Background(), configMap, v1.UpdateOptions{}) - if err != nil { - // Handle error - log.Fatalf("[Patcher] Unable to update configmap istio-system/istio :%v", err) - return err - } - - // Update successful - if config.GlobalCfg.Debug { - log.Printf("[Patcher] Updated istio-system/istio ConfigMap as: \n%v", updatedConfigMap) - } - return nil -} - -// PatchNamespaces patches namespaces for adding istio injection -func (kh *K8sHandler) PatchNamespaces() error { - // Get the list of namespaces - namespaces, err := kh.clientSet.CoreV1().Namespaces().List(context.Background(), v1.ListOptions{}) - if err != nil { - // Handle error - log.Fatalf("[Patcher] Unable to list namespaces: %v", err) - return err - } - - // Loop through each namespace and update it with the desired labels - // @todo make this skip adding labeles to namespaces which are defined in the config - for _, ns := range namespaces.Items { - currentNs := ns - - // We are not going to inject sidecars to sentryflow namespace - if currentNs.Name == "sentryflow" { - continue - } - - // Add istio-injection="enabled" for namespaces - currentNs.Labels["istio-injection"] = "enabled" - - // Update the namespace in the cluster - updatedNamespace, err := kh.clientSet.CoreV1().Namespaces().Update(context.TODO(), ¤tNs, v1.UpdateOptions{ - FieldManager: "patcher", - }) - if err != nil { - log.Printf("[Patcher] Unable to update namespace %s: %v", currentNs.Name, err) - return err - } - - log.Printf("[Patcher] Updated Namespace: %s\n", updatedNamespace.Name) - } - - return nil -} - -// PatchRestartDeployments restarts the deployments in namespaces which were applied with "istio-injection": "enabled" -func (kh *K8sHandler) PatchRestartDeployments() error { - // Get the list of all deployments in all namespaces - deployments, err := kh.clientSet.AppsV1().Deployments("").List(context.Background(), v1.ListOptions{}) - if err != nil { - // Handle error - log.Fatalf("[Patcher] Unable to list deployments: %v", err) - return err - } - - // Iterate over each deployment and restart it - for _, deployment := range deployments.Items { - // We are not going to inject sidecars to sentryflow namespace - if deployment.Namespace == "sentryflow" { - continue - } - - // Restart the deployment - err := kh.restartDeployment(deployment.Namespace, deployment.Name) - if err != nil { - // Handle error - log.Printf("[Patcher] Unable to restart deployment %s/%s: %v", deployment.Namespace, deployment.Name, err) - continue - } - - log.Printf("[Patcher] Deployment %s/%s restarted", deployment.Namespace, deployment.Name) - } - - return nil -} - -// restartDeployment performs a rolling restart for a deployment in the specified namespace -// @todo: fix this, this DOES NOT restart deployments -func (kh *K8sHandler) restartDeployment(namespace string, deploymentName string) error { - deploymentClient := kh.clientSet.AppsV1().Deployments(namespace) - - // Get the deployment to retrieve the current spec - deployment, err := deploymentClient.Get(context.Background(), deploymentName, v1.GetOptions{}) - if err != nil { - return err - } - - // Trigger a rolling restart by updating the deployment's labels or annotations - deployment.Spec.Template.ObjectMeta.Labels["restartedAt"] = v1.Now().String() - - // Update the deployment to trigger the rolling restart - _, err = deploymentClient.Update(context.TODO(), deployment, v1.UpdateOptions{}) - if err != nil { - return err - } - - return nil -} diff --git a/sentryflow/core/logHandler.go b/sentryflow/core/logHandler.go deleted 
file mode 100644 index 1528994..0000000 --- a/sentryflow/core/logHandler.go +++ /dev/null @@ -1,299 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package core - -import ( - "log" - "strconv" - "strings" - "sync" - - "github.com/5GSEC/SentryFlow/exporter" - "github.com/5GSEC/SentryFlow/metrics" - "github.com/5GSEC/SentryFlow/protobuf" - "github.com/5GSEC/SentryFlow/types" - accesslogv3 "github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3" - metricv3 "github.com/envoyproxy/go-control-plane/envoy/service/metrics/v3" -) - -// Lh global reference for LogHandler -var Lh *LogHandler - -// init Function -func init() { - Lh = NewLogHandler() -} - -// LogHandler Structure -type LogHandler struct { - stopChan chan struct{} - logChan chan interface{} -} - -// aggregationLog Structure -type aggregationLog struct { - Logs []*protobuf.APILog - Labels map[string]string - Annotations map[string]string -} - -// NewLogHandler Structure -func NewLogHandler() *LogHandler { - lh := &LogHandler{ - stopChan: make(chan struct{}), - logChan: make(chan interface{}), - } - - return lh -} - -// StartLogProcessor Function -func StartLogProcessor(wg *sync.WaitGroup) { - go Lh.logProcessingRoutine(wg) -} - -// StopLogProcessor Function -func StopLogProcessor() { - Lh.stopChan <- struct{}{} -} - -// InsertLog Function -func (lh *LogHandler) InsertLog(data interface{}) { - lh.logChan <- data -} - -// logProcessingRoutine Function -func (lh *LogHandler) logProcessingRoutine(wg *sync.WaitGroup) { - wg.Add(1) - for { - select { - case l, ok := <-lh.logChan: - if !ok { - log.Printf("[Error] Unable to process log") - } - - // Check new log's type - switch l.(type) { - case *protobuf.APILog: - go processAccessLog(l.(*protobuf.APILog)) - case *protobuf.EnvoyMetric: - go processEnvoyMetric(l.(*protobuf.EnvoyMetric)) - } - - case <-lh.stopChan: - wg.Done() - return - } - } -} - -// processAccessLog Function -func processAccessLog(al *protobuf.APILog) { - // Send AccessLog to exporter first - exporter.InsertAccessLog(al) - - // Then send AccessLog to metrics - metrics.InsertAccessLog(al) -} - -// processEnvoyMetric Function -func processEnvoyMetric(em *protobuf.EnvoyMetric) { - exporter.InsertEnvoyMetric(em) -} - -// GenerateAccessLogsFromOtel Function -func GenerateAccessLogsFromOtel(logText string) []*protobuf.APILog { - // @todo this needs more optimization, this code is kind of messy - // Create an array of AccessLogs for returning gRPC comm - var index int - ret := make([]*protobuf.APILog, 0) - - // Preprocess redundant chars - logText = strings.ReplaceAll(logText, `\"`, "") - logText = strings.ReplaceAll(logText, `}`, "") - - // Split logs by log_records, this is single access log instance - parts := strings.Split(logText, "log_records") - if len(parts) == 0 { - return nil - } - - // Ignore the first entry, this was the metadata "resource_logs:{resource:{ scope_logs:{" part. 
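(An aside on the removed parser below: further down it splits "ADDR:PORT" strings by hand with strings.LastIndex, which leaves the square brackets in place for IPv6 literals. The standard library's net.SplitHostPort handles both forms; a small sketch for reference, not part of the change.)

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// SplitHostPort accepts both plain IPv4 and bracketed IPv6 endpoints.
	for _, addr := range []string{"10.0.0.1:8080", "[::1]:8080"} {
		host, port, err := net.SplitHostPort(addr)
		if err != nil {
			fmt.Println("parse error:", err)
			continue
		}
		fmt.Printf("host=%s port=%s\n", host, port)
	}
}
```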
- for _, al := range parts[0:] { - if len(al) == 0 { - continue - } - - index = strings.Index(al, "string_value:\"") - if index == -1 { - continue - } - - result := al[index+len("string_value:\""):] - words := strings.Fields(result) - - method := words[1] - path := words[2] - protocolName := words[3] - timeStamp := words[0] - resCode, _ := strconv.ParseInt(words[4], 10, 64) - - srcInform := words[21] - dstInform := words[20] - - var srcIP string - var dstIP string - var srcPort string - var dstPort string - var colonIndex int - - // Extract the left and right words based on the colon delimiter (ADDR:PORT) - colonIndex = strings.LastIndex(srcInform, ":") - if colonIndex > 0 && colonIndex < len(srcInform)-1 { - srcIP = strings.TrimSpace(srcInform[:colonIndex]) - srcPort = strings.TrimSpace(srcInform[colonIndex+1:]) - } - - colonIndex = strings.LastIndex(dstInform, ":") - if colonIndex > 0 && colonIndex < len(dstInform)-1 { - dstIP = strings.TrimSpace(dstInform[:colonIndex]) - dstPort = strings.TrimSpace(dstInform[colonIndex+1:]) - } - - // Lookup using K8s API - src := LookupNetworkedResource(srcIP) - dst := LookupNetworkedResource(dstIP) - - // Create AccessLog in our gRPC format - cur := protobuf.APILog{ - TimeStamp: timeStamp, - Id: 0, // do 0 for now, we are going to write it later - SrcNamespace: src.Namespace, - SrcName: src.Name, - SrcLabel: src.Labels, - SrcIP: srcIP, - SrcPort: srcPort, - SrcType: types.K8sResourceTypeToString(src.Type), - DstNamespace: dst.Namespace, - DstName: dst.Name, - DstLabel: dst.Labels, - DstIP: dstIP, - DstPort: dstPort, - DstType: types.K8sResourceTypeToString(dst.Type), - Protocol: protocolName, - Method: method, - Path: path, - ResponseCode: int32(resCode), - } - - ret = append(ret, &cur) - } - - return ret -} - -// GenerateAccessLogsFromEnvoy Function -func GenerateAccessLogsFromEnvoy(entry *accesslogv3.HTTPAccessLogEntry) *protobuf.APILog { - srcInform := entry.GetCommonProperties().GetDownstreamRemoteAddress().GetSocketAddress() - srcIP := srcInform.GetAddress() - srcPort := strconv.Itoa(int(srcInform.GetPortValue())) - src := LookupNetworkedResource(srcIP) - - dstInform := entry.GetCommonProperties().GetUpstreamRemoteAddress().GetSocketAddress() - dstIP := dstInform.GetAddress() - dstPort := strconv.Itoa(int(dstInform.GetPortValue())) - dst := LookupNetworkedResource(dstIP) - - req := entry.GetRequest() - res := entry.GetResponse() - comm := entry.GetCommonProperties() - proto := entry.GetProtocolVersion() - - timeStamp := comm.GetStartTime().Seconds - path := req.GetPath() - method := req.GetRequestMethod().String() - protocolName := proto.String() - resCode := res.GetResponseCode().GetValue() - - envoyAccessLog := &protobuf.APILog{ - TimeStamp: strconv.FormatInt(timeStamp, 10), - Id: 0, // do 0 for now, we are going to write it later - SrcNamespace: src.Namespace, - SrcName: src.Name, - SrcLabel: src.Labels, - SrcIP: srcIP, - SrcPort: srcPort, - SrcType: types.K8sResourceTypeToString(src.Type), - DstNamespace: dst.Namespace, - DstName: dst.Name, - DstLabel: dst.Labels, - DstIP: dstIP, - DstPort: dstPort, - DstType: types.K8sResourceTypeToString(dst.Type), - Protocol: protocolName, - Method: method, - Path: path, - ResponseCode: int32(resCode), - } - - return envoyAccessLog -} - -// GenerateMetricFromEnvoy Function -func GenerateMetricFromEnvoy(event *metricv3.StreamMetricsMessage, metaData map[string]interface{}) *protobuf.EnvoyMetric { - pod := LookupNetworkedResource(metaData["INSTANCE_IPS"].(string)) - envoyMetric := &protobuf.EnvoyMetric{ - 
PodIP: metaData["INSTANCE_IPS"].(string), - Name: metaData["NAME"].(string), - Namespace: metaData["NAMESPACE"].(string), - Labels: pod.Labels, - TimeStamp: "", - Metric: make(map[string]*protobuf.MetricValue), - } - - envoyMetric.Metric["GAUGE"] = &protobuf.MetricValue{ - Value: make(map[string]string), - } - envoyMetric.Metric["COUNTER"] = &protobuf.MetricValue{ - Value: make(map[string]string), - } - envoyMetric.Metric["HISTOGRAM"] = &protobuf.MetricValue{ - Value: make(map[string]string), - } - envoyMetric.Metric["SUMMARY"] = &protobuf.MetricValue{ - Value: make(map[string]string), - } - - for _, metric := range event.GetEnvoyMetrics() { - metricType := metric.GetType().String() - metricName := metric.GetName() - - if envoyMetric.Metric[metricType].Value == nil { - continue - } - - var metricValue string - - for _, metricDetail := range metric.GetMetric() { - if envoyMetric.TimeStamp == "" { - envoyMetric.TimeStamp = strconv.FormatInt(metricDetail.GetTimestampMs(), 10) - } - if metricType == "GAUGE" { - metricValue = strconv.FormatFloat(metricDetail.GetGauge().GetValue(), 'f', -1, 64) - } - if metricType == "COUNTER" { - metricValue = strconv.FormatFloat(metricDetail.GetCounter().GetValue(), 'f', -1, 64) - } - if metricType == "HISTOGRAM" { - metricValue = strconv.FormatUint(metricDetail.GetHistogram().GetSampleCount(), 10) - } - if metricType == "SUMMARY" { - metricValue = strconv.FormatUint(metricDetail.GetHistogram().GetSampleCount(), 10) - } - - envoyMetric.Metric[metricType].Value[metricName] = metricValue - } - } - - return envoyMetric -} diff --git a/sentryflow/core/sentryflow.go b/sentryflow/core/sentryflow.go index 969f157..30c5e5d 100644 --- a/sentryflow/core/sentryflow.go +++ b/sentryflow/core/sentryflow.go @@ -4,13 +4,20 @@ package core import ( "log" + "os" + "os/signal" "sync" + "syscall" - cfg "github.com/5GSEC/SentryFlow/config" - "github.com/5GSEC/SentryFlow/exporter" - "github.com/5GSEC/SentryFlow/metrics" + "github.com/5gsec/SentryFlow/collector" + "github.com/5gsec/SentryFlow/config" + "github.com/5gsec/SentryFlow/exporter" + "github.com/5gsec/SentryFlow/k8s" + "github.com/5gsec/SentryFlow/processor" ) +// == // + // StopChan Channel var StopChan chan struct{} @@ -19,129 +26,171 @@ func init() { StopChan = make(chan struct{}) } -// SentryFlowDaemon Structure -type SentryFlowDaemon struct { - WgDaemon *sync.WaitGroup +// SentryFlowService Structure +type SentryFlowService struct { + waitGroup *sync.WaitGroup } -// NewSentryFlowDaemon Function -func NewSentryFlowDaemon() *SentryFlowDaemon { - dm := new(SentryFlowDaemon) - - dm.WgDaemon = new(sync.WaitGroup) - - return dm +// NewSentryFlow Function +func NewSentryFlow() *SentryFlowService { + sf := new(SentryFlowService) + sf.waitGroup = new(sync.WaitGroup) + return sf } -// DestroySentryFlowDaemon Function -func (dm *SentryFlowDaemon) DestroySentryFlowDaemon() { - //metrics.StartAIEngine() - log.Printf("[SentryFlow] Started AI Engine connection") -} +// DestroySentryFlow Function +func (sf *SentryFlowService) DestroySentryFlow() { + close(StopChan) -// watchK8s Function -func (dm *SentryFlowDaemon) watchK8s() { - K8s.RunInformers(StopChan, dm.WgDaemon) -} + // Remove SentryFlow collector config from Kubernetes + if k8s.UnpatchIstioConfigMap() { + log.Print("[SentryFlow] Unpatched Istio ConfigMap") + } else { + log.Fatal("[SentryFlow] Failed to unpatch Istio ConfigMap") + } -// logProcessor Function -func (dm *SentryFlowDaemon) logProcessor() { - StartLogProcessor(dm.WgDaemon) - log.Printf("[SentryFlow] Started log 
processor") -} + // Stop collector + if collector.StopCollector() { + log.Print("[SentryFlow] Stopped Collectors") + } else { + log.Fatal("[SentryFlow] Failed to stop Collectors") + } -// metricAnalyzer Function -func (dm *SentryFlowDaemon) metricAnalyzer() { - metrics.StartMetricsAnalyzer(dm.WgDaemon) - log.Printf("[SentryFlow] Started metric analyzer") -} + // Stop Log Processor + if processor.StopLogProcessor() { + log.Print("[SentryFlow] Stopped Log Processors") + } else { + log.Fatal("[SentryFlow] Failed to stop Log Processors") + } -// exporterServer Function -func (dm *SentryFlowDaemon) exporterServer() { - // Initialize and start exporter server - err := exporter.Exp.InitExporterServer() - if err != nil { - log.Fatalf("[SentryFlow] Unable to initialize Exporter Server: %v", err) - return + // Stop API Aanalyzer + if processor.StopAPIAnalyzer() { + log.Print("[SentryFlow] Stopped API Analyzer") + } else { + log.Fatal("[SentryFlow] Failed to stop API Analyzer") } - err = exporter.Exp.StartExporterServer(dm.WgDaemon) - if err != nil { - log.Fatalf("[SentryFlow] Unable to start Exporter Server: %v", err) + // Stop API classifier + if processor.StopAPIClassifier() { + log.Print("[SentryFlow] Stopped API Classifier") + } else { + log.Fatal("[SentryFlow] Failed to stop API Classifier") + } + + // Stop exporter + if exporter.StopExporter() { + log.Print("[SentryFlow] Stopped Exporters") + } else { + log.Fatal("[SentryFlow] Failed to stop Exporters") } - log.Printf("[SentryFlow] Initialized exporter") -} -func (dm *SentryFlowDaemon) aiEngine() { + log.Print("[SentryFlow] Waiting for routine terminations") + sf.waitGroup.Wait() + + log.Print("[SentryFlow] Terminated SentryFlow") } -// patchK8s Function -func (dm *SentryFlowDaemon) patchK8s() error { - err := K8s.PatchIstioConfigMap() - if err != nil { - return err - } +// == // - if cfg.GlobalCfg.PatchNamespace { - err = K8s.PatchNamespaces() - if err != nil { - return err - } - } +// GetOSSigChannel Function +func GetOSSigChannel() chan os.Signal { + c := make(chan os.Signal, 1) - if cfg.GlobalCfg.PatchRestartDeployments { - err = K8s.PatchRestartDeployments() - if err != nil { - return err - } - } + signal.Notify(c, + syscall.SIGHUP, + syscall.SIGINT, + syscall.SIGTERM, + syscall.SIGQUIT, + os.Interrupt) - return nil + return c } +// == // + // SentryFlow Function func SentryFlow() { - // create a daemon - dm := NewSentryFlowDaemon() + sf := NewSentryFlow() + + log.Print("[SentryFlow] Initializing SentryFlow") + + // == // // Initialize Kubernetes client - if !K8s.InitK8sClient() { - log.Printf("[Error] Failed to initialize Kubernetes client") - dm.DestroySentryFlowDaemon() + if !k8s.InitK8sClient() { + sf.DestroySentryFlow() return } - log.Printf("[SentryFlow] Initialized Kubernetes client") + // Start Kubernetes informers + k8s.RunInformers(StopChan, sf.waitGroup) + + // Patch Istio ConfigMap + if !k8s.PatchIstioConfigMap() { + sf.DestroySentryFlow() + return + } - dm.watchK8s() - log.Printf("[SentryFlow] Started to monitor Kubernetes resources") + // Patch Namespaces + if config.GlobalConfig.PatchingNamespaces { + if !k8s.PatchNamespaces() { + sf.DestroySentryFlow() + return + } + } - if dm.patchK8s() != nil { - log.Printf("[SentryFlow] Failed to patch Kubernetes") + // Patch Deployments + if config.GlobalConfig.RestartingPatchedDeployments { + if !k8s.RestartDeployments() { + sf.DestroySentryFlow() + return + } } - log.Printf("[SentryFlow] Patched Kubernetes and Istio configuration") - if !exporter.MDB.InitMetricsDBHandler() { - 
log.Printf("[Error] Failed to initialize Metrics DB") + // == // + + // Start collector + if !collector.StartCollector() { + sf.DestroySentryFlow() + return } - log.Printf("[SentryFlow] Successfuly initialized metrics DB") // Start log processor - dm.logProcessor() + if !processor.StartLogProcessor(sf.waitGroup) { + sf.DestroySentryFlow() + return + } - // Start metric analyzer - dm.metricAnalyzer() + // Start API analyzer + if !processor.StartAPIAnalyzer(sf.waitGroup) { + sf.DestroySentryFlow() + return + } - // Start exporter server - dm.exporterServer() + // Start API classifier + if !processor.StartAPIClassifier(sf.waitGroup) { + sf.DestroySentryFlow() + return + } - if !exporter.AH.InitAIHandler() { - log.Printf("[Error] Failed to initialize AI Engine") + // Start exporter + if !exporter.StartExporter(sf.waitGroup) { + sf.DestroySentryFlow() return } - log.Printf("[SentryFlow] Successfuly initialized AI Engine") - log.Printf("[SentryFlow] Successfully started SentryFlow") - dm.WgDaemon.Wait() + log.Print("[SentryFlow] Initialization is completed") + + // == // + + // listen for interrupt signals + sigChan := GetOSSigChannel() + <-sigChan + log.Print("Got a signal to terminate SentryFlow") + + // == // + + // Destroy SentryFlow + sf.DestroySentryFlow() } diff --git a/sentryflow/exporter/aiHandler.go b/sentryflow/exporter/aiHandler.go deleted file mode 100644 index 14a668e..0000000 --- a/sentryflow/exporter/aiHandler.go +++ /dev/null @@ -1,182 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package exporter - -import ( - "context" - "fmt" - "io" - "log" - - "google.golang.org/grpc" - - cfg "github.com/5GSEC/SentryFlow/config" - "github.com/5GSEC/SentryFlow/protobuf" - "github.com/5GSEC/SentryFlow/types" -) - -// AH Local reference for AI handler server -var AH *aiHandler - -// aiHandler Structure -type aiHandler struct { - aiHost string - aiPort string - - error chan error - stopChan chan struct{} - aggregatedLogs chan []*protobuf.APILog - apis chan []string - - aiStream *streamInform - - // @todo: add gRPC stream here for bidirectional connection -} - -// streamInform Structure -type streamInform struct { - aiStream protobuf.SentryFlowMetrics_GetAPIClassificationClient -} - -// init Function -func init() { - // Construct address and start listening - AH = NewAIHandler(cfg.AIEngineService, cfg.AIEngineServicePort) -} - -// NewAIHandler Function -func NewAIHandler(host string, port string) *aiHandler { - ah := &aiHandler{ - aiHost: host, - aiPort: port, - - stopChan: make(chan struct{}), - aggregatedLogs: make(chan []*protobuf.APILog), - apis: make(chan []string), - } - - return ah -} - -// initHandler Function -func (ah *aiHandler) InitAIHandler() bool { - addr := fmt.Sprintf("%s:%s", "10.10.0.116", cfg.GlobalCfg.AIEngineServicePort) - - // Set up a connection to the server. 
- conn, err := grpc.Dial(addr, grpc.WithInsecure()) - if err != nil { - log.Fatalf("could not connect: %v", err) - return false - } - - // Start serving gRPC server - log.Printf("[gRPC] Successfully connected to %s for APIMetric", addr) - - client := protobuf.NewSentryFlowMetricsClient(conn) - - aiStream, err := client.GetAPIClassification(context.Background()) - if err != nil { - log.Printf("[gRPC] Error getting API classification: %v", err) - return false - } - - AH.aiStream = &streamInform{ - aiStream: aiStream, - } - done := make(chan struct{}) - - go sendAPIRoutine() - go recvAPIRoutine(done) - - return true -} - -// InsertAPILog function -func InsertAPILog(APIs []string) { - AH.apis <- APIs -} - -// callAI Function -func (ah *aiHandler) callAI(api string) error { - // @todo: add gRPC send request - return nil -} - -// processBatch Function -func processBatch(batch []string, update bool) error { - for range batch { - - } - - return nil -} - -// performHealthCheck Function -func (ah *aiHandler) performHealthCheck() error { - return nil -} - -// disconnect Function -func (ah *aiHandler) disconnect() { - return -} - -// sendAPIRoutine Function -func sendAPIRoutine() { -routineLoop: - for { - select { - case aal, ok := <-AH.apis: - if !ok { - log.Printf("[Exporter] EnvoyMetric exporter channel closed") - break routineLoop - } - - curAPIRequest := &protobuf.APIClassificationRequest{ - Path: aal, - } - - // err := AH.aiStream.Send(curAPIRequest) - err := AH.aiStream.aiStream.Send(curAPIRequest) - if err != nil { - log.Printf("[Exporter] AI Engine APIs exporting failed %v:", err) - } - case <-AH.stopChan: - break routineLoop - } - } - - return -} - -// recvAPIRoutine Function -func recvAPIRoutine(done chan struct{}) error { - for { - select { - default: - event, err := AH.aiStream.aiStream.Recv() - if err == io.EOF { - return nil - } - - if err != nil { - log.Printf("[Envoy] Something went on wrong when receiving event: %v", err) - return err - } - - for key, value := range event.Fields { - APICount := &types.PerAPICount{ - API: key, - Count: value, - } - err := MDB.PerAPICountInsert(APICount) - if err != nil { - log.Printf("unable to insert Classified API") - return err - } - } - case <-done: - return nil - } - } -} diff --git a/sentryflow/exporter/dbHandler.go b/sentryflow/exporter/dbHandler.go deleted file mode 100644 index 103b627..0000000 --- a/sentryflow/exporter/dbHandler.go +++ /dev/null @@ -1,324 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package exporter - -import ( - "database/sql" - "log" - "os" - "path/filepath" - "time" - - cfg "github.com/5GSEC/SentryFlow/config" - "github.com/5GSEC/SentryFlow/protobuf" - "github.com/5GSEC/SentryFlow/types" - "google.golang.org/protobuf/proto" - - "github.com/mattn/go-sqlite3" -) - -// MDB global reference for Sqlite3 Handler -var MDB *MetricsDBHandler - -// MetricsDBHandler Structure -type MetricsDBHandler struct { - db *sql.DB - dbFile string - dbClearTime int -} - -// AggregationData Structure -type AggregationData struct { - Labels string - Namespace string - AccessLogs []string -} - -// init Function -func init() { - MDB = NewMetricsDBHandler() -} - -// NewMetricsDBHandler Function -func NewMetricsDBHandler() *MetricsDBHandler { - ret := &MetricsDBHandler{ - dbFile: cfg.GlobalCfg.MetricsDBFileName, - dbClearTime: cfg.GlobalCfg.MetricsDBClearTime, - } - return ret -} - -// InitMetricsDBHandler Function -func (md *MetricsDBHandler) InitMetricsDBHandler() bool { - libVersion, libVersionNumber, sourceID := sqlite3.Version() - 
log.Printf("[DB] Using Sqlite Version is %v %v %v", libVersion, libVersionNumber, sourceID) - log.Printf("[DB] Using DB File as %s", md.dbFile) - targetDir := filepath.Dir(md.dbFile) - _, err := os.Stat(targetDir) - if err != nil { - log.Printf("[DB] Unable to find target directory %s, creating one...", targetDir) - err := os.Mkdir(targetDir, 0750) - if err != nil { - log.Printf("[Error] Unable to create directory for metrics DB %s: %v", targetDir, err) - return false - } - } - - md.db, err = sql.Open("sqlite3", md.dbFile) - if err != nil { - log.Printf("[Error] Unable to open metrics DB: %v", err) - return false - } - - err = md.initDBTables() - if err != nil { - log.Printf("[Error] Unable to initialize metrics DB tables: %v", err) - return false - } - - go aggregationTimeTickerRoutine() - go exportTimeTickerRoutine() - go DBClearRoutine() - - return true -} - -// StopMetricsDBHandler Function -func (md *MetricsDBHandler) StopMetricsDBHandler() { - _ = md.db.Close() -} - -// initDBTables Function -func (md *MetricsDBHandler) initDBTables() error { - _, err := md.db.Exec(` - CREATE TABLE IF NOT EXISTS aggregation_table ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - labels TEXT, - namespace TEXT, - accesslog BLOB - ); - - CREATE TABLE IF NOT EXISTS per_api_metrics ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - api TEXT, - count INTEGER - ); - `) - - return err -} - -// AccessLogInsert Function -func (md *MetricsDBHandler) AccessLogInsert(data types.DbAccessLogType) error { - alData, err := proto.Marshal(data.AccessLog) - if err != nil { - return err - } - - _, err = md.db.Exec("INSERT INTO aggregation_table (labels, namespace, accesslog) VALUES (?, ?, ?)", data.Labels, data.Namespace, alData) - if err != nil { - log.Printf("INSERT accesslog error: %v", err) - return err - } - - return err -} - -// GetLabelNamespacePairs Function -func (md *MetricsDBHandler) GetLabelNamespacePairs() ([]AggregationData, error) { - query := ` - SELECT labels, namespace - FROM aggregation_table - GROUP BY labels, namespace - ` - - rows, err := md.db.Query(query) - if err != nil { - return nil, err - } - defer rows.Close() - - var pairs []AggregationData - for rows.Next() { - var labels, namespace string - err := rows.Scan(&labels, &namespace) - if err != nil { - return nil, err - } - pair := AggregationData{ - Labels: labels, - Namespace: namespace, - } - - pairs = append(pairs, pair) - } - return pairs, nil -} - -// AggregatedAccessLogSelect Function -func (md *MetricsDBHandler) AggregatedAccessLogSelect() (map[string][]*protobuf.APILog, error) { - als := make(map[string][]*protobuf.APILog) - pairs, err := md.GetLabelNamespacePairs() - if err != nil { - return nil, err - } - - query := ` - SELECT accesslog - FROM aggregation_table - WHERE labels = ? AND namespace = ? 
- ` - for _, pair := range pairs { - curKey := pair.Labels + pair.Namespace - rows, err := md.db.Query(query, pair.Labels, pair.Namespace) - if err != nil { - return nil, err - } - defer rows.Close() - - var accessLogs []*protobuf.APILog - for rows.Next() { - var accessLog []byte - err := rows.Scan(&accessLog) - if err != nil { - return nil, err - } - - al := &protobuf.APILog{} - err = proto.Unmarshal(accessLog, al) - - accessLogs = append(accessLogs, al) - } - als[curKey] = accessLogs - } - - return als, err -} - -// PerAPICountInsert Function -func (md *MetricsDBHandler) PerAPICountInsert(data *types.PerAPICount) error { - var existAPI int - err := md.db.QueryRow("SELECT COUNT(*) FROM per_api_metrics WHERE api = ?", data.API).Scan(&existAPI) - if err != nil { - return err - } - - if existAPI == 0 { - _, err := md.db.Exec("INSERT INTO per_api_metrics (api, count) VALUES (?, ?)", data.API, data.Count) - if err != nil { - return err - } - } else { - err := md.PerAPICountUpdate(data) - if err != nil { - return err - } - } - - return err -} - -// PerAPICountSelect Function -func (md *MetricsDBHandler) PerAPICountSelect(api string) (types.PerAPICount, error) { - var tm types.PerAPICount - - err := md.db.QueryRow("SELECT api, count FROM per_api_metrics WHERE api = ?", api).Scan(&tm.API, &tm.Count) - if err != nil { - return tm, err - } - - return tm, err -} - -// PerAPICountDelete Function -func (md *MetricsDBHandler) PerAPICountDelete(api string) error { - _, err := md.db.Exec("DELETE FROM per_api_metrics WHERE api = ?", api) - if err != nil { - return err - } - - return nil -} - -// PerAPICountUpdate Function -func (md *MetricsDBHandler) PerAPICountUpdate(data *types.PerAPICount) error { - var existAPI int - err := md.db.QueryRow("SELECT COUNT(*) FROM per_api_metrics WHERE api = ?", data.API).Scan(&existAPI) - if err != nil { - return err - } - - if existAPI > 0 { - _, err = md.db.Exec("UPDATE per_api_metrics SET count = ? 
WHERE api = ?", data.Count, data.API) - if err != nil { - return err - } - } - - return nil -} - -// GetAllMetrics Function -func (md *MetricsDBHandler) GetAllMetrics() (map[string]uint64, error) { - metrics := make(map[string]uint64) - - rows, err := md.db.Query("SELECT api, count FROM per_api_metrics") - if err != nil { - return nil, err - } - defer rows.Close() - - for rows.Next() { - var metric types.PerAPICount - err := rows.Scan(&metric.API, &metric.Count) - if err != nil { - return nil, err - } - metrics[metric.API] = metric.Count - } - - if err := rows.Err(); err != nil { - return nil, err - } - - return metrics, nil -} - -// ClearAllTable Function -func (md *MetricsDBHandler) ClearAllTable() error { - _, err := md.db.Exec("DELETE FROM aggregation_table") - if err != nil { - log.Fatal(err) - return err - } - log.Println("Data in 'aggregation_table' deleted successfully.") - - _, err = md.db.Exec("DELETE FROM per_api_metrics") - if err != nil { - log.Fatal(err) - return err - } - log.Println("Data in 'per_api_metrics' deleted successfully.") - - return nil -} - -// DBClearRoutine Function -func DBClearRoutine() error { - ticker := time.NewTicker(time.Duration(MDB.dbClearTime) * time.Second) - - defer ticker.Stop() - - for { - select { - case <-ticker.C: - err := MDB.ClearAllTable() - if err != nil { - log.Printf("[Error] Unable to Clear DB tables: %v", err) - return err - } - } - } - - return nil -} diff --git a/sentryflow/exporter/exportAPILogs.go b/sentryflow/exporter/exportAPILogs.go new file mode 100644 index 0000000..4185b25 --- /dev/null +++ b/sentryflow/exporter/exportAPILogs.go @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: Apache-2.0 + +package exporter + +import ( + "errors" + "fmt" + "log" + "sort" + "strings" + + "github.com/5gsec/SentryFlow/protobuf" +) + +// == // + +// apiLogStreamInform structure +type apiLogStreamInform struct { + Hostname string + IPAddress string + + stream protobuf.SentryFlow_GetAPILogServer + + error chan error +} + +// GetAPILog Function (for gRPC) +func (exs *ExpService) GetAPILog(info *protobuf.ClientInfo, stream protobuf.SentryFlow_GetAPILogServer) error { + log.Printf("[Exporter] Client %s (%s) connected (GetAPILog)", info.HostName, info.IPAddress) + + currExporter := &apiLogStreamInform{ + Hostname: info.HostName, + IPAddress: info.IPAddress, + stream: stream, + } + + ExpH.exporterLock.Lock() + ExpH.apiLogExporters = append(ExpH.apiLogExporters, currExporter) + ExpH.exporterLock.Unlock() + + return <-currExporter.error +} + +// SendAPILogs Function +func (exp *ExpHandler) SendAPILogs(apiLog *protobuf.APILog) error { + failed := 0 + total := len(exp.apiLogExporters) + + for _, exporter := range exp.apiLogExporters { + if err := exporter.stream.Send(apiLog); err != nil { + log.Fatalf("[Exporter] Failed to export an API log to %s (%s): %v", exporter.Hostname, exporter.IPAddress, err) + failed++ + } + } + + if failed != 0 { + msg := fmt.Sprintf("[Exporter] Failed to export API logs properly (%d/%d failed)", failed, total) + return errors.New(msg) + } + + return nil +} + +// == // + +// InsertAPILog Function +func InsertAPILog(apiLog *protobuf.APILog) { + ExpH.exporterAPILogs <- apiLog + + // Make a string with labels + var labelString []string + for k, v := range apiLog.SrcLabel { + labelString = append(labelString, fmt.Sprintf("%s:%s", k, v)) + } + sort.Strings(labelString) + + // Update Stats per namespace and per labels + UpdateStats(apiLog.SrcNamespace, strings.Join(labelString, ","), apiLog.GetPath()) +} + +// == // diff --git 
a/sentryflow/exporter/exportAPIMetrics.go b/sentryflow/exporter/exportAPIMetrics.go new file mode 100644 index 0000000..690e11a --- /dev/null +++ b/sentryflow/exporter/exportAPIMetrics.go @@ -0,0 +1,177 @@ +// SPDX-License-Identifier: Apache-2.0 + +package exporter + +import ( + "errors" + "fmt" + "log" + "time" + + "github.com/5gsec/SentryFlow/config" + "github.com/5gsec/SentryFlow/protobuf" +) + +// == // + +// Stats Structure +type Stats struct { + Count int +} + +// StatsPerLabel structure +type StatsPerLabel struct { + APIs map[string]Stats + LastUpdated uint64 +} + +// == // + +// apiMetricStreamInform structure +type apiMetricStreamInform struct { + Hostname string + IPAddress string + + apiMetricsStream protobuf.SentryFlow_GetAPIMetricsServer + + error chan error +} + +// GetAPIMetrics Function (for gRPC) +func (exs *ExpService) GetAPIMetrics(info *protobuf.ClientInfo, stream protobuf.SentryFlow_GetAPIMetricsServer) error { + log.Printf("[Exporter] Client %s (%s) connected (GetAPIMetrics)", info.HostName, info.IPAddress) + + currExporter := &apiMetricStreamInform{ + Hostname: info.HostName, + IPAddress: info.IPAddress, + apiMetricsStream: stream, + } + + ExpH.exporterLock.Lock() + ExpH.apiMetricsExporters = append(ExpH.apiMetricsExporters, currExporter) + ExpH.exporterLock.Unlock() + + return <-currExporter.error +} + +// SendAPIMetrics Function +func (exp *ExpHandler) SendAPIMetrics(apiMetrics *protobuf.APIMetrics) error { + failed := 0 + total := len(exp.apiMetricsExporters) + + for _, exporter := range exp.apiMetricsExporters { + if err := exporter.apiMetricsStream.Send(apiMetrics); err != nil { + log.Fatalf("[Exporter] Failed to export API metrics to %s (%s): %v", exporter.Hostname, exporter.IPAddress, err) + failed++ + } + } + + if failed != 0 { + msg := fmt.Sprintf("[Exporter] Failed to export API metrics properly (%d/%d failed)", failed, total) + return errors.New(msg) + } + + return nil +} + +// == // + +// UpdateStats Function +func UpdateStats(namespace string, label string, api string) { + ExpH.statsPerLabelLock.RLock() + defer ExpH.statsPerLabelLock.RUnlock() + + // Check if namespace+label exists + if _, ok := ExpH.statsPerLabel[namespace+label]; !ok { + ExpH.statsPerLabel[namespace+label] = StatsPerLabel{ + APIs: make(map[string]Stats), + LastUpdated: uint64(time.Now().Unix()), + } + } + + statsPerLabel := ExpH.statsPerLabel[namespace+label] + statsPerLabel.LastUpdated = uint64(time.Now().Unix()) + + // Check if API exists + if _, ok := statsPerLabel.APIs[api]; !ok { + init := Stats{ + Count: 1, + } + statsPerLabel.APIs[api] = init + } else { + stats := statsPerLabel.APIs[api] + stats.Count++ + statsPerLabel.APIs[api] = stats + } + + ExpH.statsPerLabel[namespace+label] = statsPerLabel +} + +// AggregateAPIMetrics Function +func AggregateAPIMetrics() { + ticker := time.NewTicker(time.Duration(config.GlobalConfig.AggregationPeriod) * time.Second) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + ExpH.statsPerLabelLock.RLock() + + APIMetrics := make(map[string]uint64) + + for _, statsPerLabel := range ExpH.statsPerLabel { + for api, stats := range statsPerLabel.APIs { + APIMetrics[api] = uint64(stats.Count) + } + } + + if len(APIMetrics) > 0 { + err := ExpH.SendAPIMetrics(&protobuf.APIMetrics{PerAPICounts: APIMetrics}) + if err != nil { + log.Fatalf("[Envoy] Failed to export API metrics: %v", err) + return + } + } + + ExpH.statsPerLabelLock.RUnlock() + case <-ExpH.stopChan: + return + } + } +} + +// CleanUpOutdatedStats Function +func CleanUpOutdatedStats() 
{ + ticker := time.NewTicker(time.Duration(config.GlobalConfig.CleanUpPeriod) * time.Second) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + ExpH.statsPerLabelLock.Lock() + + cleanUpTime := uint64((time.Now().Add(-time.Duration(config.GlobalConfig.CleanUpPeriod) * time.Second)).Unix()) + labelToDelete := []string{} + + for label, statsPerLabel := range ExpH.statsPerLabel { + if statsPerLabel.LastUpdated < cleanUpTime { + labelToDelete = append(labelToDelete, label) + } + } + + for _, label := range labelToDelete { + delete(ExpH.statsPerLabel, label) + } + + ExpH.statsPerLabelLock.Unlock() + case <-ExpH.stopChan: + return + } + } +} + +// == // + +// Exporting API metrics is handled by API Classifier + +// == // diff --git a/sentryflow/exporter/exportEnvoyMetrics.go b/sentryflow/exporter/exportEnvoyMetrics.go new file mode 100644 index 0000000..184004a --- /dev/null +++ b/sentryflow/exporter/exportEnvoyMetrics.go @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: Apache-2.0 + +package exporter + +import ( + "errors" + "fmt" + "log" + + "github.com/5gsec/SentryFlow/protobuf" +) + +// == // + +// envoyMetricsStreamInform structure +type envoyMetricsStreamInform struct { + Hostname string + IPAddress string + + metricsStream protobuf.SentryFlow_GetEnvoyMetricsServer + + error chan error +} + +// GetEnvoyMetrics Function (for gRPC) +func (exs *ExpService) GetEnvoyMetrics(info *protobuf.ClientInfo, stream protobuf.SentryFlow_GetEnvoyMetricsServer) error { + log.Printf("[Exporter] Client %s (%s) connected (GetEnvoyMetrics)", info.HostName, info.IPAddress) + + currExporter := &envoyMetricsStreamInform{ + Hostname: info.HostName, + IPAddress: info.IPAddress, + metricsStream: stream, + } + + ExpH.exporterLock.Lock() + ExpH.envoyMetricsExporters = append(ExpH.envoyMetricsExporters, currExporter) + ExpH.exporterLock.Unlock() + + return <-currExporter.error +} + +// SendEnvoyMetrics Function +func (exp *ExpHandler) SendEnvoyMetrics(evyMetrics *protobuf.EnvoyMetrics) error { + failed := 0 + total := len(exp.envoyMetricsExporters) + + for _, exporter := range exp.envoyMetricsExporters { + if err := exporter.metricsStream.Send(evyMetrics); err != nil { + log.Fatalf("[Exporter] Failed to export Envoy metrics to %s(%s): %v", exporter.Hostname, exporter.IPAddress, err) + failed++ + } + } + + if failed != 0 { + msg := fmt.Sprintf("[Exporter] Failed to export Envoy metrics properly (%d/%d failed)", failed, total) + return errors.New(msg) + } + + return nil +} + +// == // + +// InsertEnvoyMetrics Function +func InsertEnvoyMetrics(evyMetrics *protobuf.EnvoyMetrics) { + ExpH.exporterMetrics <- evyMetrics +} + +// == // diff --git a/sentryflow/exporter/exporterHandler.go b/sentryflow/exporter/exporterHandler.go index 598e76e..705b7f8 100644 --- a/sentryflow/exporter/exporterHandler.go +++ b/sentryflow/exporter/exporterHandler.go @@ -3,414 +3,223 @@ package exporter import ( - "errors" "fmt" "net" - "sort" - "strings" "sync" - "time" - cfg "github.com/5GSEC/SentryFlow/config" - "github.com/5GSEC/SentryFlow/protobuf" - "github.com/5GSEC/SentryFlow/types" + "github.com/5gsec/SentryFlow/config" + "github.com/5gsec/SentryFlow/protobuf" + + "log" - "github.com/emicklei/go-restful/v3/log" "google.golang.org/grpc" ) -// Exp global reference for Exporter Handler -var Exp *Handler +// == // + +// ExpH global reference for Exporter Handler +var ExpH *ExpHandler // init Function func init() { - Exp = NewExporterHandler() + ExpH = NewExporterHandler() } -// Handler structure -type Handler struct { - baseExecutionID 
uint64 - currentLogCount uint64 - agTime int - exTime int - stopChan chan struct{} - lock sync.Mutex - exporters []*Inform - apiMetricExporters []*apiMetricStreamInform - metricExporters []*metricStreamInform - exporterLock sync.Mutex - exporterLogs chan *protobuf.APILog - exporterAPIMetrics chan *protobuf.APIMetric - exporterMetrics chan *protobuf.EnvoyMetric - - listener net.Listener - gRPCServer *grpc.Server -} +// ExpHandler structure +type ExpHandler struct { + exporterService net.Listener + grpcServer *grpc.Server + grpcService *ExpService -// Inform structure -type Inform struct { - stream protobuf.SentryFlow_GetLogServer - error chan error - Hostname string - IPAddress string -} + apiLogExporters []*apiLogStreamInform + apiMetricsExporters []*apiMetricStreamInform + envoyMetricsExporters []*envoyMetricsStreamInform -// apiMetricStreamInform structure -type apiMetricStreamInform struct { - apiMetricStream protobuf.SentryFlow_GetAPIMetricsServer - error chan error - Hostname string - IPAddress string -} + exporterLock sync.Mutex -// metricStreamInform structure -type metricStreamInform struct { - metricStream protobuf.SentryFlow_GetEnvoyMetricsServer - error chan error - Hostname string - IPAddress string -} + exporterAPILogs chan *protobuf.APILog + exporterAPIMetrics chan *protobuf.APIMetrics + exporterMetrics chan *protobuf.EnvoyMetrics -// NewExporterHandler Function -func NewExporterHandler() *Handler { - exp := &Handler{ - baseExecutionID: uint64(time.Now().UnixMicro()), - currentLogCount: 0, - agTime: cfg.GlobalCfg.MetricsDBAggregationTime, - exTime: cfg.GlobalCfg.APIMetricsSendTime, - exporters: make([]*Inform, 0), - stopChan: make(chan struct{}), - lock: sync.Mutex{}, - exporterLock: sync.Mutex{}, - exporterLogs: make(chan *protobuf.APILog), - exporterAPIMetrics: make(chan *protobuf.APIMetric), - exporterMetrics: make(chan *protobuf.EnvoyMetric), - } + statsPerLabel map[string]StatsPerLabel + statsPerLabelLock sync.RWMutex - return exp + stopChan chan struct{} } -// InsertAccessLog Function -func InsertAccessLog(al *protobuf.APILog) { - // Avoid race condition for currentLogCount, otherwise we might have duplicate IDs - Exp.lock.Lock() - al.Id = Exp.baseExecutionID + Exp.currentLogCount - Exp.currentLogCount++ - Exp.lock.Unlock() - - go saveAccessLog(al) // go routine?? 
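
For context, a minimal consumer of the exporter's streams could look like the sketch below. It is not part of this change: it assumes the standard client that protoc-gen-go-grpc generates for the SentryFlow service (protobuf.NewSentryFlowClient), and the address is a placeholder for the configured ExporterAddr/ExporterPort.

// consumer sketch; NewSentryFlowClient and the target address are assumptions
package main

import (
	"context"
	"log"

	"github.com/5gsec/SentryFlow/protobuf"
	"google.golang.org/grpc"
)

func main() {
	// placeholder address; use the configured ExporterAddr:ExporterPort
	conn, err := grpc.Dial("sentryflow.sentryflow.svc.cluster.local:8080", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("failed to dial exporter: %v", err)
	}
	defer conn.Close()

	client := protobuf.NewSentryFlowClient(conn)

	// open the Envoy metrics stream; the server keeps it alive until shutdown
	stream, err := client.GetEnvoyMetrics(context.Background(), &protobuf.ClientInfo{HostName: "example-consumer"})
	if err != nil {
		log.Fatalf("failed to open GetEnvoyMetrics stream: %v", err)
	}

	for {
		metrics, err := stream.Recv()
		if err != nil {
			log.Fatalf("stream closed: %v", err)
		}
		log.Printf("received Envoy metrics: %v", metrics)
	}
}
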
- Exp.exporterLogs <- al +// ExpService Structure +type ExpService struct { + protobuf.UnimplementedSentryFlowServer } -func saveAccessLog(al *protobuf.APILog) { - curLabels := al.SrcLabel +// == // + +// NewExporterHandler Function +func NewExporterHandler() *ExpHandler { + exp := &ExpHandler{ + grpcService: new(ExpService), - var labelString []string + apiLogExporters: make([]*apiLogStreamInform, 0), + apiMetricsExporters: make([]*apiMetricStreamInform, 0), + envoyMetricsExporters: make([]*envoyMetricsStreamInform, 0), - for key, value := range curLabels { - labelString = append(labelString, fmt.Sprintf("%s:%s", key, value)) - } + exporterLock: sync.Mutex{}, - sort.Strings(labelString) + exporterAPILogs: make(chan *protobuf.APILog), + exporterAPIMetrics: make(chan *protobuf.APIMetrics), + exporterMetrics: make(chan *protobuf.EnvoyMetrics), - curData := types.DbAccessLogType{ - Labels: strings.Join(labelString, " "), - Namespace: al.SrcNamespace, - AccessLog: al, - } + statsPerLabel: make(map[string]StatsPerLabel), + statsPerLabelLock: sync.RWMutex{}, - err := MDB.AccessLogInsert(curData) - if err != nil { - log.Printf("unable to insert AccessLog") - return + stopChan: make(chan struct{}), } -} -// InsertEnvoyMetric Function -func InsertEnvoyMetric(em *protobuf.EnvoyMetric) { - Exp.exporterMetrics <- em + return exp } -// InitExporterServer Function -func (exp *Handler) InitExporterServer() error { - listenAddr := fmt.Sprintf("%s:%s", cfg.GlobalCfg.CustomExportListenAddr, cfg.GlobalCfg.CustomExportListenPort) +// == // - // Start listening - lis, err := net.Listen("tcp", listenAddr) +// StartExporter Function +func StartExporter(wg *sync.WaitGroup) bool { + // Make a string with the given exporter address and port + exporterService := fmt.Sprintf("%s:%s", config.GlobalConfig.ExporterAddr, config.GlobalConfig.ExporterPort) + + // Start listening gRPC port + expService, err := net.Listen("tcp", exporterService) if err != nil { - msg := fmt.Sprintf("unable to listen at %s: %v", listenAddr, err) - return errors.New(msg) + log.Fatalf("[Exporter] Failed to listen at %s: %v", exporterService, err) + return false } + ExpH.exporterService = expService + + log.Printf("[Exporter] Listening Exporter gRPC services (%s)", exporterService) // Create gRPC server - server := grpc.NewServer() - protobuf.RegisterSentryFlowServer(server, exs) + gRPCServer := grpc.NewServer() + ExpH.grpcServer = gRPCServer - exp.listener = lis - exp.gRPCServer = server + protobuf.RegisterSentryFlowServer(gRPCServer, ExpH.grpcService) - log.Printf("[Exporter] Exporter listening at %s", listenAddr) - return nil -} + log.Printf("[Exporter] Initialized Exporter gRPC services") -// StartExporterServer Function -func (exp *Handler) StartExporterServer(wg *sync.WaitGroup) error { - log.Printf("[Exporter] Starting exporter server") - var err error - err = nil + // Serve gRPC Service + go ExpH.grpcServer.Serve(ExpH.exporterService) - go exp.exportRoutine(wg) - - go func() { - wg.Add(1) - // Serve is blocking function - err = exp.gRPCServer.Serve(exp.listener) - if err != nil { - wg.Done() - return - } + log.Printf("[Exporter] Serving Exporter gRPC services (%s)", exporterService) - wg.Done() - }() + // Export APILogs + go ExpH.exportAPILogs(wg) - return err -} + log.Printf("[Exporter] Exporting API logs through gRPC services") -// exportRoutine Function -func (exp *Handler) exportRoutine(wg *sync.WaitGroup) { - wg.Add(1) - log.Printf("[Exporter] Starting export routine") + // Export APIMetrics + go ExpH.exportAPIMetrics(wg) 
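
Worth noting: Serve runs in its own goroutine above and its return value is dropped. A small wrapper that logs the shutdown error (a sketch against the same grpc.Server API) would be:

go func() {
	// Serve blocks until the listener fails or GracefulStop is called
	if err := ExpH.grpcServer.Serve(ExpH.exporterService); err != nil {
		log.Printf("[Exporter] gRPC server stopped: %v", err)
	}
}()
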
-routineLoop: - for { - select { - // @todo add more channels for this - case al, ok := <-exp.exporterLogs: - if !ok { - log.Printf("[Exporter] Log exporter channel closed") - break routineLoop - } + log.Printf("[Exporter] Exporting API metrics through gRPC services") - err := exp.sendLogs(al) - if err != nil { - log.Printf("[Exporter] Log exporting failed %v:", err) - } + // Export EnvoyMetrics + go ExpH.exportEnvoyMetrics(wg) - case em, ok := <-exp.exporterMetrics: - if !ok { - log.Printf("[Exporter] EnvoyMetric exporter channel closed") - break routineLoop - } + log.Printf("[Exporter] Exporting Envoy metrics through gRPC services") - err := exp.sendMetrics(em) - if err != nil { - log.Printf("[Exporter] Metric exporting failed %v:", err) - } + // Start Export Time Ticker Routine + go AggregateAPIMetrics() + go CleanUpOutdatedStats() - case am, ok := <-exp.exporterAPIMetrics: - if !ok { - log.Printf("[Exporter] APIMetric exporter channel closed") - break routineLoop - } - err := exp.sendAPIMetrics(am) - if err != nil { - log.Printf("[Exporter] APIMetric exporting failed %v:", err) - } + return true +} - case <-exp.stopChan: - break routineLoop - } - } +// StopExporter Function +func StopExporter() bool { + // One for exportAPILogs + ExpH.stopChan <- struct{}{} - defer wg.Done() - return -} + // One for exportAPIMetrics + ExpH.stopChan <- struct{}{} -// sendLogs Function -func (exp *Handler) sendLogs(l *protobuf.APILog) error { - exp.exporterLock.Lock() - defer exp.exporterLock.Unlock() - - // iterate and send logs - failed := 0 - total := len(exp.exporters) - for _, exporter := range exp.exporters { - curRetry := 0 - - // @todo: make max retry count per logs using config - // @todo: make max retry count per single exporter before removing the exporter using config - var err error - for curRetry < 3 { - err = exporter.stream.Send(l) - if err != nil { - log.Printf("[Exporter] Unable to send log to %s(%s) retry=%d/%d: %v", - exporter.Hostname, exporter.IPAddress, curRetry, 3, err) - curRetry++ - } else { - break - } - } + // One for exportEnvoyMetrics + ExpH.stopChan <- struct{}{} - // Count failed - if err != nil { - failed++ - } - } + // Stop gRPC server + ExpH.grpcServer.GracefulStop() - // notify failed count - if failed != 0 { - msg := fmt.Sprintf("unable to send logs properly %d/%d failed", failed, total) - return errors.New(msg) - } + log.Printf("[Exporter] Gracefully stopped Exporter gRPC services") - return nil + return true } -// sendMetrics Function -func (exp *Handler) sendMetrics(l *protobuf.EnvoyMetric) error { - exp.exporterLock.Lock() - defer exp.exporterLock.Unlock() - - // iterate and send logs - failed := 0 - total := len(exp.metricExporters) - for _, exporter := range exp.metricExporters { - curRetry := 0 - - // @todo: make max retry count per logs using config - // @todo: make max retry count per single exporter before removing the exporter using config - var err error - for curRetry < 3 { - err = exporter.metricStream.Send(l) - if err != nil { - log.Printf("[Exporter] Unable to send metric to %s(%s) retry=%d/%d: %v", - exporter.Hostname, exporter.IPAddress, curRetry, 3, err) - curRetry++ - } else { - break - } - } +// == // - // Count failed - if err != nil { - failed++ - } - } - - // notify failed count - if failed != 0 { - msg := fmt.Sprintf("unable to send metrics properly %d/%d failed", failed, total) - return errors.New(msg) - } +// exportAPILogs Function +func (exp *ExpHandler) exportAPILogs(wg *sync.WaitGroup) { + wg.Add(1) - return nil -} + for { + select { + 
case apiLog, ok := <-exp.exporterAPILogs: + if !ok { + log.Fatalf("[Exporter] Log exporter channel closed") + wg.Done() + return + } -// sendAPIMetrics Function -func (exp *Handler) sendAPIMetrics(l *protobuf.APIMetric) error { - exp.exporterLock.Lock() - defer exp.exporterLock.Unlock() - - // iterate and send logs - failed := 0 - total := len(exp.apiMetricExporters) - for _, exporter := range exp.apiMetricExporters { - curRetry := 0 - - // @todo: make max retry count per logs using config - // @todo: make max retry count per single exporter before removing the exporter using config - var err error - for curRetry < 3 { - err = exporter.apiMetricStream.Send(l) - if err != nil { - log.Printf("[Exporter] Unable to send metric to %s(%s) retry=%d/%d: %v", - exporter.Hostname, exporter.IPAddress, curRetry, 3, err) - curRetry++ - } else { - break + if err := exp.SendAPILogs(apiLog); err != nil { + log.Fatalf("[Exporter] Failed to export API Logs: %v", err) } - } - // Count failed - if err != nil { - failed++ + case <-exp.stopChan: + wg.Done() + return } } - - // notify failed count - if failed != 0 { - msg := fmt.Sprintf("unable to send metrics properly %d/%d failed", failed, total) - return errors.New(msg) - } - - return nil } -// APIMetricsExportRoutine function -func (exp *Handler) APIMetricsExportRoutine() { - -} - -// aggregationTimeTickerRoutine Function -func aggregationTimeTickerRoutine() error { - aggregationTicker := time.NewTicker(time.Duration(Exp.agTime) * time.Second) - - defer aggregationTicker.Stop() +// exportAPIMetrics Function +func (exp *ExpHandler) exportAPIMetrics(wg *sync.WaitGroup) { + wg.Add(1) for { select { - case <-aggregationTicker.C: - als, err := MDB.AggregatedAccessLogSelect() - if err != nil { - log.Printf("[Exporter] AccessLog Aggregation %v", err) - return err + case apiMetrics, ok := <-exp.exporterAPIMetrics: + if !ok { + log.Fatalf("[Exporter] APIMetric exporter channel closed") + wg.Done() + return } - - for _, val := range als { - // export part - curAPIs := []string{} - for _, APILog := range val { - curAPIs = append(curAPIs, APILog.Path) - } - InsertAPILog(curAPIs) + if err := exp.SendAPIMetrics(apiMetrics); err != nil { + log.Fatalf("[Exporter] Failed to export API metrics: %v", err) } + + case <-exp.stopChan: + wg.Done() + return } } } -// exportTimeTickerRoutine Function -func exportTimeTickerRoutine() error { - apiMetricTicker := time.NewTicker(time.Duration(Exp.exTime) * time.Second) - - defer apiMetricTicker.Stop() +// exportEnvoyMetrics Function +func (exp *ExpHandler) exportEnvoyMetrics(wg *sync.WaitGroup) { + wg.Add(1) for { select { - case <-apiMetricTicker.C: - curAPIMetrics, err := MDB.GetAllMetrics() - - if err != nil { - log.Printf("[Exporter] APIMetric TimeTicker channel closed") - return err + case evyMetrics, ok := <-exp.exporterMetrics: + if !ok { + log.Fatalf("[Exporter] EnvoyMetric exporter channel closed") + wg.Done() + return } - if len(curAPIMetrics) > 0 { - curAPIMetric := &protobuf.APIMetric{ - PerAPICounts: curAPIMetrics, - } - Exp.exporterAPIMetrics <- curAPIMetric + if err := exp.SendEnvoyMetrics(evyMetrics); err != nil { + log.Fatalf("[Exporter] Failed to export Envoy metrics: %v", err) } + + case <-exp.stopChan: + wg.Done() + return } } } -// StopExporterServer Function -func (exp *Handler) StopExporterServer() { - // Gracefully stop all client connections - exp.stopChan <- struct{}{} - - // Gracefully stop gRPC Server - exp.gRPCServer.GracefulStop() - - log.Printf("[Exporter] Stopped exporter server") -} +// == // diff --git 
a/sentryflow/exporter/exporterServer.go b/sentryflow/exporter/exporterServer.go deleted file mode 100644 index 71f0c40..0000000 --- a/sentryflow/exporter/exporterServer.go +++ /dev/null @@ -1,83 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package exporter - -import ( - "log" - - "github.com/5GSEC/SentryFlow/protobuf" -) - -var exs *Server - -// init Function -func init() { - exs = NewExporterServer() -} - -// Server Structure -type Server struct { - protobuf.UnimplementedSentryFlowServer // @todo: make this fixed. -} - -// NewExporterServer Function -func NewExporterServer() *Server { - return new(Server) -} - -// GetLog Function -func (exs *Server) GetLog(info *protobuf.ClientInfo, stream protobuf.SentryFlow_GetLogServer) error { - log.Printf("[Exporter] Client %s(%s) connected (GetLog)", info.HostName, info.IPAddress) - - curExporter := &Inform{ - stream: stream, - Hostname: info.HostName, - IPAddress: info.IPAddress, - } - - // Append new exporter client for future use - Exp.exporterLock.Lock() - Exp.exporters = append(Exp.exporters, curExporter) - Exp.exporterLock.Unlock() - - // Keeping gRPC stream alive - // refer https://stackoverflow.com/questions/36921131/ - return <-curExporter.error -} - -// GetEnvoyMetrics Function -func (exs *Server) GetEnvoyMetrics(info *protobuf.ClientInfo, stream protobuf.SentryFlow_GetEnvoyMetricsServer) error { - log.Printf("[Exporter] Client %s(%s) connected (GetEnvoyMetrics)", info.HostName, info.IPAddress) - - curExporter := &metricStreamInform{ - metricStream: stream, - Hostname: info.HostName, - IPAddress: info.IPAddress, - } - - // Append new exporter client for future use - Exp.exporterLock.Lock() - Exp.metricExporters = append(Exp.metricExporters, curExporter) - Exp.exporterLock.Unlock() - - // Keeping gRPC stream alive - // refer https://stackoverflow.com/questions/36921131/ - return <-curExporter.error -} - -// GetAPIMetrics Function -func (exs *Server) GetAPIMetrics(info *protobuf.ClientInfo, stream protobuf.SentryFlow_GetAPIMetricsServer) error { - log.Printf("[Exporter] Client %s(%s) connected (GetAPIMetrics)", info.HostName, info.IPAddress) - - curExporter := &apiMetricStreamInform{ - apiMetricStream: stream, - Hostname: info.HostName, - IPAddress: info.IPAddress, - } - - Exp.exporterLock.Lock() - Exp.apiMetricExporters = append(Exp.apiMetricExporters, curExporter) - Exp.exporterLock.Unlock() - - return <-curExporter.error -} diff --git a/sentryflow/go.mod b/sentryflow/go.mod index 79e87b1..4a422de 100644 --- a/sentryflow/go.mod +++ b/sentryflow/go.mod @@ -1,18 +1,15 @@ -module github.com/5GSEC/SentryFlow +module github.com/5gsec/SentryFlow go 1.21 -replace github.com/5GSEC/SentryFlow/protobuf => ../protobuf +replace github.com/5gsec/SentryFlow/protobuf => ../protobuf require ( - github.com/5GSEC/SentryFlow/protobuf v0.0.0-00010101000000-000000000000 - github.com/emicklei/go-restful/v3 v3.11.0 + github.com/5gsec/SentryFlow/protobuf v0.0.0-00010101000000-000000000000 github.com/envoyproxy/go-control-plane v0.12.0 - github.com/mattn/go-sqlite3 v1.14.22 github.com/spf13/viper v1.18.2 go.opentelemetry.io/proto/otlp v1.0.0 google.golang.org/grpc v1.63.2 - google.golang.org/protobuf v1.33.0 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.29.0 k8s.io/apimachinery v0.29.0 @@ -22,6 +19,7 @@ require ( require ( github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/envoyproxy/protoc-gen-validate 
v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-logr/logr v1.3.0 // indirect @@ -56,16 +54,17 @@ require ( go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.9.0 // indirect golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect - golang.org/x/net v0.21.0 // indirect + golang.org/x/net v0.23.0 // indirect golang.org/x/oauth2 v0.17.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/term v0.17.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/term v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.17.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect + google.golang.org/protobuf v1.34.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/sentryflow/go.sum b/sentryflow/go.sum index d45125b..0927d6d 100644 --- a/sentryflow/go.sum +++ b/sentryflow/go.sum @@ -67,8 +67,6 @@ github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0V github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= -github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -140,8 +138,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -155,12 +153,12 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys 
v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= @@ -192,8 +190,8 @@ google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/sentryflow/k8s/istioPatcher.go b/sentryflow/k8s/istioPatcher.go new file mode 100644 index 0000000..83d03c6 --- /dev/null +++ b/sentryflow/k8s/istioPatcher.go @@ -0,0 +1,246 @@ +// SPDX-License-Identifier: Apache-2.0 + +package k8s + +import ( + "errors" + "log" + + "gopkg.in/yaml.v2" + "k8s.io/apimachinery/pkg/util/json" +) + +// meshConfig structure +type meshConfig struct { + DefaultConfig struct { + DiscoveryAddress string `yaml:"discoveryAddress"` + EnvoyAccessLogService struct { + Address string `yaml:"address"` + } `yaml:"envoyAccessLogService"` + EnvoyMetricsService struct { + Address string `yaml:"address"` + } `yaml:"envoyMetricsService"` + } `yaml:"defaultConfig"` + + DefaultProviders struct { + AccessLogs []string `yaml:"accessLogs"` + Metrics []string `yaml:"metrics"` + } `yaml:"defaultProviders"` + + EnableEnvoyAccessLogService bool `yaml:"enableEnvoyAccessLogService"` + + ExtensionProviders []struct { + EnvoyOtelAls struct { + Port string `yaml:"port"` + Service string `yaml:"service"` + } `yaml:"envoyOtelAls"` + Name string `yaml:"name"` + } `yaml:"extensionProviders"` + + ExtraFields map[string]interface{} `yaml:",inline"` // all extra fields that SentryFlow will not touch +} + +// PatchIstioConfigMap Function +func PatchIstioConfigMap() bool { + log.Print("[PatchIstioConfigMap] Patching Istio ConfigMap") + + meshCfg, err := parseIstioConfigMap() + if err != nil { + log.Fatalf("[PatchIstioConfigMap] Unable to parse Istio ConfigMap: %v", err) + return false + } + + if 
isIstioAlreadyPatched(meshCfg) {
+		log.Print("[PatchIstioConfigMap] Istio ConfigMap was already patched before, skipping...")
+		return true
+	}
+
+	// set metrics and envoy access logging to SentryFlow
+	meshCfg.DefaultConfig.EnvoyAccessLogService.Address = "sentryflow.sentryflow.svc.cluster.local:4317"
+	meshCfg.DefaultConfig.EnvoyMetricsService.Address = "sentryflow.sentryflow.svc.cluster.local:4317"
+
+	// add SentryFlow as Otel AL collector
+	if patched, _ := isEnvoyOtelAlPatched(meshCfg); !patched {
+		sfOtelAl := struct {
+			EnvoyOtelAls struct {
+				Port    string `yaml:"port"`
+				Service string `yaml:"service"`
+			} `yaml:"envoyOtelAls"`
+			Name string `yaml:"name"`
+		}{
+			EnvoyOtelAls: struct {
+				Port    string `yaml:"port"`
+				Service string `yaml:"service"`
+			}{
+				Port:    "4317",
+				Service: "sentryflow.sentryflow.svc.cluster.local",
+			},
+			Name: "sentryflow",
+		}
+		meshCfg.ExtensionProviders = append(meshCfg.ExtensionProviders, sfOtelAl)
+	}
+
+	// add default access log provider
+	if patched, _ := isEnvoyALProviderPatched(meshCfg); !patched {
+		meshCfg.DefaultProviders.AccessLogs = append(meshCfg.DefaultProviders.AccessLogs, "sentryflow")
+	}
+
+	meshCfg.EnableEnvoyAccessLogService = true
+
+	yamlMeshCfg, err := yaml.Marshal(meshCfg)
+	if err != nil {
+		log.Printf("[PatchIstioConfigMap] Unable to marshal Istio ConfigMap: %v", err)
+		return false
+	}
+
+	strMeshCfg := string(yamlMeshCfg[:])
+	err = K8sH.updateConfigMap("istio-system", "istio", strMeshCfg)
+	if err != nil {
+		log.Printf("[PatchIstioConfigMap] Unable to update Istio ConfigMap: %v", err)
+		return false
+	}
+
+	log.Print("[PatchIstioConfigMap] Successfully patched Istio ConfigMap")
+
+	return true
+}
+
+// UnpatchIstioConfigMap Function
+func UnpatchIstioConfigMap() bool {
+	log.Print("[PatchIstioConfigMap] Unpatching Istio ConfigMap")
+
+	meshCfg, err := parseIstioConfigMap()
+	if err != nil {
+		log.Printf("[PatchIstioConfigMap] Unable to parse Istio ConfigMap: %v", err)
+		return false
+	}
+
+	// set metrics and envoy access logging back to empty value
+	meshCfg.DefaultConfig.EnvoyAccessLogService.Address = ""
+	meshCfg.DefaultConfig.EnvoyMetricsService.Address = ""
+
+	// remove EnvoyOtelAl
+	if patched, targetIdx := isEnvoyOtelAlPatched(meshCfg); patched {
+		tmp := make([]struct {
+			EnvoyOtelAls struct {
+				Port    string `yaml:"port"`
+				Service string `yaml:"service"`
+			} `yaml:"envoyOtelAls"`
+			Name string `yaml:"name"`
+		}, 0)
+		for idx, envoyOtelAl := range meshCfg.ExtensionProviders {
+			if idx != targetIdx {
+				tmp = append(tmp, envoyOtelAl)
+			}
+		}
+		meshCfg.ExtensionProviders = tmp
+	}
+
+	// remove default access log provider
+	if patched, targetIdx := isEnvoyALProviderPatched(meshCfg); patched {
+		tmp := make([]string, 0)
+		for idx, provider := range meshCfg.DefaultProviders.AccessLogs {
+			if idx != targetIdx {
+				tmp = append(tmp, provider)
+			}
+		}
+		meshCfg.DefaultProviders.AccessLogs = tmp
+	}
+
+	// @todo this might be incorrect, the user might have just set up envoy access log service manually before.
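
For reference, once PatchIstioConfigMap has run, the "mesh" entry of the istio ConfigMap should contain roughly the following (reconstructed from the patch logic above; field order and any unrelated, pre-existing fields will vary per cluster):

defaultConfig:
  envoyAccessLogService:
    address: sentryflow.sentryflow.svc.cluster.local:4317
  envoyMetricsService:
    address: sentryflow.sentryflow.svc.cluster.local:4317
defaultProviders:
  accessLogs:
  - sentryflow
enableEnvoyAccessLogService: true
extensionProviders:
- name: sentryflow
  envoyOtelAls:
    service: sentryflow.sentryflow.svc.cluster.local
    port: "4317"
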
+ // @todo check if this shall actually be overwritten by SentryFlow + // meshCfg.EnableEnvoyAccessLogService = false + + yamlMeshCfg, err := yaml.Marshal(meshCfg) + if err != nil { + log.Fatalf("[PatchIstioConfigMap] Unable to unmarshall Istio ConfigMap: %v", err) + return false + } + + strMeshCfg := string(yamlMeshCfg[:]) + err = K8sH.updateConfigMap("istio-system", "istio", strMeshCfg) + if err != nil { + log.Fatalf("[PatchIstioConfigMap] Unable to update Istio ConfigMap: %v", err) + return false + } + + log.Print("[PatchIstioConfigMap] Successfully unpatched Istio ConfigMap") + + return true +} + +// parseIstioConfigMap Function +func parseIstioConfigMap() (meshConfig, error) { + var meshCfg meshConfig + + configMapData, err := K8sH.getConfigMap("istio-system", "istio") + if err != nil { + return meshCfg, err + } + + // unmarshall JSON format of Istio config + var rawIstioCfg map[string]interface{} + err = json.Unmarshal([]byte(configMapData), &rawIstioCfg) + if err != nil { + return meshCfg, err + } + + // extract mesh field from configmap + meshData, ok := rawIstioCfg["mesh"].(string) + if !ok { + return meshCfg, errors.New("[PatchIstioConfigMap] Unable to find field \"mesh\" from Istio config") + } + + // unmarshall YAML format of Istio config + err = yaml.Unmarshal([]byte(meshData), &meshCfg) + if err != nil { + return meshCfg, err + } + + return meshCfg, nil +} + +// isEnvoyOtelAlPatched Function +func isEnvoyOtelAlPatched(meshCfg meshConfig) (bool, int) { + for idx, envoyOtelAl := range meshCfg.ExtensionProviders { + if envoyOtelAl.Name == "sentryflow" && + envoyOtelAl.EnvoyOtelAls.Port == "4317" && + envoyOtelAl.EnvoyOtelAls.Service == "sentryflow.sentryflow.svc.cluster.local" { + return true, idx + } + } + + return false, -1 +} + +// isEnvoyALProviderPatched Function +func isEnvoyALProviderPatched(meshCfg meshConfig) (bool, int) { + for idx, accessLogProvider := range meshCfg.DefaultProviders.AccessLogs { + if accessLogProvider == "sentryflow" { + return true, idx + } + } + return false, -1 +} + +// isIstioAlreadyPatched Function +func isIstioAlreadyPatched(meshCfg meshConfig) bool { + if meshCfg.DefaultConfig.EnvoyAccessLogService.Address != "sentryflow.sentryflow.svc.cluster.local:4317" || + meshCfg.DefaultConfig.EnvoyMetricsService.Address != "sentryflow.sentryflow.svc.cluster.local:4317" { + return false + } + + if patched, _ := isEnvoyOtelAlPatched(meshCfg); !patched { + return false + } + + if patched, _ := isEnvoyALProviderPatched(meshCfg); !patched { + return false + } + + if !meshCfg.EnableEnvoyAccessLogService { + return false + } + + return true +} diff --git a/sentryflow/k8s/k8sHandler.go b/sentryflow/k8s/k8sHandler.go new file mode 100644 index 0000000..867c3e4 --- /dev/null +++ b/sentryflow/k8s/k8sHandler.go @@ -0,0 +1,437 @@ +// SPDX-License-Identifier: Apache-2.0 + +package k8s + +import ( + "context" + "errors" + "log" + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/json" + + "github.com/5gsec/SentryFlow/types" + + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" +) + +// == // + +// K8sH global reference for Kubernetes Handler +var K8sH *KubernetesHandler + +// init Function +func init() { + K8sH = NewK8sHandler() +} + +// KubernetesHandler Structure +type KubernetesHandler struct { + config *rest.Config + clientSet *kubernetes.Clientset + + watchers map[string]*cache.ListWatch + informers 
map[string]cache.Controller + + podMap map[string]*corev1.Pod // NOT thread safe + serviceMap map[string]*corev1.Service // NOT thread safe +} + +// NewK8sHandler Function +func NewK8sHandler() *KubernetesHandler { + kh := &KubernetesHandler{ + watchers: make(map[string]*cache.ListWatch), + informers: make(map[string]cache.Controller), + + podMap: make(map[string]*corev1.Pod), + serviceMap: make(map[string]*corev1.Service), + } + + return kh +} + +// == // + +// InitK8sClient Function +func InitK8sClient() bool { + var err error + + // Initialize in cluster config + K8sH.config, err = rest.InClusterConfig() + if err != nil { + log.Fatal("[InitK8sClient] Failed to initialize Kubernetes client") + return false + } + + // Initialize Kubernetes clientSet + K8sH.clientSet, err = kubernetes.NewForConfig(K8sH.config) + if err != nil { + log.Fatal("[InitK8sClient] Failed to initialize Kubernetes client") + return false + } + + // Create a mapping table for existing pods and services to IPs + K8sH.initExistingResources() + + watchTargets := []string{"pods", "services"} + + // Initialize watchers for pods and services + for _, target := range watchTargets { + watcher := cache.NewListWatchFromClient( + K8sH.clientSet.CoreV1().RESTClient(), + target, + corev1.NamespaceAll, + fields.Everything(), + ) + K8sH.watchers[target] = watcher + } + + // Initialize informers + K8sH.initInformers() + + log.Print("[InitK8sClient] Initialized Kubernetes client") + + return true +} + +// initExistingResources Function that creates a mapping table for existing pods and services to IPs +// This is required since informers are NOT going to see existing resources until they are updated, created or deleted +// @todo: Refactor this function, this is kind of messy +func (k8s *KubernetesHandler) initExistingResources() { + // List existing Pods + podList, err := k8s.clientSet.CoreV1().Pods(corev1.NamespaceAll).List(context.TODO(), v1.ListOptions{}) + if err != nil { + log.Fatalf("[K8s] Failed to get Pods: %v", err.Error()) + } + + // Add existing Pods to the podMap + for _, pod := range podList.Items { + currentPod := pod + k8s.podMap[pod.Status.PodIP] = ¤tPod + log.Printf("[K8s] Add existing pod %s: %s/%s", pod.Status.PodIP, pod.Namespace, pod.Name) + } + + // List existing Services + serviceList, err := k8s.clientSet.CoreV1().Services(corev1.NamespaceAll).List(context.TODO(), v1.ListOptions{}) + if err != nil { + log.Fatalf("[K8s] Failed to get Services: %v", err.Error()) + } + + // Add existing Services to the serviceMap + for _, service := range serviceList.Items { + currentService := service + + // Check if the service has a LoadBalancer type + if service.Spec.Type == "LoadBalancer" { + for _, lbIngress := range service.Status.LoadBalancer.Ingress { + lbIP := lbIngress.IP + if lbIP != "" { + k8s.serviceMap[lbIP] = ¤tService + log.Printf("[K8s] Add existing service (LoadBalancer) %s: %s/%s", lbIP, service.Namespace, service.Name) + } + } + } else { + k8s.serviceMap[service.Spec.ClusterIP] = ¤tService + if len(service.Spec.ExternalIPs) != 0 { + for _, eIP := range service.Spec.ExternalIPs { + k8s.serviceMap[eIP] = ¤tService + log.Printf("[K8s] Add existing service %s: %s/%s", eIP, service.Namespace, service.Name) + } + } + } + } +} + +// initInformers Function that initializes informers for services and pods in a cluster +func (k8s *KubernetesHandler) initInformers() { + // Create Pod controller informer + _, pc := cache.NewInformer( + k8s.watchers["pods"], + &corev1.Pod{}, + time.Second*0, + 
cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { // Add pod + pod := obj.(*corev1.Pod) + k8s.podMap[pod.Status.PodIP] = pod + }, + UpdateFunc: func(oldObj, newObj interface{}) { // Update pod + newPod := newObj.(*corev1.Pod) + k8s.podMap[newPod.Status.PodIP] = newPod + }, + DeleteFunc: func(obj interface{}) { // Remove deleted pod + pod := obj.(*corev1.Pod) + delete(k8s.podMap, pod.Status.PodIP) + }, + }, + ) + k8s.informers["pods"] = pc + + // Create Service controller informer + _, sc := cache.NewInformer( + k8s.watchers["services"], + &corev1.Service{}, + time.Second*0, + cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { // Add service + service := obj.(*corev1.Service) + + if service.Spec.Type == "LoadBalancer" { + for _, lbIngress := range service.Status.LoadBalancer.Ingress { + lbIP := lbIngress.IP + if lbIP != "" { + k8s.serviceMap[lbIP] = service + } + } + } else { + k8s.serviceMap[service.Spec.ClusterIP] = service + if len(service.Spec.ExternalIPs) != 0 { + for _, eIP := range service.Spec.ExternalIPs { + k8s.serviceMap[eIP] = service + } + } + } + }, + UpdateFunc: func(oldObj, newObj interface{}) { // Update service + newService := newObj.(*corev1.Service) + if newService.Spec.Type == "LoadBalancer" { + for _, lbIngress := range newService.Status.LoadBalancer.Ingress { + lbIP := lbIngress.IP + if lbIP != "" { + k8s.serviceMap[lbIP] = newService + } + } + } else { + k8s.serviceMap[newService.Spec.ClusterIP] = newService + if len(newService.Spec.ExternalIPs) != 0 { + for _, eIP := range newService.Spec.ExternalIPs { + k8s.serviceMap[eIP] = newService + } + } + } + }, + DeleteFunc: func(obj interface{}) { + service := obj.(*corev1.Service) + if service.Spec.Type == "LoadBalancer" { + for _, lbIngress := range service.Status.LoadBalancer.Ingress { + lbIP := lbIngress.IP + if lbIP != "" { + delete(k8s.serviceMap, lbIP) + } + } + } else { + delete(k8s.serviceMap, service.Spec.ClusterIP) // Remove deleted service + if len(service.Spec.ExternalIPs) != 0 { + for _, eIP := range service.Spec.ExternalIPs { + delete(k8s.serviceMap, eIP) + } + } + } + }, + }, + ) + k8s.informers["services"] = sc +} + +// == // + +// RunInformers Function that starts running informers +func RunInformers(stopChan chan struct{}, wg *sync.WaitGroup) { + wg.Add(1) + + for name, informer := range K8sH.informers { + name := name + informer := informer + go func() { + log.Printf("[RunInformers] Starting an informer for %s", name) + informer.Run(stopChan) + defer wg.Done() + }() + } + + log.Print("[RunInformers] Started all Kubernetes informers") +} + +// getConfigMap Function +func (k8s *KubernetesHandler) getConfigMap(namespace, name string) (string, error) { + cm, err := k8s.clientSet.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, v1.GetOptions{}) + if err != nil { + log.Fatalf("[K8s] Failed to get ConfigMaps: %v", err) + return "", err + } + + // convert data to string + data, err := json.Marshal(cm.Data) + if err != nil { + log.Fatalf("[K8s] Failed to marshal ConfigMap: %v", err) + return "", err + } + + return string(data), nil +} + +// updateConfigMap Function +func (k8s *KubernetesHandler) updateConfigMap(namespace, name, data string) error { + cm, err := k8s.clientSet.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, v1.GetOptions{}) + if err != nil { + log.Fatalf("[K8s] Failed to get ConfigMap: %v", err) + return err + } + + if _, ok := cm.Data["mesh"]; !ok { + return errors.New("[K8s] Unable to find field \"mesh\" from Istio config") + } + + 
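
As the podMap and serviceMap comments note, the informer callbacks above write both maps without locking while lookupIPAddress (further below) reads them concurrently. A minimal guarded variant is sketched here; the mapLock field is an assumption, not part of this change:

// sketch: guarding the IP-to-Pod map, assuming a `mapLock sync.RWMutex`
// field were added to KubernetesHandler
func (k8s *KubernetesHandler) addPod(pod *corev1.Pod) {
	k8s.mapLock.Lock()
	defer k8s.mapLock.Unlock()
	k8s.podMap[pod.Status.PodIP] = pod
}

func (k8s *KubernetesHandler) podByIP(ip string) (*corev1.Pod, bool) {
	k8s.mapLock.RLock()
	defer k8s.mapLock.RUnlock()
	pod, ok := k8s.podMap[ip]
	return pod, ok
}
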
cm.Data["mesh"] = data + if _, err := k8s.clientSet.CoreV1().ConfigMaps(namespace).Update(context.Background(), cm, v1.UpdateOptions{}); err != nil { + return err + } + + return nil +} + +// PatchNamespaces Function that patches namespaces for adding 'istio-injection' +func PatchNamespaces() bool { + namespaces, err := K8sH.clientSet.CoreV1().Namespaces().List(context.Background(), v1.ListOptions{}) + if err != nil { + log.Fatalf("[PatchNamespaces] Failed to get Namespaces: %v", err) + return false + } + + for _, ns := range namespaces.Items { + namespace := ns.DeepCopy() + + // Skip the following namespaces + if namespace.Name == "sentryflow" { + continue + } + + namespace.Labels["istio-injection"] = "enabled" + + // Patch the namespace + if _, err := K8sH.clientSet.CoreV1().Namespaces().Update(context.TODO(), namespace, v1.UpdateOptions{FieldManager: "patcher"}); err != nil { + log.Fatalf("[PatchNamespaces] Failed to update Namespace %s: %v", namespace.Name, err) + return false + } + + log.Printf("[PatchNamespaces] Updated Namespace %s", namespace.Name) + } + + log.Print("[PatchNamespaces] Updated all Namespaces") + + return true +} + +// restartDeployment Function that performs a rolling restart for a deployment in the specified namespace +// @todo: fix this, this DOES NOT restart deployments +func (k8s *KubernetesHandler) restartDeployment(namespace string, deploymentName string) error { + deploymentClient := k8s.clientSet.AppsV1().Deployments(namespace) + + // Get the deployment to retrieve the current spec + deployment, err := deploymentClient.Get(context.Background(), deploymentName, v1.GetOptions{}) + if err != nil { + return err + } + + // Trigger a rolling restart by updating the deployment's labels or annotations + deployment.Spec.Template.ObjectMeta.Labels["restartedAt"] = v1.Now().String() + + // Update the deployment to trigger the rolling restart + _, err = deploymentClient.Update(context.TODO(), deployment, v1.UpdateOptions{}) + if err != nil { + return err + } + + return nil +} + +// RestartDeployments Function that restarts the deployments in the namespaces with "istio-injection=enabled" +func RestartDeployments() bool { + deployments, err := K8sH.clientSet.AppsV1().Deployments("").List(context.Background(), v1.ListOptions{}) + if err != nil { + log.Fatalf("[PatchDeployments] Failed to get Deployments: %v", err) + return false + } + + for _, deployment := range deployments.Items { + // Skip the following namespaces + if deployment.Namespace == "sentryflow" { + continue + } + + // Restart the deployment + if err := K8sH.restartDeployment(deployment.Namespace, deployment.Name); err != nil { + log.Fatalf("[PatchDeployments] Failed to restart Deployment %s/%s: %v", deployment.Namespace, deployment.Name, err) + return false + } + + log.Printf("[PatchDeployments] Deployment %s/%s restarted", deployment.Namespace, deployment.Name) + } + + log.Print("[PatchDeployments] Restarted all patched deployments") + + return true +} + +// == // + +// lookupIPAddress Function +func lookupIPAddress(ipAddr string) interface{} { + // Look for pod map + pod, ok := K8sH.podMap[ipAddr] + if ok { + return pod + } + + // Look for service map + service, ok := K8sH.serviceMap[ipAddr] + if ok { + return service + } + + return nil +} + +// LookupK8sResource Function +func LookupK8sResource(srcIP string) types.K8sResource { + ret := types.K8sResource{ + Namespace: "Unknown", + Name: "Unknown", + Labels: make(map[string]string), + Type: types.K8sResourceTypeUnknown, + } + + // Find Kubernetes resource 
from source IP (service or a pod) + raw := lookupIPAddress(srcIP) + + // Currently supports Service or Pod + switch raw.(type) { + case *corev1.Pod: + pod, ok := raw.(*corev1.Pod) + if ok { + ret.Namespace = pod.Namespace + ret.Name = pod.Name + ret.Labels = pod.Labels + ret.Type = types.K8sResourceTypePod + } + case *corev1.Service: + svc, ok := raw.(*corev1.Service) + if ok { + ret.Namespace = svc.Namespace + ret.Name = svc.Name + ret.Labels = svc.Labels + ret.Type = types.K8sResourceTypeService + } + default: + ret.Type = types.K8sResourceTypeUnknown + } + + return ret +} + +// == // diff --git a/sentryflow/main.go b/sentryflow/main.go index 626777d..d96e538 100644 --- a/sentryflow/main.go +++ b/sentryflow/main.go @@ -3,25 +3,13 @@ package main import ( - "github.com/5GSEC/SentryFlow/collector" - "github.com/5GSEC/SentryFlow/core" - _ "google.golang.org/grpc/encoding/gzip" // If not set, encoding problem occurs https://stackoverflow.com/questions/74062727 - "log" + "github.com/5gsec/SentryFlow/core" ) -// main is the entrypoint of this program -func main() { - go func() { - core.SentryFlow() - }() - - err := collector.Ch.InitGRPCServer() - if err != nil { - log.Fatalf("[Error] Unable to start collector gRPC Server: %v", err) - } +// ========== // +// == Main == // +// ========== // - err = collector.Ch.Serve() - if err != nil { - log.Fatalf("[Error] Unable to serve gRPC Server: %v", err) - } +func main() { + core.SentryFlow() } diff --git a/sentryflow/metrics/api/apiAnalyzer.go b/sentryflow/metrics/api/apiAnalyzer.go deleted file mode 100644 index 78a2ff7..0000000 --- a/sentryflow/metrics/api/apiAnalyzer.go +++ /dev/null @@ -1,92 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package api - -import ( - "sync" -) - -// aa Local reference for API analyzer -var aa *Analyzer - -// init function -func init() { - aa = NewAPIAnalyzer() -} - -// Analyzer Structure -type Analyzer struct { - perAPICount map[string]uint64 - perAPICountLock sync.Mutex // @todo perhaps combine those two? - - curBatchCount int - batchCountLock sync.Mutex - - stopChan chan struct{} - apiJob chan string -} - -// NewAPIAnalyzer Function -func NewAPIAnalyzer() *Analyzer { - ret := &Analyzer{ - perAPICount: make(map[string]uint64), - } - - return ret -} - -// StartAPIAnalyzer Function -func StartAPIAnalyzer(wg *sync.WaitGroup) { - go apiAnalyzerRoutine(wg) -} - -// StopAPIAnalyzer Function -func StopAPIAnalyzer() { - aa.stopChan <- struct{}{} -} - -// apiAnalyzerRoutine Function -func apiAnalyzerRoutine(wg *sync.WaitGroup) { - wg.Add(1) - for { - select { - case job, ok := <-aa.apiJob: - if !ok { - // @todo perhaps error message here? 
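
A short usage sketch for the lookup above (the IP literal is hypothetical; LookupK8sResource and K8sResourceTypeToString are the functions defined in this change):

// resolve a peer IP observed in an access log (hypothetical IP value)
res := k8s.LookupK8sResource("10.0.0.42")
log.Printf("peer %s/%s (%s)", res.Namespace, res.Name, types.K8sResourceTypeToString(res.Type))
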
- continue - } - analyzeAPI(job) - - case <-aa.stopChan: - wg.Done() - break - } - } -} - -// analyzeAPI Function -func analyzeAPI(api string) { - // @todo implement this - classifyAPI(api) -} - -// GetPerAPICount Function -func GetPerAPICount() map[string]uint64 { - aa.perAPICountLock.Lock() - ret := aa.perAPICount - aa.perAPICountLock.Unlock() - - return ret -} - -// UpdatePerAPICount Function -func UpdatePerAPICount(nm map[string]uint64) { - aa.perAPICountLock.Lock() - aa.perAPICount = nm - aa.perAPICountLock.Unlock() -} - -// InsertAnalyzeJob Function -func InsertAnalyzeJob(api string) { - aa.apiJob <- api -} diff --git a/sentryflow/metrics/api/apiClassifier.go b/sentryflow/metrics/api/apiClassifier.go deleted file mode 100644 index e251dc1..0000000 --- a/sentryflow/metrics/api/apiClassifier.go +++ /dev/null @@ -1,44 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package api - -type node struct { - path string - count int - child []*node -} - -type classifiedAPI struct { - destination string - method string - URIRoot *node -} - -// classifyAPI Function -func classifyAPI(api string) { -} - -// generateMetric Function -func generateMetric(cal classifiedAPI) { - -} - -// statisticOfAPIsPerDestination Function -func statisticOfAPIsPerDestination(cal classifiedAPI) { - -} - -// statisticOfAPIsPerMin Function -func statisticOfAPIsPerMin(cal classifiedAPI) { - -} - -// statisticOfErrorAPI Function -func statisticOfErrorAPI(cal classifiedAPI) { - -} - -// statisticOfAPILatency Function -func statisticOfAPILatency(cal classifiedAPI) { - -} diff --git a/sentryflow/metrics/metricHandler.go b/sentryflow/metrics/metricHandler.go deleted file mode 100644 index 2e78627..0000000 --- a/sentryflow/metrics/metricHandler.go +++ /dev/null @@ -1,45 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -package metrics - -import ( - "sync" - - "github.com/5GSEC/SentryFlow/metrics/api" - "github.com/5GSEC/SentryFlow/protobuf" -) - -// Mh Global reference for metric handler -var Mh *MetricHandler - -// init Function -func init() { - Mh = NewMetricHandler() -} - -// MetricHandler Structure -type MetricHandler struct { -} - -// NewMetricHandler Function -func NewMetricHandler() *MetricHandler { - mh := &MetricHandler{} - - return mh -} - -// StartMetricsAnalyzer Function -func StartMetricsAnalyzer(wg *sync.WaitGroup) { - api.StartAPIAnalyzer(wg) -} - -// StopMetricsAnalyzer Function -func StopMetricsAnalyzer() { - api.StopAPIAnalyzer() -} - -// InsertAccessLog Function -func InsertAccessLog(al *protobuf.APILog) { - // @todo: make this fixed, for now will just send path from AccessLog - api.InsertAnalyzeJob(al.Path) -} diff --git a/sentryflow/processor/apiAnalyzer.go b/sentryflow/processor/apiAnalyzer.go new file mode 100644 index 0000000..b7d7474 --- /dev/null +++ b/sentryflow/processor/apiAnalyzer.go @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: Apache-2.0 + +package processor + +import ( + "log" + "sync" + + "github.com/5gsec/SentryFlow/config" +) + +// == // + +// APIA Local reference for API Analyzer +var APIA *Analyzer + +// init function +func init() { + APIA = NewAPIAnalyzer() +} + +// Analyzer Structure +type Analyzer struct { + stopChan chan struct{} + + apiLog chan string + apiLogs []string + apiLogsLock sync.Mutex +} + +// NewAPIAnalyzer Function +func NewAPIAnalyzer() *Analyzer { + ret := &Analyzer{ + apiLog: make(chan string), + apiLogs: []string{}, + apiLogsLock: sync.Mutex{}, + } + return ret +} + +// StartAPIAnalyzer Function +func StartAPIAnalyzer(wg *sync.WaitGroup) bool { + // keep analyzing 
given APIs + go analyzeAPIs(wg) + + log.Print("[APIAnalyzer] Started API Analyzer") + + return true +} + +// AnalyzeAPI Function +func AnalyzeAPI(api string) { + APIA.apiLog <- api +} + +// StopAPIAnalyzer Function +func StopAPIAnalyzer() bool { + APIA.stopChan <- struct{}{} + + log.Print("[APIAnalyzer] Stopped API Analyzer") + + return true +} + +// == // + +// analyzeAPIs Function +func analyzeAPIs(wg *sync.WaitGroup) { + wg.Add(1) + + for { + select { + case api, ok := <-APIA.apiLog: + if !ok { + continue + } + + APIA.apiLogsLock.Lock() + + APIA.apiLogs = append(APIA.apiLogs, api) + + if len(APIA.apiLogs) > config.GlobalConfig.AIEngineBatchSize { + ClassifyAPIs(APIA.apiLogs) + APIA.apiLogs = []string{} + } + + APIA.apiLogsLock.Unlock() + case <-APIA.stopChan: + wg.Done() + return + } + } +} + +// == // diff --git a/sentryflow/processor/apiClassifier.go b/sentryflow/processor/apiClassifier.go new file mode 100644 index 0000000..6852371 --- /dev/null +++ b/sentryflow/processor/apiClassifier.go @@ -0,0 +1,238 @@ +// // SPDX-License-Identifier: Apache-2.0 + +package processor + +import ( + "context" + "fmt" + "io" + "log" + "sync" + "time" + + "github.com/5gsec/SentryFlow/config" + "github.com/5gsec/SentryFlow/exporter" + "github.com/5gsec/SentryFlow/protobuf" + "google.golang.org/grpc" +) + +// APIC Local reference for AI-driven API Classifier +var APIC *APIClassifier + +// APIClassifier Structure +type APIClassifier struct { + stopChan chan struct{} + + APIs chan []string + aggregatedLogs chan []*protobuf.APILog + + connected bool + AIStream *streamInform + + reConnTrial time.Duration +} + +// streamInform Structure +type streamInform struct { + AIStream protobuf.APIClassifier_ClassifyAPIsClient +} + +// init Function +func init() { + APIC = NewAPIClassifier() +} + +// NewAPIClassifier Function +func NewAPIClassifier() *APIClassifier { + ah := &APIClassifier{ + stopChan: make(chan struct{}), + + APIs: make(chan []string), + aggregatedLogs: make(chan []*protobuf.APILog), + + reConnTrial: (1 * time.Minute), + } + + return ah +} + +// checkAPIClassifier Function +func checkAPIClassifier() bool { + AIEngineService := fmt.Sprintf("%s:%s", config.GlobalConfig.AIEngineService, config.GlobalConfig.AIEngineServicePort) + + // test gRPC connection + conn, err := grpc.Dial(AIEngineService, grpc.WithInsecure(), grpc.WithBlock(), grpc.WithTimeout(5*time.Second)) + if err != nil { + return false + } + defer conn.Close() + + return true +} + +// initAPIClassifier Function +func initAPIClassifier() bool { + AIEngineService := fmt.Sprintf("%s:%s", config.GlobalConfig.AIEngineService, config.GlobalConfig.AIEngineServicePort) + + // Set up a connection to the server + conn, err := grpc.Dial(AIEngineService, grpc.WithInsecure()) + if err != nil { + log.Fatalf("[APIClassifier] Failed to connect to %s: %v", AIEngineService, err) + return false + } + + log.Printf("[APIClassifier] Connecting to %s", AIEngineService) + + client := protobuf.NewAPIClassifierClient(conn) + + // Start serving gRPC server + stream, err := client.ClassifyAPIs(context.Background()) + if err != nil { + log.Fatalf("[APIClassifier] Failed to make a stream: %v", err) + return false + } + + log.Printf("[APIClassifier] Successfully connected to %s", AIEngineService) + + APIC.AIStream = &streamInform{ + AIStream: stream, + } + + log.Print("[APIClassifier] Started API Classifier") + + return true +} + +// StartAPIClassifier Function +func StartAPIClassifier(wg *sync.WaitGroup) bool { + if !checkAPIClassifier() { + APIC.connected = false + } + + 
if initAPIClassifier() { + APIC.connected = true + } + + go connRoutine(wg) + go sendAPIRoutine(wg) + go recvAPIRoutine(wg) + + return true +} + +// ClassifyAPIs function +func ClassifyAPIs(APIs []string) { + if APIC.connected { + APIC.APIs <- APIs + } +} + +// StopAPIClassifier Function +func StopAPIClassifier() bool { + // one for connRoutine + APIC.stopChan <- struct{}{} + + // one for sendAPIRoutine + APIC.stopChan <- struct{}{} + + // one for recvAPIRoutine + APIC.stopChan <- struct{}{} + + log.Print("[APIClassifier] Stopped API Classifier") + + return true +} + +// connRoutine Function +func connRoutine(wg *sync.WaitGroup) { + wg.Add(1) + + for { + select { + case <-APIC.stopChan: + wg.Done() + return + default: + if !APIC.connected { + if checkAPIClassifier() && initAPIClassifier() { + APIC.connected = true + } else { + time.Sleep(APIC.reConnTrial) + } + } + } + } +} + +// sendAPIRoutine Function +func sendAPIRoutine(wg *sync.WaitGroup) { + wg.Add(1) + + for { + if !APIC.connected { + time.Sleep(APIC.reConnTrial) + continue + } + + select { + case api, ok := <-APIC.APIs: + if !ok { + log.Fatal("[APIClassifier] Failed to fetch APIs from APIs channel") + continue + } + + curAPIRequest := &protobuf.APIClassifierRequest{ + API: api, + } + + err := APIC.AIStream.AIStream.Send(curAPIRequest) + if err != nil { + log.Fatalf("[APIClassifier] Failed to send an API to AI Engine: %v", err) + APIC.connected = false + continue + } + case <-APIC.stopChan: + wg.Done() + return + } + } +} + +// recvAPIRoutine Function +func recvAPIRoutine(wg *sync.WaitGroup) { + wg.Add(1) + + for { + if !APIC.connected { + time.Sleep(APIC.reConnTrial) + continue + } + + select { + default: + APIMetrics := make(map[string]uint64) + + event, err := APIC.AIStream.AIStream.Recv() + if err == io.EOF { + continue + } else if err != nil { + log.Fatalf("[APIClassifier] Failed to receive an event from AI Engine: %v", err) + APIC.connected = false + continue + } + + for api, count := range event.APIs { + APIMetrics[api] = count + } + + err = exporter.ExpH.SendAPIMetrics(&protobuf.APIMetrics{PerAPICounts: APIMetrics}) + if err != nil { + log.Fatalf("[APIClassifier] Failed to export API metrics: %v", err) + continue + } + case <-APIC.stopChan: + wg.Done() + return + } + } +} diff --git a/sentryflow/processor/logProcessor.go b/sentryflow/processor/logProcessor.go new file mode 100644 index 0000000..471c8d6 --- /dev/null +++ b/sentryflow/processor/logProcessor.go @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: Apache-2.0 + +package processor + +import ( + "log" + "sync" + + "github.com/5gsec/SentryFlow/exporter" + "github.com/5gsec/SentryFlow/protobuf" +) + +// == // + +// LogH global reference for Log Handler +var LogH *LogHandler + +// init Function +func init() { + LogH = NewLogHandler() +} + +// LogHandler Structure +type LogHandler struct { + stopChan chan struct{} + + apiLogChan chan interface{} + metricsChan chan interface{} +} + +// NewLogHandler Structure +func NewLogHandler() *LogHandler { + lh := &LogHandler{ + stopChan: make(chan struct{}), + + apiLogChan: make(chan interface{}), + metricsChan: make(chan interface{}), + } + + return lh +} + +// == // + +// StartLogProcessor Function +func StartLogProcessor(wg *sync.WaitGroup) bool { + // handle API logs + go ProcessAPILogs(wg) + + // handle Envoy metrics + go ProcessEnvoyMetrics(wg) + + log.Print("[LogProcessor] Started Log Processors") + + return true +} + +// StopLogProcessor Function +func StopLogProcessor() bool { + // One for ProcessAPILogs + LogH.stopChan <- 
struct{}{}
+
+	// One for ProcessEnvoyMetrics
+	LogH.stopChan <- struct{}{}
+
+	log.Print("[LogProcessor] Stopped Log Processors")
+
+	return true
+}
+
+// == //
+
+// ProcessAPILogs Function
+func ProcessAPILogs(wg *sync.WaitGroup) {
+	wg.Add(1)
+
+	for {
+		select {
+		case logType, ok := <-LogH.apiLogChan:
+			if !ok {
+				// channel closed; stop this processor instead of touching a nil log
+				log.Print("[LogProcessor] API log channel closed")
+				wg.Done()
+				return
+			}
+
+			go AnalyzeAPI(logType.(*protobuf.APILog).Path)
+			go exporter.InsertAPILog(logType.(*protobuf.APILog))
+
+		case <-LogH.stopChan:
+			wg.Done()
+			return
+		}
+	}
+}
+
+// InsertAPILog Function
+func InsertAPILog(data interface{}) {
+	LogH.apiLogChan <- data
+}
+
+// ProcessEnvoyMetrics Function
+func ProcessEnvoyMetrics(wg *sync.WaitGroup) {
+	wg.Add(1)
+
+	for {
+		select {
+		case logType, ok := <-LogH.metricsChan:
+			if !ok {
+				// channel closed; stop this processor instead of touching nil metrics
+				log.Print("[LogProcessor] Envoy metrics channel closed")
+				wg.Done()
+				return
+			}
+
+			go exporter.InsertEnvoyMetrics(logType.(*protobuf.EnvoyMetrics))
+
+		case <-LogH.stopChan:
+			wg.Done()
+			return
+		}
+	}
+}
+
+// InsertMetrics Function
+func InsertMetrics(data interface{}) {
+	LogH.metricsChan <- data
+}
+
+// == //
diff --git a/sentryflow/types/metrics.go b/sentryflow/types/metrics.go
deleted file mode 100644
index 1a95584..0000000
--- a/sentryflow/types/metrics.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// SPDX-License-Identifier: Apache-2.0
-
-package types
-
-import (
-	"github.com/5GSEC/SentryFlow/protobuf"
-)
-
-// PerAPICount Structure
-type PerAPICount struct {
-	API   string
-	Count uint64
-}
-
-// DbAccessLogType Structure
-type DbAccessLogType struct {
-	Namespace string
-	Labels    string
-	AccessLog *protobuf.APILog
-}
diff --git a/sentryflow/types/k8sResources.go b/sentryflow/types/types.go
similarity index 72%
rename from sentryflow/types/k8sResources.go
rename to sentryflow/types/types.go
index 50f8fb3..4ce59ab 100644
--- a/sentryflow/types/k8sResources.go
+++ b/sentryflow/types/types.go
@@ -2,33 +2,35 @@
 package types
 
-// k8sResources const
+// == //
+
+// K8sResourceTypes
 const (
 	K8sResourceTypeUnknown = 0
 	K8sResourceTypePod     = 1
 	K8sResourceTypeService = 2
 )
 
-// K8sNetworkedResource Structure
-type K8sNetworkedResource struct {
-	Name string
+// K8sResource Structure
+type K8sResource struct {
+	Type      uint8
 	Namespace string
+	Name      string
 	Labels    map[string]string
 	Containers []string
-	Type uint8
 }
 
 // K8sResourceTypeToString Function
-func K8sResourceTypeToString(t uint8) string {
-	switch t {
+func K8sResourceTypeToString(resourceType uint8) string {
+	switch resourceType {
 	case K8sResourceTypePod:
 		return "Pod"
 	case K8sResourceTypeService:
 		return "Service"
-	case K8sResourceTypeUnknown:
-	default:
+	default:
 		return "Unknown"
 	}
-	return "Unknown"
 }
+
+// == //
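
Finally, the AI-engine round trip added in apiClassifier.go follows the usual bidirectional-stream pattern. One request/response cycle, sketched from the client constructor and message shapes used above (conn and the sample path are assumptions):

// one send/receive cycle against the classifier stream
client := protobuf.NewAPIClassifierClient(conn)
stream, err := client.ClassifyAPIs(context.Background())
if err != nil {
	log.Fatalf("failed to open classifier stream: %v", err)
}

// send a batch of raw API paths for classification (hypothetical path)
if err := stream.Send(&protobuf.APIClassifierRequest{API: []string{"/api/v1/users?id=1"}}); err != nil {
	log.Printf("send failed: %v", err)
}

// receive per-API counts and forward them as APIMetrics
if event, err := stream.Recv(); err == nil {
	_ = exporter.ExpH.SendAPIMetrics(&protobuf.APIMetrics{PerAPICounts: event.APIs})
}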