adding collecting metrics
Signed-off-by: Paige Patton <[email protected]>
paigerube14 committed Jan 27, 2025
1 parent 0372013 commit 7d0deba
Showing 3 changed files with 5 additions and 18 deletions.
config/config.yaml (2 changes: 0 additions & 2 deletions)
@@ -68,8 +68,6 @@ performance_monitoring:
check_critical_alerts: False # When enabled will check prometheus for critical alerts firing post chaos
elastic:
enable_elastic: False
- collect_metrics: False
- collect_alerts: False
verify_certs: False
elastic_url: "" # To track results in elasticsearch, give url to server here; will post telemetry details when url and index not blank
elastic_port: 32766
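With collect_metrics and collect_alerts dropped, enable_elastic is the only switch left in this section, and keys that are absent from the YAML simply fall back to their defaults when the config is read. A minimal sketch of that read path, assuming a get_yaml_item_value helper that behaves like dict.get with a default (the stub below is illustrative, not krkn's implementation; the config["elastic"] access and the default values are taken from the diffs on this page):

import yaml

# Illustrative stand-in for krkn's get_yaml_item_value helper: return the
# default when the key is missing, which is why the removed keys need no
# further handling elsewhere.
def get_yaml_item_value(section, key, default):
    value = section.get(key)
    return default if value is None else value

with open("config/config.yaml") as f:
    config = yaml.safe_load(f)

enable_elastic = get_yaml_item_value(config["elastic"], "enable_elastic", False)
elastic_url = get_yaml_item_value(config["elastic"], "elastic_url", "")
elastic_port = get_yaml_item_value(config["elastic"], "elastic_port", 32766)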
krkn/prometheus/client.py (7 changes: 2 additions & 5 deletions)
@@ -25,7 +25,6 @@ def alerts(
start_time,
end_time,
alert_profile,
- elastic_collect_alerts,
elastic_alerts_index,
):

@@ -56,7 +55,6 @@
processed_alert[0]
and processed_alert[1]
and elastic
- and elastic_collect_alerts
):
elastic_alert = ElasticAlert(
run_uuid=run_uuid,
@@ -156,7 +154,6 @@ def metrics(
start_time,
end_time,
metrics_profile,
- elastic_collect_metrics,
elastic_metrics_index,
) -> list[dict[str, list[(int, float)] | str]]:
metrics_list: list[dict[str, list[(int, float)] | str]] = []
@@ -185,7 +182,7 @@
end_time=datetime.datetime.fromtimestamp(end_time),
)

metric = {"name": metric_query["metricName"], "values": []}
metric = {"name": metric_query["metricName"], "values": [], }
for returned_metric in metrics_result:
if "values" in returned_metric:
for value in returned_metric["values"]:
@@ -195,7 +192,7 @@
pass
metrics_list.append(metric)

- if elastic_collect_metrics and elastic:
+ if elastic:
result = elastic.upload_metrics_to_elasticsearch(
run_uuid=run_uuid, index=elastic_metrics_index, raw_data=metrics_list
)
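The practical effect in metrics() (and likewise in alerts()) is that the Elasticsearch upload is now gated only on whether an elastic client was passed in, not on a separate collect flag. A rough sketch of the resulting gate, reusing the upload_metrics_to_elasticsearch call shown above; the wrapper function name and the logging here are illustrative, not part of the commit:

import logging

def push_metrics(elastic, run_uuid, elastic_metrics_index, metrics_list):
    # Formerly gated on elastic_collect_metrics as well; with that flag gone,
    # configuring an Elasticsearch client is enough to trigger the upload.
    if elastic:
        elastic.upload_metrics_to_elasticsearch(
            run_uuid=run_uuid, index=elastic_metrics_index, raw_data=metrics_list
        )
    else:
        logging.debug("no elasticsearch client configured, skipping metrics upload")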
run_kraken.py (14 changes: 3 additions & 11 deletions)
@@ -86,13 +86,6 @@ def main(cfg) -> int:
)
# elastic search
enable_elastic = get_yaml_item_value(config["elastic"], "enable_elastic", False)
- elastic_collect_metrics = get_yaml_item_value(
- config["elastic"], "collect_metrics", False
- )
-
- elastic_colllect_alerts = get_yaml_item_value(
- config["elastic"], "collect_alerts", False
- )

elastic_url = get_yaml_item_value(config["elastic"], "elastic_url", "")

@@ -193,13 +186,14 @@ def main(cfg) -> int:
if not prometheus_url:
try:
connection_data = ocpcli.get_prometheus_api_connection_data()
+ logging.info('connection data' + str(connection_data.endpoint))
if connection_data:
prometheus_url = connection_data.endpoint
prometheus_bearer_token = connection_data.token
else:
# If can't make a connection, set alerts to false
enable_alerts = False
- critical_alerts = False
+ check_critical_alerts = False
except Exception:
logging.error(
"invalid distribution selected, running openshift scenarios against kubernetes cluster."
@@ -466,7 +460,6 @@ def main(cfg) -> int:
start_time,
end_time,
alert_profile,
- elastic_colllect_alerts,
elastic_alerts_index,
)

@@ -478,11 +471,10 @@ def main(cfg) -> int:
prometheus_plugin.metrics(
prometheus,
elastic_search,
- start_time,
run_uuid,
+ start_time,
end_time,
metrics_profile,
- elastic_collect_metrics,
elastic_metrics_index,
)

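Besides dropping the two collect_* reads and correcting the check_critical_alerts variable, the last hunk reorders the metrics() call so run_uuid comes before start_time, matching the positional parameters of metrics() in krkn/prometheus/client.py. A small runnable sketch of why the order matters; the parameter names ahead of start_time are reconstructed from the hunks above, and the profile and index values in the demo are placeholders:

import datetime
import uuid

# Reconstructed shape of the signature (names before start_time are assumed,
# the remaining parameters appear in the client.py hunk above).
def metrics(prom_cli, elastic, run_uuid, start_time, end_time,
            metrics_profile, elastic_metrics_index):
    # Only here to show which positional argument lands in which parameter.
    print(f"run_uuid={run_uuid} start_time={start_time}")

run_uuid = str(uuid.uuid4())
start_time = datetime.datetime.now().timestamp()
end_time = start_time + 60

# Before this commit the call passed start_time ahead of run_uuid, so the
# chaos start timestamp landed in the run_uuid slot and vice versa:
metrics(None, None, start_time, run_uuid, end_time, "metrics.yaml", "krkn-metrics")

# After this commit the positional order lines up with the signature:
metrics(None, None, run_uuid, start_time, end_time, "metrics.yaml", "krkn-metrics")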
