Commit

Merge pull request #69 from ropable/master
Logging, exception handling, update minor dependencies, update GitHub workflows
ropable authored Oct 14, 2024
2 parents bbb023c + e6382e7 commit 1e4b513
Showing 7 changed files with 148 additions and 55 deletions.
2 changes: 0 additions & 2 deletions .github/dependabot.yml
@@ -5,8 +5,6 @@ updates:
     schedule:
       interval: "weekly"
   - package-ecosystem: "github-actions"
-    # Workflow files stored in the
-    # default location of `.github/workflows`
     directory: "/"
     schedule:
       interval: "weekly"
33 changes: 28 additions & 5 deletions .github/workflows/image-build-scan.yml
@@ -21,14 +21,14 @@ jobs:
      contents: read
      packages: write
      security-events: write
      # Only required for workflows in private repositories
      actions: read
    steps:
      #----------------------------------------------
      # Checkout repo
      #----------------------------------------------
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      #----------------------------------------------
      # Set up Docker BuildX environment
      #----------------------------------------------
@@ -64,6 +64,29 @@ jobs:
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
  # NOTE: code scanning is not available for private repos without using
  # GitHub Enterprise Cloud. Reference:
  # https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning-for-a-repository
  scan:
    name: Image vulnerability scan
    runs-on: ubuntu-latest
    needs: [build]
    permissions:
      contents: read
      packages: read
      security-events: write
    steps:
      #----------------------------------------------
      # Run vulnerability scan on built image
      #----------------------------------------------
      - name: Run Trivy vulnerability scanner
        uses: aquasecurity/trivy-action@master
        with:
          scan-type: 'image'
          scanners: 'vuln'
          image-ref: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          vuln-type: 'os,library'
          severity: 'HIGH,CRITICAL'
          format: 'sarif'
          output: 'trivy-results.sarif'
      - name: Upload Trivy scan results to GitHub Security tab
        uses: github/codeql-action/upload-sarif@v3
        with:
          sarif_file: 'trivy-results.sarif'
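
Note: the new scan job reads the image back from the registry (image-ref carries no explicit tag here) and publishes the SARIF report so findings surface in the repository's Security tab; the job itself stays green regardless of findings. A hypothetical hardening — using trivy-action's exit-code input, not anything in this commit — would fail the job when HIGH/CRITICAL vulnerabilities are found:

      - name: Run Trivy vulnerability scanner
        uses: aquasecurity/trivy-action@master
        with:
          scan-type: 'image'
          image-ref: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          severity: 'HIGH,CRITICAL'
          exit-code: '1'  # hypothetical: non-zero exit fails the workflow on findings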
25 changes: 25 additions & 0 deletions .github/workflows/secret-scan.yml
@@ -0,0 +1,25 @@
name: "Scan project for secrets & sensitive information"

on:
push:
branches:
- master
pull_request:
branches:
- master

jobs:
secret-scan:
name: Scan project for secrets
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Secret scanning
uses: trufflesecurity/trufflehog@main
with:
base: ""
head: ${{ github.ref_name }}
extra_args: --only-verified
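
Note: as configured, the action scans the full checked-out history (base is empty and the checkout step fetches with fetch-depth: 0), and --only-verified restricts output to credentials TruffleHog could actively verify. A hypothetical alternative, based on the action's documented base/head inputs rather than this commit, would scan only the commits a pull request introduces:

      - name: Secret scanning (PR diff only)
        uses: trufflesecurity/trufflehog@main
        with:
          base: ${{ github.event.repository.default_branch }}
          head: HEAD
          extra_args: --only-verified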
46 changes: 23 additions & 23 deletions kustomize/overlays/prod/kustomization.yaml
@@ -1,23 +1,23 @@
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-nameSuffix: -prod
-resources:
-  - ../../base
-  - ingress.yaml
-  - pdb.yaml
-secretGenerator:
-  - name: healthcheck-env
-    type: Opaque
-    envs:
-      - .env
-labels:
-  - includeSelectors: true
-    pairs:
-      variant: prod
-patches:
-  - path: deployment_patch.yaml
-  - path: deployment_hpa_patch.yaml
-  - path: service_patch.yaml
-images:
-  - name: ghcr.io/dbca-wa/healthcheck
-    newTag: 1.3.2
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+nameSuffix: -prod
+resources:
+  - ../../base
+  - ingress.yaml
+  - pdb.yaml
+secretGenerator:
+  - name: healthcheck-env
+    type: Opaque
+    envs:
+      - .env
+labels:
+  - includeSelectors: true
+    pairs:
+      variant: prod
+patches:
+  - path: deployment_patch.yaml
+  - path: deployment_hpa_patch.yaml
+  - path: service_patch.yaml
+images:
+  - name: ghcr.io/dbca-wa/healthcheck
+    newTag: 1.3.3
14 changes: 7 additions & 7 deletions poetry.lock

Some generated files are not rendered by default.

6 changes: 3 additions & 3 deletions pyproject.toml
@@ -1,15 +1,15 @@
 [tool.poetry]
 name = "healthcheck"
-version = "1.3.2"
+version = "1.3.3"
 description = "Internal service endpoint health check for Spatial Support System"
 authors = ["ASI <[email protected]>"]
 package-mode = false

 [tool.poetry.dependencies]
 python = "^3.12"
-bottle = "0.13.1"
+bottle = "0.13.2"
 requests = "2.32.3"
-pytz = "2024.1"
+pytz = "2024.2"
 python-dotenv = "1.0.1"
 gunicorn = "23.0.0"
 humanize = "4.11.0"
77 changes: 62 additions & 15 deletions status.py
@@ -1,5 +1,7 @@
 import json
+import logging
 import os
+import sys
 from datetime import datetime
 from zoneinfo import ZoneInfo

@@ -16,6 +18,15 @@
 app = application = Bottle()


+# Configure logging.
+LOGGER = logging.getLogger()
+LOGGER.setLevel(logging.INFO)
+formatter = logging.Formatter("%(asctime)s | %(levelname)s | %(message)s")
+handler = logging.StreamHandler(sys.stdout)
+handler.setLevel(logging.INFO)
+handler.setFormatter(formatter)
+LOGGER.addHandler(handler)
+
 TZ = ZoneInfo(os.environ.get("TZ", "Australia/Perth"))
 OUTPUT_TEMPLATE = """<!DOCTYPE html>
 <html lang="en">
@@ -83,10 +94,7 @@ def get_session():

 def healthcheck():
     """Query HTTP sources and derive a dictionary of response successes."""
-    d = {
-        "server_time": datetime.now().astimezone(TZ).isoformat(timespec="seconds"),
-        "success": True,
-    }
+    d = {"server_time": datetime.now().astimezone(TZ).isoformat(timespec="seconds"), "success": True, "errors": []}

     session = get_session()

@@ -99,7 +107,10 @@ def healthcheck():
         d["latest_point_delay"] = trackingdata["objects"][0]["age_minutes"]
         if trackingdata["objects"][0]["age_minutes"] > TRACKING_POINTS_MAX_DELAY:
             d["success"] = False
-    except Exception:
+    except Exception as e:
+        LOGGER.warning(f"Error querying Resource Tracking: {SSS_DEVICES_URL}")
+        LOGGER.warning(e)
+        d["errors"].append(f"Error querying Resource Tracking: {SSS_DEVICES_URL}")
         d["latest_point"] = None
         d["latest_point_delay"] = None
         d["success"] = False
@@ -113,7 +124,10 @@ def healthcheck():
         d["iridium_latest_point_delay"] = trackingdata["objects"][0]["age_minutes"]
         if trackingdata["objects"][0]["age_minutes"] > TRACKING_POINTS_MAX_DELAY:
             d["success"] = False
-    except Exception:
+    except Exception as e:
+        LOGGER.warning(f"Error querying Resource Tracking: {SSS_IRIDIUM_URL}")
+        LOGGER.warning(e)
+        d["errors"].append(f"Error querying Resource Tracking: {SSS_IRIDIUM_URL}")
         d["iridium_latest_point"] = None
         d["iridium_latest_point_delay"] = None
         d["success"] = False
@@ -125,7 +139,10 @@ def healthcheck():
         t = datetime.fromisoformat(trackingdata["objects"][0]["seen"]).astimezone(TZ)
         d["tracplus_latest_point"] = t.isoformat()
         d["tracplus_latest_point_delay"] = trackingdata["objects"][0]["age_minutes"]
-    except Exception:
+    except Exception as e:
+        LOGGER.warning(f"Error querying Resource Tracking: {SSS_TRACPLUS_URL}")
+        LOGGER.warning(e)
+        d["errors"].append(f"Error querying Resource Tracking: {SSS_TRACPLUS_URL}")
         d["tracplus_latest_point"] = None
         d["tracplus_latest_point_delay"] = None
         d["success"] = False
@@ -137,7 +154,10 @@ def healthcheck():
         t = datetime.fromisoformat(trackingdata["objects"][0]["seen"]).astimezone(TZ)
         d["dfes_latest_point"] = t.isoformat()
         d["dfes_latest_point_delay"] = trackingdata["objects"][0]["age_minutes"]
-    except Exception:
+    except Exception as e:
+        LOGGER.warning(f"Error querying Resource Tracking: {SSS_DFES_URL}")
+        LOGGER.warning(e)
+        d["errors"].append(f"Error querying Resource Tracking: {SSS_DFES_URL}")
         d["dfes_latest_point"] = None
         d["dfes_latest_point_delay"] = None
         d["success"] = False
@@ -151,7 +171,10 @@ def healthcheck():
         d["fleetcare_latest_point_delay"] = trackingdata["objects"][0]["age_minutes"]
         if trackingdata["objects"][0]["age_minutes"] > TRACKING_POINTS_MAX_DELAY:
             d["success"] = False
-    except Exception:
+    except Exception as e:
+        LOGGER.warning(f"Error querying Resource Tracking: {SSS_FLEETCARE_URL}")
+        LOGGER.warning(e)
+        d["errors"].append(f"Error querying Resource Tracking: {SSS_FLEETCARE_URL}")
         d["fleetcare_latest_point"] = None
         d["fleetcare_latest_point_delay"] = None
         d["success"] = False
@@ -161,7 +184,10 @@ def healthcheck():
         resp.raise_for_status()
         j = resp.json()
         d["csw_catalogue_count"] = len(j)
-    except Exception:
+    except Exception as e:
+        LOGGER.warning(f"Error querying CSW API: {CSW_API}")
+        LOGGER.warning(e)
+        d["errors"].append(f"Error querying CSW API: {CSW_API}")
         d["csw_catalogue_count"] = None
         d["success"] = False

@@ -179,7 +205,10 @@ def healthcheck():
         root = ET.fromstring(resp.content)
         resp_d = {i[0]: i[1] for i in root.items()}
         d["todays_burns_count"] = int(resp_d["numberOfFeatures"])
-    except Exception:
+    except Exception as e:
+        LOGGER.warning("Error querying KMI WFS (public:todays_burns)")
+        LOGGER.warning(e)
+        d["errors"].append("Error querying KMI WFS (public:todays_burns)")
         d["todays_burns_count"] = None
         d["success"] = False

@@ -191,7 +220,10 @@ def healthcheck():
         ns = {"wmts": "http://www.opengis.net/wmts/1.0", "ows": "http://www.opengis.net/ows/1.1"}
         layers = root.findall(".//wmts:Layer", ns)
         d["kmi_wmts_layer_count"] = len(layers)
-    except Exception:
+    except Exception as e:
+        LOGGER.warning("Error querying KMI WMTS layer count")
+        LOGGER.warning(e)
+        d["errors"].append("Error querying KMI WMTS layer count")
         d["kmi_wmts_layer_count"] = None
         d["success"] = False

@@ -200,7 +232,10 @@ def healthcheck():
         resp.raise_for_status()
         j = resp.json()
         d["bfrs_profile_api_endpoint"] = True
-    except Exception:
+    except Exception as e:
+        LOGGER.warning(f"Error querying BFRS API endpoint: {BFRS_URL}")
+        LOGGER.warning(e)
+        d["errors"].append(f"Error querying BFRS API endpoint: {BFRS_URL}")
         d["bfrs_profile_api_endpoint"] = None
         d["success"] = False

@@ -259,7 +294,10 @@ def healthcheck():
         resp.raise_for_status()
         j = resp.json()
         d["auth2_status"] = j["healthy"]
-    except Exception:
+    except Exception as e:
+        LOGGER.warning(f"Error querying Auth2 status API endpoint: {AUTH2_STATUS_URL}")
+        LOGGER.warning(e)
+        d["errors"].append(f"Error querying Auth2 status API endpoint: {AUTH2_STATUS_URL}")
         d["auth2_status"] = None
         d["success"] = False

@@ -273,7 +311,16 @@ def healthcheck_json():
     if CACHE_RESPONSE:
         # Mark response as "cache for 60 seconds".
         response.set_header("Cache-Control", "max-age=60")
-    return json.dumps(d)
+
+    try:
+        return json.dumps(d)
+    except Exception as e:
+        LOGGER.warning("Error serialising healthcheck response as JSON")
+        LOGGER.warning(e)
+        return {
+            "server_time": datetime.now().astimezone(TZ).isoformat(timespec="seconds"),
+            "success": False,
+        }


# Retain legacy health check route for PRTG.
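
Note: the status.py changes follow a single pattern — a module-level logger writing to stdout, and per-source except blocks that log the failure, append a human-readable message to d["errors"], null out the derived keys, and flip the overall success flag. A minimal, self-contained sketch of that pattern (check_source and SOURCE_URL are illustrative names, not from the codebase):

    import logging
    import sys
    from datetime import datetime
    from zoneinfo import ZoneInfo

    # Same handler setup as status.py: root logger at INFO, formatted, to stdout.
    LOGGER = logging.getLogger()
    LOGGER.setLevel(logging.INFO)
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(logging.INFO)
    handler.setFormatter(logging.Formatter("%(asctime)s | %(levelname)s | %(message)s"))
    LOGGER.addHandler(handler)

    TZ = ZoneInfo("Australia/Perth")
    SOURCE_URL = "https://example.com/api"  # hypothetical endpoint


    def check_source(d):
        # Per-source pattern: warn, record the error, null the keys, flip the flag.
        try:
            raise TimeoutError("simulated request failure")  # stand-in for a real HTTP call
        except Exception as e:
            LOGGER.warning(f"Error querying source: {SOURCE_URL}")
            LOGGER.warning(e)
            d["errors"].append(f"Error querying source: {SOURCE_URL}")
            d["latest_point"] = None
            d["success"] = False


    d = {"server_time": datetime.now().astimezone(TZ).isoformat(timespec="seconds"), "success": True, "errors": []}
    check_source(d)
    print(d)  # success is False and errors lists the failed source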
