From 6689005544e67befd50dcc134bb4b6a6ef2e8c35 Mon Sep 17 00:00:00 2001 From: Mikolaj Pawlikowski Date: Sat, 5 Jun 2021 13:53:07 +0100 Subject: [PATCH] K8s autodiscovery (#453) * Add a new dependency on Kubernetes package * Add and store a new flag about automatic nodes discovery from a pod * Implement the listing of nodes * Add tests to cover the k8s node listing * Fix the k8s listing test to ensure the load incluster function is actually called * Add more help to the k8s node discovery flags, and cross-reference them. * Add a note on the Kubernetes auto-discovery in the main README file * Move the kubernetes discovery from conf to modules/discovery * When running with --pods, run the Kubernetes auto discovery * Also mention that the auto discovery is always on when using --pod Co-authored-by: Mikolaj Pawlikowski --- README.md | 6 ++++ kube_hunter/__main__.py | 4 ++- kube_hunter/conf/__init__.py | 2 ++ kube_hunter/conf/parser.py | 20 ++++++++++++ kube_hunter/modules/discovery/hosts.py | 7 +++++ .../modules/discovery/kubernetes_client.py | 27 ++++++++++++++++ setup.cfg | 1 + tests/discovery/test_k8s.py | 31 +++++++++++++++++++ 8 files changed, 97 insertions(+), 1 deletion(-) create mode 100644 kube_hunter/modules/discovery/kubernetes_client.py create mode 100644 tests/discovery/test_k8s.py diff --git a/README.md b/README.md index d735e8fc..52c2e6df 100644 --- a/README.md +++ b/README.md @@ -76,6 +76,12 @@ To specify interface scanning, you can use the `--interface` option (this will s To specify a specific CIDR to scan, use the `--cidr` option. Example: `kube-hunter --cidr 192.168.0.0/24` +4. **Kubernetes node auto-discovery** + +Set `--k8s-auto-discover-nodes` flag to query Kubernetes for all nodes in the cluster, and then attempt to scan them all. By default, it will use [in-cluster config](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) to connect to the Kubernetes API. 
If you'd like to use an explicit kubeconfig file, set `--kubeconfig /location/of/kubeconfig/file`. + +Also note that this is always done when using `--pod` mode. + ### Active Hunting Active hunting is an option in which kube-hunter will exploit vulnerabilities it finds, to explore for further vulnerabilities. diff --git a/kube_hunter/__main__.py b/kube_hunter/__main__.py index 9002bee3..2b503dd4 100755 --- a/kube_hunter/__main__.py +++ b/kube_hunter/__main__.py @@ -25,6 +25,8 @@ quick=args.quick, remote=args.remote, statistics=args.statistics, + k8s_auto_discover_nodes=args.k8s_auto_discover_nodes, + kubeconfig=args.kubeconfig, ) setup_logger(args.log, args.log_file) set_config(config) @@ -88,7 +90,7 @@ def list_hunters(): def main(): global hunt_started - scan_options = [config.pod, config.cidr, config.remote, config.interface] + scan_options = [config.pod, config.cidr, config.remote, config.interface, config.k8s_auto_discover_nodes] try: if args.list: list_hunters() diff --git a/kube_hunter/conf/__init__.py b/kube_hunter/conf/__init__.py index 853b0cd0..f931518e 100644 --- a/kube_hunter/conf/__init__.py +++ b/kube_hunter/conf/__init__.py @@ -36,6 +36,8 @@ class Config: remote: Optional[str] = None reporter: Optional[Any] = None statistics: bool = False + k8s_auto_discover_nodes: bool = False + kubeconfig: Optional[str] = None _config: Optional[Config] = None diff --git a/kube_hunter/conf/parser.py b/kube_hunter/conf/parser.py index 7d240826..3590b3f9 100644 --- a/kube_hunter/conf/parser.py +++ b/kube_hunter/conf/parser.py @@ -46,6 +46,26 @@ def parser_add_arguments(parser): help="One or more remote ip/dns to hunt", ) + parser.add_argument( + "--k8s-auto-discover-nodes", + action="store_true", + help="Enables automatic detection of all nodes in a Kubernetes cluster " + "by querying the Kubernetes API server. " + "It supports both in-cluster config (when running as a pod), " + "and a specific kubectl config file (use --kubeconfig to set this). 
" + "By default, when this flag is set, it will use in-cluster config. " + "NOTE: this is automatically switched on in --pod mode." + ) + + parser.add_argument( + "--kubeconfig", + type=str, + metavar="KUBECONFIG", + default=None, + help="Specify the kubeconfig file to use for Kubernetes nodes auto discovery " + " (to be used in conjuction with the --k8s-auto-discover-nodes flag." + ) + parser.add_argument("--active", action="store_true", help="Enables active hunting") parser.add_argument( diff --git a/kube_hunter/modules/discovery/hosts.py b/kube_hunter/modules/discovery/hosts.py index 440a8055..5c83d577 100644 --- a/kube_hunter/modules/discovery/hosts.py +++ b/kube_hunter/modules/discovery/hosts.py @@ -8,6 +8,7 @@ from netifaces import AF_INET, ifaddresses, interfaces, gateways from kube_hunter.conf import get_config +from kube_hunter.modules.discovery.kubernetes_client import list_all_k8s_cluster_nodes from kube_hunter.core.events import handler from kube_hunter.core.events.types import Event, NewHostEvent, Vulnerability from kube_hunter.core.types import Discovery, InformationDisclosure, AWS, Azure @@ -114,6 +115,9 @@ def __init__(self, event): def execute(self): config = get_config() + # Attempt to read all hosts from the Kubernetes API + for host in list_all_k8s_cluster_nodes(config.kubeconfig): + self.publish_event(NewHostEvent(host=host)) # Scan any hosts that the user specified if config.remote or config.cidr: self.publish_event(HostScanEvent()) @@ -298,6 +302,9 @@ def execute(self): elif len(config.remote) > 0: for host in config.remote: self.publish_event(NewHostEvent(host=host)) + elif config.k8s_auto_discover_nodes: + for host in list_all_k8s_cluster_nodes(config.kubeconfig): + self.publish_event(NewHostEvent(host=host)) # for normal scanning def scan_interfaces(self): diff --git a/kube_hunter/modules/discovery/kubernetes_client.py b/kube_hunter/modules/discovery/kubernetes_client.py new file mode 100644 index 00000000..e771ac25 --- /dev/null +++ 
b/kube_hunter/modules/discovery/kubernetes_client.py @@ -0,0 +1,27 @@ +import logging +import kubernetes + + +def list_all_k8s_cluster_nodes(kube_config=None, client=None): + logger = logging.getLogger(__name__) + try: + if kube_config: + logger.info("Attempting to use kubeconfig file: %s", kube_config) + kubernetes.config.load_kube_config(config_file=kube_config) + else: + logger.info("Attempting to use in cluster Kubernetes config") + kubernetes.config.load_incluster_config() + except kubernetes.config.config_exception.ConfigException: + logger.exception("Failed to initiate Kubernetes client") + return + + try: + if client is None: + client = kubernetes.client.CoreV1Api() + ret = client.list_node(watch=False) + logger.info("Listed %d nodes in the cluster" % len(ret.items)) + for item in ret.items: + for addr in item.status.addresses: + yield addr.address + except: + logger.exception("Failed to list nodes from Kubernetes") diff --git a/setup.cfg b/setup.cfg index 9c11bbfd..53a2f8ac 100644 --- a/setup.cfg +++ b/setup.cfg @@ -41,6 +41,7 @@ install_requires = packaging dataclasses pluggy + kubernetes==12.0.1 setup_requires = setuptools>=30.3.0 setuptools_scm diff --git a/tests/discovery/test_k8s.py b/tests/discovery/test_k8s.py new file mode 100644 index 00000000..f7e74c7a --- /dev/null +++ b/tests/discovery/test_k8s.py @@ -0,0 +1,31 @@ +from kube_hunter.conf import Config, set_config + +set_config(Config()) + +from kube_hunter.modules.discovery.kubernetes_client import list_all_k8s_cluster_nodes +from unittest.mock import MagicMock, patch + + + +def test_client_yields_ips(): + client = MagicMock() + response = MagicMock() + client.list_node.return_value = response + response.items = [MagicMock(), MagicMock()] + response.items[0].status.addresses = [MagicMock(), MagicMock()] + response.items[0].status.addresses[0].address = "127.0.0.1" + response.items[0].status.addresses[1].address = "127.0.0.2" + response.items[1].status.addresses = [MagicMock()] + 
response.items[1].status.addresses[0].address = "127.0.0.3" + + with patch('kubernetes.config.load_incluster_config') as m: + output = list(list_all_k8s_cluster_nodes(client=client)) + m.assert_called_once() + + assert output == ["127.0.0.1", "127.0.0.2", "127.0.0.3"] + + +def test_client_uses_kubeconfig(): + with patch('kubernetes.config.load_kube_config') as m: + list(list_all_k8s_cluster_nodes(kube_config="/location", client=MagicMock())) + m.assert_called_once_with(config_file="/location")