From 1fdb6386c4bb42748530d7a9bf58ded644d77749 Mon Sep 17 00:00:00 2001 From: htuch Date: Mon, 16 Apr 2018 14:56:15 -0400 Subject: [PATCH] Reconcile envoyproxy/data-plane-api and envoyproxy/envoy (#3036) This PR implements the planned merge of envoyproxy/data-plane-api into envoyproxy/envoy as described in #2934 and https://groups.google.com/forum/?utm_medium=email&utm_source=footer#!topic/envoy-dev/KcVHFH-zQwQ. Risk Level: Medium (there might be unintentional breakage of dependent builds). Testing: CI passes. There is now an additional bazel.api do_ci.sh target to build and run API tests. Fixes #2934. Signed-off-by: Harvey Tuch --- .circleci/config.yml | 15 +- BUILD | 2 + CONTRIBUTING.md | 5 +- GOVERNANCE.md | 30 +- PULL_REQUEST_TEMPLATE.md | 21 +- README.md | 3 +- REPO_LAYOUT.md | 5 +- VERSION | 1 + api/API_OVERVIEW.md | 128 + api/BUILD | 0 api/CONTRIBUTING.md | 77 + api/README.md | 17 + api/STYLE.md | 133 + api/XDS_PROTOCOL.md | 300 ++ api/bazel/BUILD | 0 api/bazel/api_build_system.bzl | 144 + api/bazel/repositories.bzl | 225 + api/diagrams/ads.svg | 9 + api/diagrams/cds-eds-resources.svg | 8 + api/diagrams/eds-distinct-stream.svg | 10 + api/diagrams/eds-same-stream.svg | 6 + api/diagrams/envoy-perf-script.svg | 4795 +++++++++++++++++ api/diagrams/later-ack.svg | 8 + api/diagrams/simple-ack.svg | 6 + api/diagrams/simple-nack.svg | 6 + api/diagrams/stale-requests.svg | 11 + api/diagrams/update-race.svg | 11 + api/docs/BUILD | 57 + api/envoy/admin/v2/BUILD | 12 + api/envoy/admin/v2/config_dump.proto | 35 + api/envoy/api/v2/BUILD | 137 + api/envoy/api/v2/README.md | 9 + api/envoy/api/v2/auth/BUILD | 52 + api/envoy/api/v2/auth/auth.proto | 53 + api/envoy/api/v2/auth/cert.proto | 260 + api/envoy/api/v2/cds.proto | 437 ++ api/envoy/api/v2/cluster/BUILD | 35 + .../api/v2/cluster/circuit_breaker.proto | 52 + .../api/v2/cluster/outlier_detection.proto | 78 + api/envoy/api/v2/core/BUILD | 121 + api/envoy/api/v2/core/address.proto | 95 + api/envoy/api/v2/core/base.proto | 
184 + api/envoy/api/v2/core/config_source.proto | 87 + api/envoy/api/v2/core/grpc_service.proto | 105 + api/envoy/api/v2/core/health_check.proto | 202 + api/envoy/api/v2/core/http_uri.proto | 47 + api/envoy/api/v2/core/protocol.proto | 84 + api/envoy/api/v2/discovery.proto | 95 + api/envoy/api/v2/eds.proto | 63 + api/envoy/api/v2/endpoint/BUILD | 47 + api/envoy/api/v2/endpoint/endpoint.proto | 119 + api/envoy/api/v2/endpoint/load_report.proto | 96 + api/envoy/api/v2/lds.proto | 169 + api/envoy/api/v2/listener/BUILD | 24 + api/envoy/api/v2/listener/listener.proto | 133 + api/envoy/api/v2/ratelimit/BUILD | 14 + api/envoy/api/v2/ratelimit/ratelimit.proto | 62 + api/envoy/api/v2/rds.proto | 89 + api/envoy/api/v2/route/BUILD | 24 + api/envoy/api/v2/route/route.proto | 896 +++ api/envoy/config/README.md | 3 + api/envoy/config/accesslog/v2/BUILD | 11 + api/envoy/config/accesslog/v2/als.proto | 42 + api/envoy/config/bootstrap/v2/BUILD | 37 + api/envoy/config/bootstrap/v2/bootstrap.proto | 215 + api/envoy/config/filter/README.md | 24 + api/envoy/config/filter/accesslog/v2/BUILD | 33 + .../filter/accesslog/v2/accesslog.proto | 438 ++ api/envoy/config/filter/fault/v2/BUILD | 12 + api/envoy/config/filter/fault/v2/fault.proto | 40 + api/envoy/config/filter/http/buffer/v2/BUILD | 8 + .../config/filter/http/buffer/v2/buffer.proto | 24 + .../filter/http/ext_authz/v2alpha/BUILD | 12 + .../http/ext_authz/v2alpha/ext_authz.proto | 34 + api/envoy/config/filter/http/fault/v2/BUILD | 12 + .../config/filter/http/fault/v2/fault.proto | 59 + api/envoy/config/filter/http/gzip/v2/BUILD | 8 + .../config/filter/http/gzip/v2/gzip.proto | 71 + .../config/filter/http/health_check/v2/BUILD | 19 + .../http/health_check/v2/health_check.proto | 33 + .../config/filter/http/ip_tagging/v2/BUILD | 9 + .../http/ip_tagging/v2/ip_tagging.proto | 48 + .../filter/http/jwt_authn/v2alpha/BUILD | 13 + .../filter/http/jwt_authn/v2alpha/README.md | 31 + .../http/jwt_authn/v2alpha/config.proto | 226 + 
api/envoy/config/filter/http/lua/v2/BUILD | 8 + api/envoy/config/filter/http/lua/v2/lua.proto | 17 + .../config/filter/http/rate_limit/v2/BUILD | 8 + .../http/rate_limit/v2/rate_limit.proto | 37 + api/envoy/config/filter/http/router/v2/BUILD | 9 + .../config/filter/http/router/v2/router.proto | 29 + api/envoy/config/filter/http/squash/v2/BUILD | 8 + .../config/filter/http/squash/v2/squash.proto | 51 + .../config/filter/http/transcoder/v2/BUILD | 8 + .../http/transcoder/v2/transcoder.proto | 63 + .../filter/network/client_ssl_auth/v2/BUILD | 9 + .../client_ssl_auth/v2/client_ssl_auth.proto | 37 + .../config/filter/network/ext_authz/v2/BUILD | 9 + .../network/ext_authz/v2/ext_authz.proto | 28 + .../network/http_connection_manager/v2/BUILD | 29 + .../v2/http_connection_manager.proto | 306 ++ .../filter/network/mongo_proxy/v2/BUILD | 9 + .../network/mongo_proxy/v2/mongo_proxy.proto | 28 + .../config/filter/network/rate_limit/v2/BUILD | 9 + .../network/rate_limit/v2/rate_limit.proto | 29 + .../filter/network/redis_proxy/v2/BUILD | 8 + .../network/redis_proxy/v2/redis_proxy.proto | 37 + .../config/filter/network/tcp_proxy/v2/BUILD | 13 + .../network/tcp_proxy/v2/tcp_proxy.proto | 134 + .../config/health_checker/redis/v2/BUILD | 8 + .../health_checker/redis/v2/redis.proto | 16 + api/envoy/config/metrics/v2/BUILD | 41 + .../config/metrics/v2/metrics_service.proto | 17 + api/envoy/config/metrics/v2/stats.proto | 202 + api/envoy/config/ratelimit/v2/BUILD | 22 + api/envoy/config/ratelimit/v2/rls.proto | 30 + api/envoy/config/trace/v2/BUILD | 22 + api/envoy/config/trace/v2/trace.proto | 77 + api/envoy/service/README.md | 3 + api/envoy/service/accesslog/v2/BUILD | 14 + api/envoy/service/accesslog/v2/als.proto | 66 + api/envoy/service/auth/v2alpha/BUILD | 24 + .../auth/v2alpha/attribute_context.proto | 133 + .../service/auth/v2alpha/external_auth.proto | 44 + api/envoy/service/discovery/v2/BUILD | 58 + api/envoy/service/discovery/v2/ads.proto | 30 + 
api/envoy/service/discovery/v2/hds.proto | 123 + api/envoy/service/discovery/v2/sds.proto | 25 + api/envoy/service/load_stats/v2/BUILD | 22 + api/envoy/service/load_stats/v2/lrs.proto | 68 + api/envoy/service/metrics/v2/BUILD | 15 + .../service/metrics/v2/metrics_service.proto | 39 + api/envoy/service/ratelimit/v2/BUILD | 22 + api/envoy/service/ratelimit/v2/rls.proto | 78 + api/envoy/service/trace/v2/BUILD | 14 + .../service/trace/v2/trace_service.proto | 42 + api/envoy/type/BUILD | 25 + api/envoy/type/percent.proto | 50 + api/envoy/type/range.proto | 20 + api/examples/service_envoy/BUILD | 6 + .../service_envoy/http_connection_manager.pb | 25 + api/examples/service_envoy/listeners.pb | 11 + api/test/build/BUILD | 39 + api/test/build/build_test.cc | 34 + api/test/build/go_build_test.go | 21 + api/test/validate/BUILD | 33 + api/test/validate/pgv_test.cc | 76 + api/tools/BUILD | 23 + api/tools/generate_listeners.py | 67 + api/tools/generate_listeners_test.py | 15 + bazel/EXTERNAL_DEPS.md | 3 - bazel/repositories.bzl | 34 +- bazel/repository_locations.bzl | 9 +- ci/README.md | 3 +- ci/api_mirror.sh | 46 + ci/do_ci.sh | 11 +- docs/README.md | 10 +- docs/build.sh | 111 + docs/conf.py | 221 + docs/publish.sh | 52 +- docs/requirements.txt | 19 + docs/root/_static/docker_compose_v0.1.svg | 4 + docs/root/_static/double_proxy.svg | 4 + docs/root/_static/front_proxy.svg | 4 + docs/root/_static/placeholder | 0 docs/root/_static/service_to_service.svg | 4 + docs/root/about_docs.rst | 20 + docs/root/api-v1/access_log.rst | 181 + docs/root/api-v1/admin.rst | 27 + docs/root/api-v1/api.rst | 19 + docs/root/api-v1/cluster_manager/cds.rst | 45 + docs/root/api-v1/cluster_manager/cluster.rst | 205 + .../cluster_circuit_breakers.rst | 64 + .../api-v1/cluster_manager/cluster_hc.rst | 91 + .../cluster_manager/cluster_manager.rst | 51 + .../cluster_outlier_detection.rst | 101 + .../cluster_ring_hash_lb_config.rst | 26 + .../api-v1/cluster_manager/cluster_ssl.rst | 82 + 
docs/root/api-v1/cluster_manager/outlier.rst | 15 + docs/root/api-v1/cluster_manager/sds.rst | 85 + .../api-v1/http_filters/buffer_filter.rst | 24 + docs/root/api-v1/http_filters/cors_filter.rst | 13 + .../api-v1/http_filters/dynamodb_filter.rst | 19 + .../root/api-v1/http_filters/fault_filter.rst | 94 + .../http_filters/grpc_http1_bridge_filter.rst | 13 + .../grpc_json_transcoder_filter.rst | 64 + .../api-v1/http_filters/grpc_web_filter.rst | 13 + .../http_filters/health_check_filter.rst | 28 + .../root/api-v1/http_filters/http_filters.rst | 8 + docs/root/api-v1/http_filters/lua_filter.rst | 21 + .../api-v1/http_filters/rate_limit_filter.rst | 39 + .../api-v1/http_filters/router_filter.rst | 28 + .../api-v1/http_filters/squash_filter.rst | 56 + docs/root/api-v1/listeners/lds.rst | 49 + docs/root/api-v1/listeners/listeners.rst | 238 + .../client_ssl_auth_filter.rst | 47 + .../api-v1/network_filters/echo_filter.rst | 13 + .../api-v1/network_filters/http_conn_man.rst | 260 + .../network_filters/mongo_proxy_filter.rst | 53 + .../network_filters/network_filters.rst | 8 + .../network_filters/rate_limit_filter.rst | 40 + .../network_filters/redis_proxy_filter.rst | 46 + .../network_filters/tcp_proxy_filter.rst | 126 + docs/root/api-v1/rate_limit.rst | 28 + docs/root/api-v1/route_config/rate_limits.rst | 183 + docs/root/api-v1/route_config/rds.rst | 63 + docs/root/api-v1/route_config/route.rst | 553 ++ .../root/api-v1/route_config/route_config.rst | 92 + docs/root/api-v1/route_config/vcluster.rst | 47 + docs/root/api-v1/route_config/vhost.rst | 84 + docs/root/api-v1/runtime.rst | 34 + docs/root/api-v1/tracing.rst | 69 + docs/root/api-v2/api.rst | 16 + docs/root/api-v2/bootstrap/bootstrap.rst | 12 + docs/root/api-v2/clusters/clusters.rst | 13 + .../common_messages/common_messages.rst | 15 + docs/root/api-v2/config/filter/filter.rst | 11 + docs/root/api-v2/config/filter/http/http.rst | 8 + .../api-v2/config/filter/network/network.rst | 8 + 
docs/root/api-v2/http_routes/http_routes.rst | 9 + docs/root/api-v2/listeners/listeners.rst | 9 + docs/root/api-v2/types/types.rst | 9 + docs/root/configuration/access_log.rst | 208 + .../configuration/cluster_manager/cds.rst | 31 + .../cluster_circuit_breakers.rst | 17 + .../cluster_manager/cluster_hc.rst | 73 + .../cluster_manager/cluster_manager.rst | 17 + .../cluster_manager/cluster_runtime.rst | 131 + .../cluster_manager/cluster_stats.rst | 218 + docs/root/configuration/configuration.rst | 22 + .../http_conn_man/header_sanitizing.rst | 35 + .../configuration/http_conn_man/headers.rst | 482 ++ .../http_conn_man/http_conn_man.rst | 20 + docs/root/configuration/http_conn_man/rds.rst | 30 + .../http_conn_man/route_matching.rst | 19 + .../configuration/http_conn_man/runtime.rst | 36 + .../configuration/http_conn_man/stats.rst | 126 + .../http_conn_man/traffic_splitting.rst | 145 + .../http_filters/buffer_filter.rst | 23 + .../http_filters/cors_filter.rst | 12 + .../http_filters/dynamodb_filter.rst | 71 + .../http_filters/fault_filter.rst | 92 + .../http_filters/grpc_http1_bridge_filter.rst | 50 + .../grpc_json_transcoder_filter.rst | 37 + .../http_filters/grpc_web_filter.rst | 11 + .../http_filters/gzip_filter.rst | 51 + .../http_filters/health_check_filter.rst | 17 + .../http_filters/http_filters.rst | 22 + .../http_filters/ip_tagging_filter.rst | 41 + .../configuration/http_filters/lua_filter.rst | 417 ++ .../http_filters/rate_limit_filter.rst | 126 + .../http_filters/router_filter.rst | 297 + .../http_filters/squash_filter.rst | 40 + .../listener_filters/listener_filters.rst | 11 + .../listener_filters/original_dst_filter.rst | 14 + docs/root/configuration/listeners/lds.rst | 50 + .../configuration/listeners/listeners.rst | 17 + docs/root/configuration/listeners/runtime.rst | 8 + docs/root/configuration/listeners/stats.rst | 47 + .../client_ssl_auth_filter.rst | 59 + .../network_filters/echo_filter.rst | 10 + .../network_filters/mongo_proxy_filter.rst | 176 + 
.../network_filters/network_filters.rst | 18 + .../network_filters/rate_limit_filter.rst | 39 + .../network_filters/redis_proxy_filter.rst | 69 + .../network_filters/tcp_proxy_filter.rst | 28 + .../configuration/overview/v1_overview.rst | 117 + .../configuration/overview/v2_overview.rst | 544 ++ docs/root/configuration/rate_limit.rst | 18 + docs/root/configuration/runtime.rst | 96 + docs/root/configuration/statistics.rst | 45 + .../root/configuration/tools/router_check.rst | 170 + docs/root/extending/extending.rst | 10 + docs/root/faq/binaries.rst | 4 + docs/root/faq/how_fast_is_envoy.rst | 12 + docs/root/faq/lb_panic_threshold.rst | 6 + docs/root/faq/overview.rst | 14 + docs/root/faq/sni.rst | 52 + docs/root/faq/zipkin_tracing.rst | 7 + docs/root/faq/zone_aware_routing.rst | 61 + docs/root/favicon.ico | Bin 0 -> 67646 bytes docs/root/index.rst | 25 + docs/root/install/building.rst | 62 + docs/root/install/install.rst | 11 + docs/root/install/ref_configs.rst | 58 + .../install/sandboxes/local_docker_build.rst | 35 + .../install/tools/config_load_check_tool.rst | 30 + .../install/tools/route_table_check_tool.rst | 65 + .../tools/schema_validator_check_tool.rst | 33 + docs/root/install/tools/tools.rst | 9 + .../intro/arch_overview/access_logging.rst | 19 + .../intro/arch_overview/arch_overview.rst | 38 + .../intro/arch_overview/circuit_breaking.rst | 38 + .../intro/arch_overview/cluster_manager.rst | 49 + .../arch_overview/connection_pooling.rst | 37 + docs/root/intro/arch_overview/draining.rst | 35 + .../arch_overview/dynamic_configuration.rst | 84 + docs/root/intro/arch_overview/dynamo.rst | 18 + .../arch_overview/global_rate_limiting.rst | 31 + docs/root/intro/arch_overview/grpc.rst | 68 + .../intro/arch_overview/health_checking.rst | 106 + docs/root/intro/arch_overview/hot_restart.rst | 28 + .../http_connection_management.rst | 44 + .../root/intro/arch_overview/http_filters.rst | 24 + .../root/intro/arch_overview/http_routing.rst | 126 + 
docs/root/intro/arch_overview/init.rst | 24 + .../intro/arch_overview/listener_filters.rst | 16 + docs/root/intro/arch_overview/listeners.rst | 28 + .../intro/arch_overview/load_balancing.rst | 477 ++ docs/root/intro/arch_overview/mongo.rst | 19 + .../intro/arch_overview/network_filters.rst | 22 + docs/root/intro/arch_overview/outlier.rst | 149 + docs/root/intro/arch_overview/redis.rst | 213 + docs/root/intro/arch_overview/runtime.rst | 16 + docs/root/intro/arch_overview/scripting.rst | 5 + .../intro/arch_overview/service_discovery.rst | 136 + docs/root/intro/arch_overview/ssl.rst | 97 + docs/root/intro/arch_overview/statistics.rst | 42 + docs/root/intro/arch_overview/tcp_proxy.rst | 18 + docs/root/intro/arch_overview/terminology.rst | 32 + .../intro/arch_overview/threading_model.rst | 13 + docs/root/intro/arch_overview/tracing.rst | 102 + docs/root/intro/arch_overview/websocket.rst | 36 + docs/root/intro/comparison.rst | 134 + .../deployment_types/deployment_types.rst | 12 + .../intro/deployment_types/double_proxy.rst | 26 + .../intro/deployment_types/front_proxy.rst | 26 + .../deployment_types/service_to_service.rst | 62 + docs/root/intro/getting_help.rst | 15 + docs/root/intro/intro.rst | 14 + docs/root/intro/version_history.rst | 418 ++ docs/root/intro/what_is_envoy.rst | 125 + docs/root/operations/admin.rst | 253 + docs/root/operations/cli.rst | 231 + docs/root/operations/fs_flags.rst | 13 + docs/root/operations/hot_restarter.rst | 37 + docs/root/operations/operations.rst | 14 + docs/root/operations/runtime.rst | 8 + docs/root/operations/stats_overview.rst | 13 + docs/root/start/distro/ambassador.rst | 125 + docs/root/start/sandboxes/front_proxy.rst | 228 + docs/root/start/sandboxes/grpc_bridge.rst | 68 + docs/root/start/sandboxes/jaeger_tracing.rst | 81 + docs/root/start/sandboxes/zipkin_tracing.rst | 83 + docs/root/start/start.rst | 163 + .../common/access_log/access_log_formatter.cc | 2 +- .../common/access_log/access_log_formatter.h | 2 +- 
source/common/common/BUILD | 2 +- source/extensions/access_loggers/file/BUILD | 2 +- .../filters/common/ext_authz/ext_authz_impl.h | 2 +- source/extensions/filters/http/buffer/BUILD | 2 +- source/extensions/filters/http/cors/BUILD | 2 +- source/extensions/filters/http/dynamo/BUILD | 2 +- .../extensions/filters/http/ext_authz/BUILD | 2 +- source/extensions/filters/http/fault/BUILD | 2 +- .../filters/http/grpc_http1_bridge/BUILD | 2 +- .../filters/http/grpc_json_transcoder/BUILD | 2 +- source/extensions/filters/http/grpc_web/BUILD | 2 +- source/extensions/filters/http/gzip/BUILD | 2 +- .../filters/http/health_check/BUILD | 2 +- .../extensions/filters/http/ip_tagging/BUILD | 2 +- source/extensions/filters/http/lua/BUILD | 2 +- .../extensions/filters/http/ratelimit/BUILD | 2 +- source/extensions/filters/http/router/BUILD | 2 +- source/extensions/filters/http/squash/BUILD | 2 +- .../filters/http/squash/squash_filter.cc | 3 +- .../filters/listener/original_dst/BUILD | 2 +- .../filters/network/client_ssl_auth/BUILD | 2 +- source/extensions/filters/network/echo/BUILD | 2 +- .../filters/network/ext_authz/BUILD | 2 +- .../network/http_connection_manager/BUILD | 2 +- .../filters/network/mongo_proxy/BUILD | 2 +- .../filters/network/ratelimit/BUILD | 2 +- .../filters/network/redis_proxy/BUILD | 2 +- .../filters/network/tcp_proxy/BUILD | 2 +- .../stat_sinks/metrics_service/BUILD | 2 +- sync.sh | 28 + tools/check_format.py | 37 +- tools/protodoc/BUILD | 11 + tools/protodoc/protodoc.bzl | 80 + tools/protodoc/protodoc.py | 722 +++ 381 files changed, 28242 insertions(+), 129 deletions(-) create mode 100644 VERSION create mode 100644 api/API_OVERVIEW.md create mode 100644 api/BUILD create mode 100644 api/CONTRIBUTING.md create mode 100644 api/README.md create mode 100644 api/STYLE.md create mode 100644 api/XDS_PROTOCOL.md create mode 100644 api/bazel/BUILD create mode 100644 api/bazel/api_build_system.bzl create mode 100644 api/bazel/repositories.bzl create mode 100644 
api/diagrams/ads.svg create mode 100644 api/diagrams/cds-eds-resources.svg create mode 100644 api/diagrams/eds-distinct-stream.svg create mode 100644 api/diagrams/eds-same-stream.svg create mode 100644 api/diagrams/envoy-perf-script.svg create mode 100644 api/diagrams/later-ack.svg create mode 100644 api/diagrams/simple-ack.svg create mode 100644 api/diagrams/simple-nack.svg create mode 100644 api/diagrams/stale-requests.svg create mode 100644 api/diagrams/update-race.svg create mode 100644 api/docs/BUILD create mode 100644 api/envoy/admin/v2/BUILD create mode 100644 api/envoy/admin/v2/config_dump.proto create mode 100644 api/envoy/api/v2/BUILD create mode 100644 api/envoy/api/v2/README.md create mode 100644 api/envoy/api/v2/auth/BUILD create mode 100644 api/envoy/api/v2/auth/auth.proto create mode 100644 api/envoy/api/v2/auth/cert.proto create mode 100644 api/envoy/api/v2/cds.proto create mode 100644 api/envoy/api/v2/cluster/BUILD create mode 100644 api/envoy/api/v2/cluster/circuit_breaker.proto create mode 100644 api/envoy/api/v2/cluster/outlier_detection.proto create mode 100644 api/envoy/api/v2/core/BUILD create mode 100644 api/envoy/api/v2/core/address.proto create mode 100644 api/envoy/api/v2/core/base.proto create mode 100644 api/envoy/api/v2/core/config_source.proto create mode 100644 api/envoy/api/v2/core/grpc_service.proto create mode 100644 api/envoy/api/v2/core/health_check.proto create mode 100644 api/envoy/api/v2/core/http_uri.proto create mode 100644 api/envoy/api/v2/core/protocol.proto create mode 100644 api/envoy/api/v2/discovery.proto create mode 100644 api/envoy/api/v2/eds.proto create mode 100644 api/envoy/api/v2/endpoint/BUILD create mode 100644 api/envoy/api/v2/endpoint/endpoint.proto create mode 100644 api/envoy/api/v2/endpoint/load_report.proto create mode 100644 api/envoy/api/v2/lds.proto create mode 100644 api/envoy/api/v2/listener/BUILD create mode 100644 api/envoy/api/v2/listener/listener.proto create mode 100644 
api/envoy/api/v2/ratelimit/BUILD create mode 100644 api/envoy/api/v2/ratelimit/ratelimit.proto create mode 100644 api/envoy/api/v2/rds.proto create mode 100644 api/envoy/api/v2/route/BUILD create mode 100644 api/envoy/api/v2/route/route.proto create mode 100644 api/envoy/config/README.md create mode 100644 api/envoy/config/accesslog/v2/BUILD create mode 100644 api/envoy/config/accesslog/v2/als.proto create mode 100644 api/envoy/config/bootstrap/v2/BUILD create mode 100644 api/envoy/config/bootstrap/v2/bootstrap.proto create mode 100644 api/envoy/config/filter/README.md create mode 100644 api/envoy/config/filter/accesslog/v2/BUILD create mode 100644 api/envoy/config/filter/accesslog/v2/accesslog.proto create mode 100644 api/envoy/config/filter/fault/v2/BUILD create mode 100644 api/envoy/config/filter/fault/v2/fault.proto create mode 100644 api/envoy/config/filter/http/buffer/v2/BUILD create mode 100644 api/envoy/config/filter/http/buffer/v2/buffer.proto create mode 100644 api/envoy/config/filter/http/ext_authz/v2alpha/BUILD create mode 100644 api/envoy/config/filter/http/ext_authz/v2alpha/ext_authz.proto create mode 100644 api/envoy/config/filter/http/fault/v2/BUILD create mode 100644 api/envoy/config/filter/http/fault/v2/fault.proto create mode 100644 api/envoy/config/filter/http/gzip/v2/BUILD create mode 100644 api/envoy/config/filter/http/gzip/v2/gzip.proto create mode 100644 api/envoy/config/filter/http/health_check/v2/BUILD create mode 100644 api/envoy/config/filter/http/health_check/v2/health_check.proto create mode 100644 api/envoy/config/filter/http/ip_tagging/v2/BUILD create mode 100644 api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto create mode 100644 api/envoy/config/filter/http/jwt_authn/v2alpha/BUILD create mode 100644 api/envoy/config/filter/http/jwt_authn/v2alpha/README.md create mode 100644 api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto create mode 100644 api/envoy/config/filter/http/lua/v2/BUILD create mode 100644 
api/envoy/config/filter/http/lua/v2/lua.proto create mode 100644 api/envoy/config/filter/http/rate_limit/v2/BUILD create mode 100644 api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto create mode 100644 api/envoy/config/filter/http/router/v2/BUILD create mode 100644 api/envoy/config/filter/http/router/v2/router.proto create mode 100644 api/envoy/config/filter/http/squash/v2/BUILD create mode 100644 api/envoy/config/filter/http/squash/v2/squash.proto create mode 100644 api/envoy/config/filter/http/transcoder/v2/BUILD create mode 100644 api/envoy/config/filter/http/transcoder/v2/transcoder.proto create mode 100644 api/envoy/config/filter/network/client_ssl_auth/v2/BUILD create mode 100644 api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto create mode 100644 api/envoy/config/filter/network/ext_authz/v2/BUILD create mode 100644 api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto create mode 100644 api/envoy/config/filter/network/http_connection_manager/v2/BUILD create mode 100644 api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto create mode 100644 api/envoy/config/filter/network/mongo_proxy/v2/BUILD create mode 100644 api/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto create mode 100644 api/envoy/config/filter/network/rate_limit/v2/BUILD create mode 100644 api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto create mode 100644 api/envoy/config/filter/network/redis_proxy/v2/BUILD create mode 100644 api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto create mode 100644 api/envoy/config/filter/network/tcp_proxy/v2/BUILD create mode 100644 api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto create mode 100644 api/envoy/config/health_checker/redis/v2/BUILD create mode 100644 api/envoy/config/health_checker/redis/v2/redis.proto create mode 100644 api/envoy/config/metrics/v2/BUILD create mode 100644 api/envoy/config/metrics/v2/metrics_service.proto 
create mode 100644 api/envoy/config/metrics/v2/stats.proto create mode 100644 api/envoy/config/ratelimit/v2/BUILD create mode 100644 api/envoy/config/ratelimit/v2/rls.proto create mode 100644 api/envoy/config/trace/v2/BUILD create mode 100644 api/envoy/config/trace/v2/trace.proto create mode 100644 api/envoy/service/README.md create mode 100644 api/envoy/service/accesslog/v2/BUILD create mode 100644 api/envoy/service/accesslog/v2/als.proto create mode 100644 api/envoy/service/auth/v2alpha/BUILD create mode 100644 api/envoy/service/auth/v2alpha/attribute_context.proto create mode 100644 api/envoy/service/auth/v2alpha/external_auth.proto create mode 100644 api/envoy/service/discovery/v2/BUILD create mode 100644 api/envoy/service/discovery/v2/ads.proto create mode 100644 api/envoy/service/discovery/v2/hds.proto create mode 100644 api/envoy/service/discovery/v2/sds.proto create mode 100644 api/envoy/service/load_stats/v2/BUILD create mode 100644 api/envoy/service/load_stats/v2/lrs.proto create mode 100644 api/envoy/service/metrics/v2/BUILD create mode 100644 api/envoy/service/metrics/v2/metrics_service.proto create mode 100644 api/envoy/service/ratelimit/v2/BUILD create mode 100644 api/envoy/service/ratelimit/v2/rls.proto create mode 100644 api/envoy/service/trace/v2/BUILD create mode 100644 api/envoy/service/trace/v2/trace_service.proto create mode 100644 api/envoy/type/BUILD create mode 100644 api/envoy/type/percent.proto create mode 100644 api/envoy/type/range.proto create mode 100644 api/examples/service_envoy/BUILD create mode 100644 api/examples/service_envoy/http_connection_manager.pb create mode 100644 api/examples/service_envoy/listeners.pb create mode 100644 api/test/build/BUILD create mode 100644 api/test/build/build_test.cc create mode 100644 api/test/build/go_build_test.go create mode 100644 api/test/validate/BUILD create mode 100644 api/test/validate/pgv_test.cc create mode 100644 api/tools/BUILD create mode 100644 api/tools/generate_listeners.py create 
mode 100644 api/tools/generate_listeners_test.py create mode 100755 ci/api_mirror.sh create mode 100755 docs/build.sh create mode 100644 docs/conf.py create mode 100644 docs/requirements.txt create mode 100644 docs/root/_static/docker_compose_v0.1.svg create mode 100644 docs/root/_static/double_proxy.svg create mode 100644 docs/root/_static/front_proxy.svg create mode 100644 docs/root/_static/placeholder create mode 100644 docs/root/_static/service_to_service.svg create mode 100644 docs/root/about_docs.rst create mode 100644 docs/root/api-v1/access_log.rst create mode 100644 docs/root/api-v1/admin.rst create mode 100644 docs/root/api-v1/api.rst create mode 100644 docs/root/api-v1/cluster_manager/cds.rst create mode 100644 docs/root/api-v1/cluster_manager/cluster.rst create mode 100644 docs/root/api-v1/cluster_manager/cluster_circuit_breakers.rst create mode 100644 docs/root/api-v1/cluster_manager/cluster_hc.rst create mode 100644 docs/root/api-v1/cluster_manager/cluster_manager.rst create mode 100644 docs/root/api-v1/cluster_manager/cluster_outlier_detection.rst create mode 100644 docs/root/api-v1/cluster_manager/cluster_ring_hash_lb_config.rst create mode 100644 docs/root/api-v1/cluster_manager/cluster_ssl.rst create mode 100644 docs/root/api-v1/cluster_manager/outlier.rst create mode 100644 docs/root/api-v1/cluster_manager/sds.rst create mode 100644 docs/root/api-v1/http_filters/buffer_filter.rst create mode 100644 docs/root/api-v1/http_filters/cors_filter.rst create mode 100644 docs/root/api-v1/http_filters/dynamodb_filter.rst create mode 100644 docs/root/api-v1/http_filters/fault_filter.rst create mode 100644 docs/root/api-v1/http_filters/grpc_http1_bridge_filter.rst create mode 100644 docs/root/api-v1/http_filters/grpc_json_transcoder_filter.rst create mode 100644 docs/root/api-v1/http_filters/grpc_web_filter.rst create mode 100644 docs/root/api-v1/http_filters/health_check_filter.rst create mode 100644 docs/root/api-v1/http_filters/http_filters.rst create 
mode 100644 docs/root/api-v1/http_filters/lua_filter.rst create mode 100644 docs/root/api-v1/http_filters/rate_limit_filter.rst create mode 100644 docs/root/api-v1/http_filters/router_filter.rst create mode 100644 docs/root/api-v1/http_filters/squash_filter.rst create mode 100644 docs/root/api-v1/listeners/lds.rst create mode 100644 docs/root/api-v1/listeners/listeners.rst create mode 100644 docs/root/api-v1/network_filters/client_ssl_auth_filter.rst create mode 100644 docs/root/api-v1/network_filters/echo_filter.rst create mode 100644 docs/root/api-v1/network_filters/http_conn_man.rst create mode 100644 docs/root/api-v1/network_filters/mongo_proxy_filter.rst create mode 100644 docs/root/api-v1/network_filters/network_filters.rst create mode 100644 docs/root/api-v1/network_filters/rate_limit_filter.rst create mode 100644 docs/root/api-v1/network_filters/redis_proxy_filter.rst create mode 100644 docs/root/api-v1/network_filters/tcp_proxy_filter.rst create mode 100644 docs/root/api-v1/rate_limit.rst create mode 100644 docs/root/api-v1/route_config/rate_limits.rst create mode 100644 docs/root/api-v1/route_config/rds.rst create mode 100644 docs/root/api-v1/route_config/route.rst create mode 100644 docs/root/api-v1/route_config/route_config.rst create mode 100644 docs/root/api-v1/route_config/vcluster.rst create mode 100644 docs/root/api-v1/route_config/vhost.rst create mode 100644 docs/root/api-v1/runtime.rst create mode 100644 docs/root/api-v1/tracing.rst create mode 100644 docs/root/api-v2/api.rst create mode 100644 docs/root/api-v2/bootstrap/bootstrap.rst create mode 100644 docs/root/api-v2/clusters/clusters.rst create mode 100644 docs/root/api-v2/common_messages/common_messages.rst create mode 100644 docs/root/api-v2/config/filter/filter.rst create mode 100644 docs/root/api-v2/config/filter/http/http.rst create mode 100644 docs/root/api-v2/config/filter/network/network.rst create mode 100644 docs/root/api-v2/http_routes/http_routes.rst create mode 100644 
docs/root/api-v2/listeners/listeners.rst create mode 100644 docs/root/api-v2/types/types.rst create mode 100644 docs/root/configuration/access_log.rst create mode 100644 docs/root/configuration/cluster_manager/cds.rst create mode 100644 docs/root/configuration/cluster_manager/cluster_circuit_breakers.rst create mode 100644 docs/root/configuration/cluster_manager/cluster_hc.rst create mode 100644 docs/root/configuration/cluster_manager/cluster_manager.rst create mode 100644 docs/root/configuration/cluster_manager/cluster_runtime.rst create mode 100644 docs/root/configuration/cluster_manager/cluster_stats.rst create mode 100644 docs/root/configuration/configuration.rst create mode 100644 docs/root/configuration/http_conn_man/header_sanitizing.rst create mode 100644 docs/root/configuration/http_conn_man/headers.rst create mode 100644 docs/root/configuration/http_conn_man/http_conn_man.rst create mode 100644 docs/root/configuration/http_conn_man/rds.rst create mode 100644 docs/root/configuration/http_conn_man/route_matching.rst create mode 100644 docs/root/configuration/http_conn_man/runtime.rst create mode 100644 docs/root/configuration/http_conn_man/stats.rst create mode 100644 docs/root/configuration/http_conn_man/traffic_splitting.rst create mode 100644 docs/root/configuration/http_filters/buffer_filter.rst create mode 100644 docs/root/configuration/http_filters/cors_filter.rst create mode 100644 docs/root/configuration/http_filters/dynamodb_filter.rst create mode 100644 docs/root/configuration/http_filters/fault_filter.rst create mode 100644 docs/root/configuration/http_filters/grpc_http1_bridge_filter.rst create mode 100644 docs/root/configuration/http_filters/grpc_json_transcoder_filter.rst create mode 100644 docs/root/configuration/http_filters/grpc_web_filter.rst create mode 100644 docs/root/configuration/http_filters/gzip_filter.rst create mode 100644 docs/root/configuration/http_filters/health_check_filter.rst create mode 100644 
docs/root/configuration/http_filters/http_filters.rst create mode 100644 docs/root/configuration/http_filters/ip_tagging_filter.rst create mode 100644 docs/root/configuration/http_filters/lua_filter.rst create mode 100644 docs/root/configuration/http_filters/rate_limit_filter.rst create mode 100644 docs/root/configuration/http_filters/router_filter.rst create mode 100644 docs/root/configuration/http_filters/squash_filter.rst create mode 100644 docs/root/configuration/listener_filters/listener_filters.rst create mode 100644 docs/root/configuration/listener_filters/original_dst_filter.rst create mode 100644 docs/root/configuration/listeners/lds.rst create mode 100644 docs/root/configuration/listeners/listeners.rst create mode 100644 docs/root/configuration/listeners/runtime.rst create mode 100644 docs/root/configuration/listeners/stats.rst create mode 100644 docs/root/configuration/network_filters/client_ssl_auth_filter.rst create mode 100644 docs/root/configuration/network_filters/echo_filter.rst create mode 100644 docs/root/configuration/network_filters/mongo_proxy_filter.rst create mode 100644 docs/root/configuration/network_filters/network_filters.rst create mode 100644 docs/root/configuration/network_filters/rate_limit_filter.rst create mode 100644 docs/root/configuration/network_filters/redis_proxy_filter.rst create mode 100644 docs/root/configuration/network_filters/tcp_proxy_filter.rst create mode 100644 docs/root/configuration/overview/v1_overview.rst create mode 100644 docs/root/configuration/overview/v2_overview.rst create mode 100644 docs/root/configuration/rate_limit.rst create mode 100644 docs/root/configuration/runtime.rst create mode 100644 docs/root/configuration/statistics.rst create mode 100644 docs/root/configuration/tools/router_check.rst create mode 100644 docs/root/extending/extending.rst create mode 100644 docs/root/faq/binaries.rst create mode 100644 docs/root/faq/how_fast_is_envoy.rst create mode 100644 docs/root/faq/lb_panic_threshold.rst 
create mode 100644 docs/root/faq/overview.rst create mode 100644 docs/root/faq/sni.rst create mode 100644 docs/root/faq/zipkin_tracing.rst create mode 100644 docs/root/faq/zone_aware_routing.rst create mode 100644 docs/root/favicon.ico create mode 100644 docs/root/index.rst create mode 100644 docs/root/install/building.rst create mode 100644 docs/root/install/install.rst create mode 100644 docs/root/install/ref_configs.rst create mode 100644 docs/root/install/sandboxes/local_docker_build.rst create mode 100644 docs/root/install/tools/config_load_check_tool.rst create mode 100644 docs/root/install/tools/route_table_check_tool.rst create mode 100644 docs/root/install/tools/schema_validator_check_tool.rst create mode 100644 docs/root/install/tools/tools.rst create mode 100644 docs/root/intro/arch_overview/access_logging.rst create mode 100644 docs/root/intro/arch_overview/arch_overview.rst create mode 100644 docs/root/intro/arch_overview/circuit_breaking.rst create mode 100644 docs/root/intro/arch_overview/cluster_manager.rst create mode 100644 docs/root/intro/arch_overview/connection_pooling.rst create mode 100644 docs/root/intro/arch_overview/draining.rst create mode 100644 docs/root/intro/arch_overview/dynamic_configuration.rst create mode 100644 docs/root/intro/arch_overview/dynamo.rst create mode 100644 docs/root/intro/arch_overview/global_rate_limiting.rst create mode 100644 docs/root/intro/arch_overview/grpc.rst create mode 100644 docs/root/intro/arch_overview/health_checking.rst create mode 100644 docs/root/intro/arch_overview/hot_restart.rst create mode 100644 docs/root/intro/arch_overview/http_connection_management.rst create mode 100644 docs/root/intro/arch_overview/http_filters.rst create mode 100644 docs/root/intro/arch_overview/http_routing.rst create mode 100644 docs/root/intro/arch_overview/init.rst create mode 100644 docs/root/intro/arch_overview/listener_filters.rst create mode 100644 docs/root/intro/arch_overview/listeners.rst create mode 100644 
docs/root/intro/arch_overview/load_balancing.rst create mode 100644 docs/root/intro/arch_overview/mongo.rst create mode 100644 docs/root/intro/arch_overview/network_filters.rst create mode 100644 docs/root/intro/arch_overview/outlier.rst create mode 100644 docs/root/intro/arch_overview/redis.rst create mode 100644 docs/root/intro/arch_overview/runtime.rst create mode 100644 docs/root/intro/arch_overview/scripting.rst create mode 100644 docs/root/intro/arch_overview/service_discovery.rst create mode 100644 docs/root/intro/arch_overview/ssl.rst create mode 100644 docs/root/intro/arch_overview/statistics.rst create mode 100644 docs/root/intro/arch_overview/tcp_proxy.rst create mode 100644 docs/root/intro/arch_overview/terminology.rst create mode 100644 docs/root/intro/arch_overview/threading_model.rst create mode 100644 docs/root/intro/arch_overview/tracing.rst create mode 100644 docs/root/intro/arch_overview/websocket.rst create mode 100644 docs/root/intro/comparison.rst create mode 100644 docs/root/intro/deployment_types/deployment_types.rst create mode 100644 docs/root/intro/deployment_types/double_proxy.rst create mode 100644 docs/root/intro/deployment_types/front_proxy.rst create mode 100644 docs/root/intro/deployment_types/service_to_service.rst create mode 100644 docs/root/intro/getting_help.rst create mode 100644 docs/root/intro/intro.rst create mode 100644 docs/root/intro/version_history.rst create mode 100644 docs/root/intro/what_is_envoy.rst create mode 100644 docs/root/operations/admin.rst create mode 100644 docs/root/operations/cli.rst create mode 100644 docs/root/operations/fs_flags.rst create mode 100644 docs/root/operations/hot_restarter.rst create mode 100644 docs/root/operations/operations.rst create mode 100644 docs/root/operations/runtime.rst create mode 100644 docs/root/operations/stats_overview.rst create mode 100644 docs/root/start/distro/ambassador.rst create mode 100644 docs/root/start/sandboxes/front_proxy.rst create mode 100644 
docs/root/start/sandboxes/grpc_bridge.rst create mode 100644 docs/root/start/sandboxes/jaeger_tracing.rst create mode 100644 docs/root/start/sandboxes/zipkin_tracing.rst create mode 100644 docs/root/start/start.rst create mode 100755 sync.sh create mode 100644 tools/protodoc/BUILD create mode 100644 tools/protodoc/protodoc.bzl create mode 100755 tools/protodoc/protodoc.py diff --git a/.circleci/config.yml b/.circleci/config.yml index c602f2855f06..c517b786a63b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -37,6 +37,15 @@ jobs: steps: - checkout - run: ci/do_circle_ci.sh bazel.tsan + api: + docker: + - image: *envoy-build-image + resource_class: xlarge + working_directory: /source + steps: + - checkout + - run: ci/do_circle_ci.sh bazel.api + - run: ci/api_mirror.sh ipv6_tests: machine: true steps: @@ -96,8 +105,11 @@ jobs: steps: - run: sleep 30 # workaround GH sync issue - checkout - - add_ssh_keys - run: ci/do_circle_ci.sh docs + - add_ssh_keys + - run: docs/publish.sh + - store_artifacts: + path: generated/docs mac: macos: xcode: "9.3.0" @@ -118,6 +130,7 @@ workflows: only: /^v.*/ - asan - tsan + - api - ipv6_tests - coverage - format diff --git a/BUILD b/BUILD index 779d1695d3b7..bd487a868c0f 100644 --- a/BUILD +++ b/BUILD @@ -1 +1,3 @@ licenses(["notice"]) # Apache 2 + +exports_files(["VERSION"]) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 6211bd149335..c259add4a734 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -68,9 +68,8 @@ maximize the chances of your PR being merged. * PRs are expected to have 100% test coverage for added code. This can be verified with a coverage build. If your PR cannot have 100% coverage for some reason please clearly explain why when you open it. 
-* Any PR that changes user-facing behavior **must** have associated documentation in - [data-plane-api](https://github.com/envoyproxy/data-plane-api/tree/master/docs) as well as - [release notes](https://github.com/envoyproxy/data-plane-api/blob/master/docs/root/intro/version_history.rst). +* Any PR that changes user-facing behavior **must** have associated documentation in [docs](docs) as + well as [release notes](docs/root/intro/version_history.rst). * All code comments and documentation are expected to have proper English grammar and punctuation. If you are not a fluent English speaker (or a bad writer ;-)) please let us know and we will try to find some help but there are no guarantees. diff --git a/GOVERNANCE.md b/GOVERNANCE.md index 2fc812bd55c6..3de62f5bcfce 100644 --- a/GOVERNANCE.md +++ b/GOVERNANCE.md @@ -69,22 +69,17 @@ * We do releases approximately every 3 months as described in the [release cadence documentation](CONTRIBUTING.md#release-cadence). * Decide on the somewhat arbitrary time that a release will occur. -* Begin marshalling the ongoing PR flow in both this repo and - [data-plane-api](https://github.com/envoyproxy/data-plane-api). Ask maintainers to hold off - merging any particularly risky PRs until after the release is tagged. This is because we currently - don't use release branches and assume that master is RC quality at all times. At the same time, - try to make sure that data-plane-api doc PRs are only merged *after* the Envoy PR so that we don't - wind up with stale docs. -* Do a final check of the [release notes](https://github.com/envoyproxy/data-plane-api/blob/master/docs/root/intro/version_history.rst) - and make any needed corrections. -* Switch the [data-plane-api VERSION](https://github.com/envoyproxy/data-plane-api/blob/master/VERSION) from a - "dev" variant to a final variant. E.g., "1.6.0-dev" to "1.6.0". Get a review and merge. 
-* Update the [data-plane-api SHA in Envoy](https://github.com/envoyproxy/envoy/blob/ed312500ec38876446ce8ee70a06f7cda4adc937/bazel/repository_locations.bzl#L79) - to the final release SHA. Get the PR approved and merge. +* Begin marshalling the ongoing PR flow in this repo. Ask maintainers to hold off merging any + particularly risky PRs until after the release is tagged. This is because we currently don't use + release branches and assume that master is RC quality at all times. +* Do a final check of the [release notes](docs/root/intro/version_history.rst) and make any needed + corrections. +* Switch the [VERSION](VERSION) from a "dev" variant to a final variant. E.g., "1.6.0-dev" to + "1.6.0". Get a review and merge. * **Wait for tests to pass on master.** * Create a [tagged release](https://github.com/envoyproxy/envoy/releases). The release should - start with "v" and be followed by the version number. E.g., "v1.6.0". **This must match - the [data-plane-api VERSION](https://github.com/envoyproxy/data-plane-api/blob/master/VERSION).** + start with "v" and be followed by the version number. E.g., "v1.6.0". **This must match the + [VERSION](VERSION).** * Monitor the CircleCI tag build to make sure that the final docker images get pushed along with the final docs. The final documentation will end up in the [envoyproxy.github.io repository](https://github.com/envoyproxy/envoyproxy.github.io/tree/master/docs/envoy). @@ -92,10 +87,9 @@ * Craft a witty/uplifting email and send it to all the email aliases including envoy-announce@. * If possible post on Twitter (either have Matt do it or contact caniszczyk@ on Slack and have the Envoy account post). -* Do a new PR to update the [data-plane-api VERSION](https://github.com/envoyproxy/data-plane-api/blob/master/VERSION) - to the next development release. E.g., "1.7.0-dev". 
At the same time, also add a new empty section - to the [release notes](https://github.com/envoyproxy/data-plane-api/blob/master/docs/root/intro/version_history.rst) - for the following version. E.g., "1.7.0". +* Do a new PR to update [VERSION](VERSION) to the next development release. E.g., "1.7.0-dev". At + the same time, also add a new empty section to the [release + notes](docs/root/intro/version_history.rst) for the following version. E.g., "1.7.0". * Update [DEPRECATED.md](DEPRECATED.md) to remove the '(pending)' comment on the current version, replacing it with the release date. Add a placeholder for the next version. diff --git a/PULL_REQUEST_TEMPLATE.md b/PULL_REQUEST_TEMPLATE.md index 547f23baa849..8e119ccbd3dd 100644 --- a/PULL_REQUEST_TEMPLATE.md +++ b/PULL_REQUEST_TEMPLATE.md @@ -36,22 +36,17 @@ if you are unsure. A good rule of thumb is the riskier the change, the more comprehensive the testing should be. *Docs Changes*: ->Link to [Data Plane PR](https://github.com/envoyproxy/data-plane-api/pulls)] -if your PR involves documentation changes. Please write in N/A if there were no -documentation changes. +Description of documentation changes. These should be made in [docs/root](docs/root) and/or inline +with the API protos. Please write in N/A if there were no documentation changes. *Release Notes*: ->If this change is user impacting you **must** add a release note via a discrete PR to -[version_history.rst](https://github.com/envoyproxy/data-plane-api/blob/master/docs/root/intro/version_history.rst). -Please include any relevant links. Each release note should be prefixed with the relevant subsystem -in alphabetical order (see existing examples as a guide) and include links to relevant parts of -the documentation. Often times, this PR can be done concurrently with the main documentation PR -for the feature. Thank you! Please write in N/A if there are no release notes. 
+>If this change is user impacting you **must** add a release note to +[version_history.rst](docs/root/intro/version_history.rst). Please include any relevant links. Each +release note should be prefixed with the relevant subsystem in alphabetical order (see existing +examples as a guide) and include links to relevant parts of the documentation. Thank you! Please +write in N/A if there are no release notes. [Optional Fixes #Issue] -[Optional *API Changes*:] ->Link to [Data Plane PR](https://github.com/envoyproxy/data-plane-api/pulls)] - [Optional *Deprecated*:] ->Description of what is deprecated. +>Description of what is [deprecated](DEPRECATED.md). diff --git a/README.md b/README.md index 087ad11962bb..6f4f8f03eea2 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,8 @@ to find out more about the origin story and design philosophy of Envoy ## Related -* [data-plane-api](https://github.com/envoyproxy/data-plane-api): v2 API definitions. +* [data-plane-api](https://github.com/envoyproxy/data-plane-api): v2 API definitions as a standalone + repository. This is a read-only mirror of [api](api/). * [envoy-perf](https://github.com/envoyproxy/envoy-perf): Performance testing framework. * [envoy-filter-example](https://github.com/envoyproxy/envoy-filter-example): Example of how to add new filters and link to the main repository. diff --git a/REPO_LAYOUT.md b/REPO_LAYOUT.md index 07e3e5846f92..1b87c888a946 100644 --- a/REPO_LAYOUT.md +++ b/REPO_LAYOUT.md @@ -5,11 +5,12 @@ as well as to clearly specify how extensions are added to the repository. The to are: * [.circleci/](.circleci/): Configuration for [CircleCI](https://circleci.com/gh/envoyproxy). +* [api/](api/): Envoy data plane API. * [bazel/](bazel/): Configuration for Envoy's use of [Bazel](https://bazel.build/). * [ci/](ci/): Scripts used both during CI as well as to build Docker containers. * [configs/](configs/): Example Envoy configurations. 
-* [docs/](docs/): Project level documentation as well as scripts for publishing final docs during - releases. +* [docs/](docs/): End user facing Envoy proxy and data plane API documentation as well as scripts + for publishing final docs during releases. * [examples/](examples/): Larger Envoy examples using Docker and Docker Compose. * [include/](include/): "Public" interface headers for "core" Envoy. In general, these are almost entirely 100% abstract classes. There are a few cases of not-abstract classes in diff --git a/VERSION b/VERSION new file mode 100644 index 000000000000..de023c91b16b --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +1.7.0-dev diff --git a/api/API_OVERVIEW.md b/api/API_OVERVIEW.md new file mode 100644 index 000000000000..c509789a3502 --- /dev/null +++ b/api/API_OVERVIEW.md @@ -0,0 +1,128 @@ +# Envoy v2 APIs for developers + +## Goals + +This repository contains both the implemented and draft v2 JSON REST and gRPC +[Envoy](https://github.com/envoyproxy/envoy/) APIs. + +Version 2 of the Envoy API evolves existing APIs and introduces new APIs to: + +* Allow for more advanced load balancing through load and resource utilization reporting to management servers. +* Improve N^2 health check scalability issues by optionally offloading health checking to other Envoy instances. +* Support Envoy deployment in edge, sidecar and middle proxy deployment models via changes to the listener model and CDS/SDS APIs. +* Allow streaming updates from the management server on change, instead of polling APIs from Envoy. gRPC APIs will be supported + alongside JSON REST APIs to provide for this. +* Ensure all Envoy runtime configuration is dynamically discoverable via API + calls, including listener configuration, certificates and runtime settings, which are today sourced from the filesystem. 
There + will still remain a static bootstrap configuration file that will specify items + unlikely to change during runtime, including the Envoy node identity, xDS + management server addresses, administration interface and tracing + configuration. +* Revisit and where appropriate cleanup any v1 technical debt. + +## Status + +See +[here](https://www.envoyproxy.io/docs/envoy/latest/configuration/overview/v2_overview.html#status) +for the current status of the v2 APIs. + +See [here](CONTRIBUTING.md#api-changes) for the v2 API change process. + +## Principles + +* [Proto3](https://developers.google.com/protocol-buffers/docs/proto3) will be + used to specify the canonical API. This will provide directly the gRPC API and + via gRPC-JSON transcoding the JSON REST API. A textual YAML input will be + supported for filesystem configuration files (e.g. the bootstrap file), in + addition to JSON, as a syntactic convenience. YAML file contents will be + internally converted to JSON and then follow the standard JSON-proto3 + conversion during Envoy config ingestion. + +* xDS APIs should support eventual consistency. For example, if RDS references a + cluster that has not yet been supplied by CDS, it should be silently ignored + and traffic not forwarded until the CDS update occurs. Stronger consistency + guarantees are possible if the management server is able to sequence the xDS + APIs carefully (for example by using the ADS API below). By following the + `[CDS, EDS, LDS, RDS]` sequence for all pertinent resources, it will be + possible to avoid traffic outages during configuration update. + +* The API is primarily intended for machine generation and consumption. It is + expected that the management server is responsible for mapping higher level + configuration concepts to API responses. Similarly, static configuration + fragments may be generated by templating tools, etc. 
The APIs and tools + used to generate xDS configuration are beyond the scope of the definitions in + this repository. + +* REST-JSON API equivalents will be provided for the basic singleton xDS + subscription services CDS/EDS/LDS/RDS/SDS. Advanced APIs such as HDS, ADS and + EDS multi-dimensional LB will be gRPC only. This avoids having to map + complicated bidirectional stream semantics onto REST. + +* Listeners will be immutable. Any updates to a listener via LDS will require + the draining of existing connections for the specific bound IP/port. As a + result, new requests will only be guaranteed to observe the new configuration + after existing connections have drained or the drain timeout. + +* Versioning will be expressed via [proto3 package + namespaces](https://developers.google.com/protocol-buffers/docs/proto3#packages), + i.e. `package envoy.api.v2;`. + +* Custom components (e.g. filters, resolvers, loggers) will use a reverse DNS naming scheme, + e.g. `com.google.widget`, `com.lyft.widget`. + +## APIs + +Unless otherwise stated, the APIs with the same names as v1 APIs have a similar role. + +* [Cluster Discovery Service (CDS)](envoy/api/v2/cds.proto). +* [Endpoint Discovery Service (EDS)](envoy/api/v2/eds.proto). This has the same role as SDS in the [v1 API](https://www.envoyproxy.io/docs/envoy/latest/api-v1/cluster_manager/sds), + the new name better describes what the API does in practice. Advanced global load balancing capable of utilizing N-dimensional upstream metrics is now supported. +* [Health Discovery Service (HDS)](envoy/service/discovery/v2/hds.proto). This new API supports efficient endpoint health discovery by the management server via the Envoy instances it manages. Individual Envoy instances + will typically receive HDS instructions to health check a subset of all + endpoints. The health check subset may not be a subset of the Envoy instance's + EDS endpoints. +* [Listener Discovery Service (LDS)](envoy/api/v2/lds.proto). 
This new API supports dynamic discovery of the listener configuration (which ports to bind to, TLS details, filter chains, etc.). +* [Metric Service (MS)](envoy/service/metrics/v2/metrics_service.proto). This new API allows Envoy to push (stream) metrics forever for servers to consume. +* [Rate Limit Service (RLS)](envoy/service/ratelimit/v2/rls.proto) +* [Route Discovery Service (RDS)](envoy/api/v2/rds.proto). +* [Secret Discovery Service (SDS)](envoy/service/discovery/v2/sds.proto). + +In addition to the above APIs, an aggregation API will be provided to allow for +fine grained control over the sequencing of API updates across discovery +services: + +* [Aggregated Discovery Service (ADS)](envoy/api/v2/discovery.proto). See + the [ADS overview](https://www.envoyproxy.io/docs/envoy/latest/configuration/overview/v2_overview#aggregated-discovery-service). + +A protocol description for the xDS APIs is provided [here](XDS_PROTOCOL.md). + +## Terminology + +Some relevant [existing terminology](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/terminology.html) is +repeated below and some new v2 terms introduced. + +* Cluster: A cluster is a group of logically similar endpoints that Envoy + connects to. In v2, RDS routes points to clusters, CDS provides cluster configuration and + Envoy discovers the cluster members via EDS. + +* Downstream: A downstream host connects to Envoy, sends requests, and receives responses. + +* Endpoint: An endpoint is an upstream host that is a member of one or more clusters. Endpoints are discovered via EDS. + +* Listener: A listener is a named network location (e.g., port, unix domain socket, etc.) that can be connected to by downstream clients. Envoy exposes one or more listeners that downstream hosts connect to. + +* Locality: A location where an Envoy instance or an endpoint runs. This includes + region, zone and sub-zone identification. + +* Management server: A logical server implementing the v2 Envoy APIs. 
This is not necessarily a single physical machine since it may be replicated/sharded and API serving for different xDS APIs may be implemented on different physical machines. + +* Region: Geographic region where a zone is located. + +* Sub-zone: Location within a zone where an Envoy instance or an endpoint runs. + This allows for multiple load balancing targets within a zone. + +* Upstream: An upstream host receives connections and requests from Envoy and returns responses. + +* xDS: CDS/EDS/HDS/LDS/RLS/RDS/SDS APIs. + +* Zone: Availability Zone (AZ) in AWS, Zone in GCP. diff --git a/api/BUILD b/api/BUILD new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/api/CONTRIBUTING.md b/api/CONTRIBUTING.md new file mode 100644 index 000000000000..c962f1c01e18 --- /dev/null +++ b/api/CONTRIBUTING.md @@ -0,0 +1,77 @@ +# Contributing guide + +## API changes + +All API changes should follow the [style guide](STYLE.md). + +API changes are regular PRs in https://github.com/envoyproxy/envoy for the API/configuration +changes. They may be as part of a larger implementation PR. Please follow the standard Bazel and CI +process for validating build/test sanity of `api/` before submitting a PR. + +*Note: New .proto files should be also included to [build.sh](https://github.com/envoyproxy/envoy/blob/master/docs/build.sh) and +[BUILD](https://github.com/envoyproxy/envoy/blob/master/api/docs/BUILD) in order to get the RSTs generated.* + +## Documentation changes + +The Envoy project takes documentation seriously. We view it as one of the reasons the project has +seen rapid adoption. As such, it is required that all features have complete documentation. This is +generally going to be a combination of API documentation as well as architecture/overview +documentation. 
+ +### Building documentation locally + +The documentation can be built locally in the root of https://github.com/envoyproxy/envoy via: + +``` +docs/build.sh +``` + +Or to use a hermetic docker container: + +``` +./ci/run_envoy_docker.sh './ci/do_ci.sh docs' +``` + +This process builds RST documentation directly from the proto files, merges it with the static RST +files, and then runs [Sphinx](http://www.sphinx-doc.org/en/stable/rest.html) over the entire tree to +produce the final documentation. The generated RST files are not committed as they are regenerated +every time the documentation is built. + +### Viewing documentation + +Once the documentation is built, it is available rooted at `generated/docs/index.html`. The +generated RST files are also viewable in `generated/rst`. + +Note also that the generated documentation can be viewed in CI: + +1. Open docs job in CircleCI. +2. Navigate to "artifacts" tab. +3. Expand files and click on `index.html`. + +If you do not see an artifacts tab this is a bug in CircleCI. Try logging out and logging back in. + +### Documentation guidelines + +The following are some general guidelines around documentation. + +* Cross link as much as possible. Sphinx is fantastic at this. Use it! See ample examples with the + existing documentation as a guide. +* Please use a **single space** after a period in documentation so that all generated text is + consistent. +* Comments can be left inside comments if needed (that's pretty deep, right?) via the `[#comment:]` + special tag. E.g., + + ``` + // This is a really cool field! + // [#comment:TODO(mattklein123): Do something cooler] + string foo_field = 3; + ``` + +* Prefer *italics* for emphasis as `backtick` emphasis is somewhat jarring in our Sphinx theme. +* All documentation is expected to use proper English grammar with proper punctuation. If you are + not a fluent English speaker please let us know and we will help out. 
+* Tag messages/enum/files with `[#proto-status: draft|experimental|frozen]` to + reflect their [API + status](https://www.envoyproxy.io/docs/envoy/latest/configuration/overview/v2_overview#status). + Frozen entities do not need to be tagged except when overriding an outer scope + draft or experimental status. diff --git a/api/README.md b/api/README.md new file mode 100644 index 000000000000..194ff76c6d4d --- /dev/null +++ b/api/README.md @@ -0,0 +1,17 @@ +# Data plane API + +This repository hosts the configuration and APIs that drive [Envoy](https://www.envoyproxy.io/). The +APIs are also in some cases used by other proxy solutions that aim to interoperate with management +systems and configuration generators that are built against this standard. Thus, we consider these a +set of *universal data plane* APIs. See [this](https://medium.com/@mattklein123/the-universal-data-plane-api-d15cec7a) +blog post for more information on the universal data plane concept. + +Additionally, all of the documentation for the Envoy project is built directly from this repository. +This allows us to keep all of our documentation next to the configuration and APIs that derive it. 
+ +# Further reading + +* [API overview for developers](API_OVERVIEW.md) +* [API overview for users](https://www.envoyproxy.io/docs/envoy/latest/configuration/overview/v2_overview#) +* [xDS protocol overview](XDS_PROTOCOL.md) +* [Contributing guide](CONTRIBUTING.md) diff --git a/api/STYLE.md b/api/STYLE.md new file mode 100644 index 000000000000..d932c3a3b17c --- /dev/null +++ b/api/STYLE.md @@ -0,0 +1,133 @@ +# API style guidelines + +Generally follow guidance at https://cloud.google.com/apis/design/, in +particular for proto3 as described at: + +* https://cloud.google.com/apis/design/proto3 +* https://cloud.google.com/apis/design/naming_convention +* https://developers.google.com/protocol-buffers/docs/style + +In addition, the following conventions should be followed: + +* For protos that are [frozen](https://www.envoyproxy.io/docs/envoy/latest/configuration/overview/v2_overview#status), + the following guidelines are followed: + + * Fields should not be renumbered or have their types changed. This is standard proto development + procedure. + * If fields are deleted, the following syntax should be put in their place: + + ```proto + reserved ; + ``` + + E.g., + + ```proto + reserved 15; + ``` + + * Renaming of fields or package namespaces for a proto must not occur. This is inherently dangerous, since: + * Fields renames break wire compatibility. This is stricter than standard proto development procedure + in the sense that it does not break binary wire format. However, it **does** break loading + of YAML/JSON into protos as well as text protos. Since we consider YAML/JSON to be first class + inputs, we must not change field names. + + * For service definitions, the gRPC endpoint URL is inferred from package + namespace, so this will break client/server communication. + + * For a message embedded in an `Any` object, the type URL, which the package + namespace is a part of, may be used by Envoy or other API consuming code. 
+ Currently, this applies to the top-level resources embedded in + `DiscoveryResponse` objects, e.g. `Cluster`, `Listener`, etc. + + * Consuming code will break and require source change to match the changes. + +* Non-frozen fields should be tagged with `[#not-implemented-hide:]`, `[#not-implemented-warn:]`, + `[#proto-status: draft]` or `[#proto-status: experimental]`. + +* Protos for configs and services that are not implemented immediately in + Envoy, or are under active design and development should be versioned + "v2alpha". If several iterations of the alpha API are expected, then versions + "v2alpha1", "v2alpha2", and so on are preferred. Alpha-versioned protos are + considered experimental and are not required to preserve compatibility. + +* Every proto directory should have a `README.md` describing its content. See + for example [envoy.service](envoy/service/README.md). + +* The data plane APIs are primarily intended for machine generation and consumption. + It is expected that the management server is responsible for mapping higher + level configuration concepts to concrete API concepts. Similarly, static configuration + fragments may be generated by tools and UIs, etc. The APIs and tools used + to generate xDS configuration are beyond the scope of the definitions in this + repository. + +* Use [wrapped scalar + types](https://github.com/google/protobuf/blob/master/src/google/protobuf/wrappers.proto) + where there is a real need for the field to have a default value that does not + match the proto3 defaults (0/false/""). This should not be done for fields + where the proto3 defaults make sense. All things being equal, pick appropriate + logic, e.g. enable vs. disable for a `bool` field, such that the proto3 + defaults work, but only where this doesn't result in API gymnastics. + +* Always use plural field names for `repeated` fields, such as `filters`. 
+ +* Always use upper camel case names for message types and enum types without embedded + acronyms, such as `HttpRequest`. + +* Prefer `oneof` selections to boolean overloads of fields, for example, prefer: + + ```proto + oneof path_specifier { + string simple_path = 1; + string regex_path = 2; + } + ``` + + to + + ```proto + string path = 1; + bool path_is_regex = 2; + ``` + + This is more efficient, extendable and self-describing. + +* The API includes two types for representing [percents](envoy/type/percent.proto). `Percent` is + effectively a double value in the range 0.0-100.0. `FractionalPercent` is an integral fraction + that can be used to create a truncated percentage also in the range 0.0-100.0. In high performance + paths, `FractionalPercent` is preferred as randomness calculations can be performed using integral + modulo and comparison operations only without any floating point conversions. Typically, most + users do not need infinite precision in these paths. + +* For enum types, if one of the enum values is used for most cases, make it the + first enum value with `0` numeric value. Otherwise, define the first enum + value like `TYPE_NAME_UNSPECIFIED = 0`, and treat it as an error. This design + pattern forces developers to explicitly choose the correct enum value for + their use case, and avoid misunderstanding of the default behavior. + +* Proto fields should be sorted logically, not by field number. For large protos, place a comment + at the top that specifies the next free field number. 
E.g., + + ``` + // [#comment:next free field: 28] + ``` + +## Package organization + +API definitions are layered hierarchically in packages from top-to-bottom: + +- `envoy.service` contains gRPC definitions of supporting services; +- `envoy.config` contains definitions for service configuration, filter +configuration, and bootstrap; +- `envoy.api.v2` contains definitions for EDS, CDS, RDS, LDS, and top-level +resources such as `Cluster`; +- `envoy.api.v2.endpoint`, `envoy.api.v2.cluster`, `envoy.api.v2.route`, +`envoy.api.v2.listener`, `envoy.api.v2.ratelimit` define sub-messages of the top-level resources; +- `envoy.api.v2.core` and `envoy.api.v2.auth` hold core definitions consumed +throughout the API. + +Dependencies are enforced from top-to-bottom using visibility constraints in +the build system to prevent circular dependency formation. Package group +`//envoy/api/v2:friends` selects consumers of the core API package (services and configs) +and is the default visibility for the core API packages. The default visibility +for services and configs should be `//docs` (proto documentation tool). diff --git a/api/XDS_PROTOCOL.md b/api/XDS_PROTOCOL.md new file mode 100644 index 000000000000..f7fafc58b738 --- /dev/null +++ b/api/XDS_PROTOCOL.md @@ -0,0 +1,300 @@ +# xDS REST and gRPC protocol + +Envoy discovers its various dynamic resources via the filesystem or by querying +one or more management servers. Collectively, these discovery services and their +corresponding APIs are referred to as _xDS_. Resources are requested via +_subscriptions_, by specifying a filesystem path to watch, initiating gRPC +streams or polling a REST-JSON URL. The latter two methods involve sending +requests with a +[`DiscoveryRequest`](https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/discovery.proto#discoveryrequest) +proto payload. 
Resources are delivered in a +[`DiscoveryResponse`](https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/discovery.proto#discoveryresponse) +proto payload in all methods. We discuss each type of subscription below. + +## Filesystem subscriptions + +The simplest approach to delivering dynamic configuration is to place it at a +well known path specified in the +[`ConfigSource`](https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/core/config_source.proto#core-configsource). +Envoy will use `inotify` (`kqueue` on Mac OS X) to monitor the file for changes +and parse the `DiscoveryResponse` proto in the file on update. Binary +protobufs, JSON, YAML and proto text are supported formats for the +`DiscoveryResponse`. + +There is no mechanism available for filesystem subscriptions to ACK/NACK updates +beyond stats counters and logs. The last valid configuration for an xDS API will +continue to apply if an configuration update rejection occurs. + +## Streaming gRPC subscriptions + +### Singleton resource type discovery + +A gRPC +[`ApiConfigSource`](https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/core/config_source.proto#core-apiconfigsource) +can be specified independently for each xDS API, pointing at an upstream +cluster corresponding to a management server. This will initiate an independent +bidirectional gRPC stream for each xDS resource type, potentially to distinct +management servers. API delivery is eventually consistent. See +[ADS](#aggregated-discovery-service) below for situations in which explicit +control of sequencing is required. + +#### Type URLs + +Each xDS API is concerned with resources of a given type. There is a 1:1 +correspondence between an xDS API and a resource type. 
That is: + +* [LDS: `envoy.api.v2.Listener`](envoy/api/v2/lds.proto) +* [RDS: `envoy.api.v2.RouteConfiguration`](envoy/api/v2/rds.proto) +* [CDS: `envoy.api.v2.Cluster`](envoy/api/v2/cds.proto) +* [EDS: `envoy.api.v2.ClusterLoadAssignment`](envoy/api/v2/eds.proto) + +The concept of [_type +URLs_](https://developers.google.com/protocol-buffers/docs/proto3#any) appears +below, and takes the form `type.googleapis.com/`, e.g. +`type.googleapis.com/envoy.api.v2.Cluster` for CDS. In various requests from +Envoy and responses by the management server, the resource type URL is stated. + +#### ACK/NACK and versioning + +Each stream begins with a `DiscoveryRequest` from Envoy, specifying the list of +resources to subscribe to, the type URL corresponding to the subscribed +resources, the node identifier and an empty `version_info`. An example EDS request +might be: + +```yaml +version_info: +node: { id: envoy } +resource_names: +- foo +- bar +type_url: type.googleapis.com/envoy.api.v2.ClusterLoadAssignment +response_nonce: +``` + +The management server may reply either immediately or when the requested +resources are available with a `DiscoveryResponse`, e.g.: + +```yaml +version_info: X +resources: +- foo ClusterLoadAssignment proto encoding +- bar ClusterLoadAssignment proto encoding +type_url: type.googleapis.com/envoy.api.v2.ClusterLoadAssignment +nonce: A +``` + +After processing the `DiscoveryResponse`, Envoy will send a new request on the +stream, specifying the last version successfully applied and the nonce provided +by the management server. 
If the update was successfully applied, the +`version_info` will be __X__, as indicated in the sequence diagram: + +![Version update after ACK](diagrams/simple-ack.svg) + +In this sequence diagram, and below, the following format is used to abbreviate +messages: +* `DiscoveryRequest`: (V=`version_info`,R=`resource_names`,N=`response_nonce`,T=`type_url`) +* `DiscoveryResponse`: (V=`version_info`,R=`resources`,N=`nonce`,T=`type_url`) + +The version provides Envoy and the management server a shared notion of the +currently applied configuration, as well as a mechanism to ACK/NACK +configuration updates. If Envoy had instead rejected configuration update __X__, +it would reply with +[`error_detail`](https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/discovery.proto#envoy-api-field-discoveryrequest-error-detail) +populated and its previous version, which in this case was the empty +initial version. The error_detail has more details around the exact error message +populated in the message field: + +![No version update after NACK](diagrams/simple-nack.svg) + +Later, an API update may succeed at a new version __Y__: + +![ACK after NACK](diagrams/later-ack.svg) + +Each stream has its own notion of versioning, there is no shared versioning +across resource types. When ADS is not used, even each resource of a given +resource type may have a +distinct version, since the Envoy API allows distinct EDS/RDS resources to point +at different `ConfigSource`s. + +#### When to send an update + +The management server should only send updates to the Envoy client when the +resources in the `DiscoveryResponse` have changed. Envoy replies to any +`DiscoveryResponse` with a `DiscoveryRequest` containing the ACK/NACK +immediately after it has been either accepted or rejected. If the management +server provides the same set of resources rather than waiting for a change to +occur, it will cause Envoy and the management server to spin and have a severe +performance impact. 
+ +Within a stream, new `DiscoveryRequest`s supersede any prior `DiscoveryRequest`s +having the same resource type. This means that the management server only needs +to respond to the latest `DiscoveryRequest` on each stream for any given resource +type. + +#### Resource hints + +The `resource_names` specified in the `DiscoveryRequest` are a hint. Some +resource types, e.g. `Cluster`s and `Listener`s will specify an empty +`resource_names` list, since Envoy is interested in learning about all the +`Cluster`s (CDS) and `Listener`s (LDS) that the management server(s) know about +corresponding to its node identification. Other resource types, e.g. +`RouteConfiguration`s (RDS) and `ClusterLoadAssignment`s (EDS), follow from +earlier CDS/LDS updates and Envoy is able to explicitly enumerate these +resources. + +LDS/CDS resource hints will always be empty and it is expected that the +management server will provide the complete state of the LDS/CDS resources in +each response. An absent `Listener` or `Cluster` will be deleted. + +For EDS/RDS, the management server does not need to supply every requested +resource and may also supply additional, unrequested resources, `resource_names` +is only a hint. Envoy will silently ignore any superfluous resources. When a +requested resource is missing in a RDS or EDS update, Envoy will retain the last +known value for this resource. The management server may be able to infer all +the required EDS/RDS resources from the `node` identification in the +`DiscoveryRequest`, in which case this hint may be discarded. An empty EDS/RDS +`DiscoveryResponse` is effectively a nop from the perspective of the respective +resources in the Envoy. + +When a `Listener` or `Cluster` is deleted, its corresponding EDS and RDS +resources are also deleted inside the Envoy instance. In order for EDS resources +to be known or tracked by Envoy, there must exist an applied `Cluster` +definition (e.g. sourced via CDS). 
A similar relationship exists between RDS and +`Listeners` (e.g. sourced via LDS). + +For EDS/RDS, Envoy may either generate a distinct stream for each resource of a +given type (e.g. if each `ConfigSource` has its own distinct upstream cluster +for a management server), or may combine together multiple resource requests for +a given resource type when they are destined for the same management server. +This is left to implementation specifics, management servers should be capable +of handling one or more `resource_names` for a given resource type in each +request. Both sequence diagrams below are valid for fetching two EDS resources +`{foo, bar}`: + +![Multiple EDS requests on the same stream](diagrams/eds-same-stream.svg) +![Multiple EDS requests on distinct streams](diagrams/eds-distinct-stream.svg) + +#### Resource updates + +As discussed above, Envoy may update the list of `resource_names` it presents to +the management server in each `DiscoveryRequest` that ACK/NACKs a specific +`DiscoveryResponse`. In addition, Envoy may later issue additional +`DiscoveryRequest`s at a given `version_info` to update the management server +with new resource hints. For example, if Envoy is at EDS version __X__ and knows +only about cluster `foo`, but then receives a CDS update and learns about `bar` +in addition, it may issue an additional `DiscoveryRequest` for __X__ with +`{foo,bar}` as `resource_names`. + +![CDS response leads to EDS resource hint update](diagrams/cds-eds-resources.svg) + +There is a race condition that may arise here; if after a resource hint update +is issued by Envoy at __X__, but before the management server processes the +update it replies with a new version __Y__, the resource hint update may be +interpreted as a rejection of __Y__ by presenting an __X__ `version_info`. 
To +avoid this, the management server provides a `nonce` that Envoy uses to indicate +the specific `DiscoveryResponse` each `DiscoveryRequest` corresponds to: + +![EDS update race motivates nonces](diagrams/update-race.svg) + +The management server should not send a `DiscoveryResponse` for any +`DiscoveryRequest` that has a stale nonce. A nonce becomes stale following a +newer nonce being presented to Envoy in a `DiscoveryResponse`. A management +server does not need to send an update until it determines a new version is +available. Earlier requests at a version then also become stale. It may process +multiple `DiscoveryRequests` at a version until a new version is ready. + +![Requests become stale](diagrams/stale-requests.svg) + +An implication of the above resource update sequencing is that Envoy does not +expect a `DiscoveryResponse` for every `DiscoveryRequest` it issues. + +#### Eventual consistency considerations + +Since Envoy's xDS APIs are eventually consistent, traffic may drop briefly +during updates. For example, if only cluster __X__ is known via CDS/EDS, +a `RouteConfiguration` references cluster __X__ +and is then adjusted to cluster __Y__ just before the CDS/EDS update +providing __Y__, traffic will be blackholed until __Y__ is known about by the +Envoy instance. + +For some applications, a temporary drop of traffic is acceptable, retries at the +client or by other Envoy sidecars will hide this drop. For other scenarios where +drop can't be tolerated, traffic drop could have been avoided by providing a +CDS/EDS update with both __X__ and __Y__, then the RDS update repointing from +__X__ to __Y__ and then a CDS/EDS update dropping __X__. + +In general, to avoid traffic drop, sequencing of updates should follow a +`make before break` model, wherein +* CDS updates (if any) must always be pushed first. +* EDS updates (if any) must arrive after CDS updates for the respective clusters. +* LDS updates must arrive after corresponding CDS/EDS updates. 
+* RDS updates related to the newly added listeners must arrive in the end. +* Stale CDS clusters and related EDS endpoints (ones no longer being + referenced) can then be removed. + +xDS updates can be pushed independently if no new clusters/routes/listeners +are added or if it's acceptable to temporarily drop traffic during +updates. Note that in case of LDS updates, the listeners will be warmed +before they receive traffic, i.e. the dependent routes are fetched through +RDS if configured. On the other hand, clusters are not warmed when +adding/removing/updating clusters. Similarly, routes are not warmed -- +i.e., the management plane must ensure that clusters referenced by a route +are in place, before pushing the updates for a rotue. + +### Aggregated Discovery Services (ADS) + +It's challenging to provide the above guarantees on sequencing to avoid traffic +drop when management servers are distributed. ADS allow a single management +server, via a single gRPC stream, to deliver all API updates. This provides the +ability to carefully sequence updates to avoid traffic drop. With ADS, a single +stream is used with multiple independent `DiscoveryRequest`/`DiscoveryResponse` +sequences multiplexed via the type URL. For any given type URL, the above +sequencing of `DiscoveryRequest` and `DiscoveryResponse` messages applies. An +example update sequence might look like: + +![EDS/CDS multiplexed on an ADS stream](diagrams/ads.svg) + +A single ADS stream is available per Envoy instance. + +An example minimal `bootstrap.yaml` fragment for ADS configuration is: + +```yaml +node: + id: +dynamic_resources: + cds_config: {ads: {}} + lds_config: {ads: {}} + ads_config: + api_type: GRPC + cluster_name: [ads_cluster] +static_resources: + clusters: + - name: ads_cluster + connect_timeout: { seconds: 5 } + type: STATIC + hosts: + - socket_address: + address: + port_value: + lb_policy: ROUND_ROBIN + http2_protocol_options: {} +admin: + ... 
+ +``` + +## REST-JSON polling subscriptions + +Synchronous (long) polling via REST endpoints is also available for the xDS +singleton APIs. The above sequencing of messages is similar, except no +persistent stream is maintained to the management server. It is expected that +there is only a single outstanding request at any point in time, and as a result +the response nonce is optional in REST-JSON. The [JSON canonical transform of +proto3](https://developers.google.com/protocol-buffers/docs/proto3#json) is used +to encode `DiscoveryRequest` and `DiscoveryResponse` messages. ADS is not +available for REST-JSON polling. + +When the poll period is set to a small value, with the intention of long +polling, then there is also a requirement to avoid sending a `DiscoveryResponse` +[unless a change to the underlying resources has +occurred](#when-to-send-an-update). diff --git a/api/bazel/BUILD b/api/bazel/BUILD new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/api/bazel/api_build_system.bzl b/api/bazel/api_build_system.bzl new file mode 100644 index 000000000000..7b3451e7b182 --- /dev/null +++ b/api/bazel/api_build_system.bzl @@ -0,0 +1,144 @@ +load("@com_google_protobuf//:protobuf.bzl", "py_proto_library") +load("@com_lyft_protoc_gen_validate//bazel:pgv_proto_library.bzl", "pgv_cc_proto_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library", "go_grpc_library") +load("@io_bazel_rules_go//go:def.bzl", "go_test") + +_PY_SUFFIX="_py" +_CC_SUFFIX="_cc" +_GO_PROTO_SUFFIX="_go_proto" +_GO_GRPC_SUFFIX="_go_grpc" +_GO_IMPORTPATH_PREFIX="github.com/envoyproxy/data-plane-api/api/" + +def _Suffix(d, suffix): + return d + suffix + +def _LibrarySuffix(library_name, suffix): + # Transform //a/b/c to //a/b/c:c in preparation for suffix operation below. 
+ if library_name.startswith("//") and ":" not in library_name: + library_name += ":" + Label(library_name).name + return _Suffix(library_name, suffix) + + +# TODO(htuch): has_services is currently ignored but will in future support +# gRPC stub generation. +# TODO(htuch): Convert this to native py_proto_library once +# https://github.com/bazelbuild/bazel/issues/3935 and/or +# https://github.com/bazelbuild/bazel/issues/2626 are resolved. +def api_py_proto_library(name, srcs = [], deps = [], has_services = 0): + py_proto_library( + name = _Suffix(name, _PY_SUFFIX), + srcs = srcs, + default_runtime = "@com_google_protobuf//:protobuf_python", + protoc = "@com_google_protobuf//:protoc", + deps = [_LibrarySuffix(d, _PY_SUFFIX) for d in deps] + [ + "@com_lyft_protoc_gen_validate//validate:validate_py", + "@googleapis//:http_api_protos_py", + "@googleapis//:rpc_status_protos_py", + "@com_github_gogo_protobuf//:gogo_proto_py", + ], + visibility = ["//visibility:public"], + ) + +def api_go_proto_library(name, proto, deps = []): + go_proto_library( + name = _Suffix(name, _GO_PROTO_SUFFIX), + importpath = _Suffix(_GO_IMPORTPATH_PREFIX, name), + proto = proto, + visibility = ["//visibility:public"], + deps = deps + [ + "@com_github_gogo_protobuf//:gogo_proto_go", + "@com_github_golang_protobuf//ptypes/duration:go_default_library", + "@com_github_golang_protobuf//ptypes/struct:go_default_library", + "@com_github_golang_protobuf//ptypes/timestamp:go_default_library", + "@com_github_golang_protobuf//ptypes/wrappers:go_default_library", + "@com_github_golang_protobuf//ptypes/any:go_default_library", + "@com_lyft_protoc_gen_validate//validate:go_default_library", + "@googleapis//:rpc_status_go_proto", + ] + ) + +def api_go_grpc_library(name, proto, deps = []): + go_grpc_library( + name = _Suffix(name, _GO_GRPC_SUFFIX), + importpath = _Suffix(_GO_IMPORTPATH_PREFIX, name), + proto = proto, + visibility = ["//visibility:public"], + deps = deps + [ + 
"@com_github_gogo_protobuf//:gogo_proto_go", + "@com_github_golang_protobuf//ptypes/duration:go_default_library", + "@com_github_golang_protobuf//ptypes/struct:go_default_library", + "@com_github_golang_protobuf//ptypes/wrappers:go_default_library", + "@com_github_golang_protobuf//ptypes/any:go_default_library", + "@com_lyft_protoc_gen_validate//validate:go_default_library", + "@googleapis//:http_api_go_proto", + ] + ) + +# TODO(htuch): has_services is currently ignored but will in future support +# gRPC stub generation. +# TODO(htuch): Automatically generate go_proto_library and go_grpc_library +# from api_proto_library. +def api_proto_library(name, visibility = ["//visibility:private"], srcs = [], deps = [], has_services = 0, require_py = 1): + # This is now vestigial, since there are no direct consumers in + # the data plane API. However, we want to maintain native proto_library support + # in the proto graph to (1) support future C++ use of native rules with + # cc_proto_library (or some Bazel aspect that works on proto_library) when + # it can play well with the PGV plugin and (2) other language support that + # can make use of native proto_library. + + if visibility == ["//visibility:private"]: + visibility = ["//docs"] + elif visibility != ["//visibility:public"]: + visibility = visibility + ["//docs"] + + native.proto_library( + name = name, + srcs = srcs, + deps = deps + [ + "@com_google_protobuf//:any_proto", + "@com_google_protobuf//:descriptor_proto", + "@com_google_protobuf//:duration_proto", + "@com_google_protobuf//:struct_proto", + "@com_google_protobuf//:timestamp_proto", + "@com_google_protobuf//:wrappers_proto", + "@googleapis//:http_api_protos_proto", + "@googleapis//:rpc_status_protos_lib", + "@com_github_gogo_protobuf//:gogo_proto", + "@com_lyft_protoc_gen_validate//validate:validate_proto", + ], + visibility = visibility, + ) + # Under the hood, this is just an extension of the Protobuf library's + # bespoke cc_proto_library. 
It doesn't consume proto_library as a proto + # provider. Hopefully one day we can move to a model where this target and + # the proto_library above are aligned. + pgv_cc_proto_library( + name = _Suffix(name, _CC_SUFFIX), + srcs = srcs, + deps = [_LibrarySuffix(d, _CC_SUFFIX) for d in deps], + external_deps = [ + "@com_google_protobuf//:cc_wkt_protos", + "@googleapis//:http_api_protos", + "@googleapis//:rpc_status_protos", + "@com_github_gogo_protobuf//:gogo_proto_cc", + ], + visibility = ["//visibility:public"], + ) + if (require_py == 1): + api_py_proto_library(name, srcs, deps, has_services) + +def api_cc_test(name, srcs, proto_deps): + native.cc_test( + name = name, + srcs = srcs, + deps = [_LibrarySuffix(d, _CC_SUFFIX) for d in proto_deps], + ) + +def api_go_test(name, size, importpath, srcs = [], deps = []): + go_test( + name = name, + size = size, + srcs = srcs, + importpath = importpath, + deps = deps, + ) diff --git a/api/bazel/repositories.bzl b/api/bazel/repositories.bzl new file mode 100644 index 000000000000..e122ebb9c839 --- /dev/null +++ b/api/bazel/repositories.bzl @@ -0,0 +1,225 @@ +GOOGLEAPIS_SHA = "5c6df0cd18c6a429eab739fb711c27f6e1393366" # May 14, 2017 +GOGOPROTO_SHA = "342cbe0a04158f6dcb03ca0079991a51a4248c02" # Oct 7, 2017 +PROMETHEUS_SHA = "6f3806018612930941127f2a7c6c453ba2c527d2" # Nov 02, 2017 +OPENCENSUS_SHA = "993c711ba22a5f08c1d4de58a3c07466995ed962" # Dec 13, 2017 + +PGV_GIT_SHA = "3204975f8145b7d187081b7034060012ae838d17" + +load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository") + +def api_dependencies(): + git_repository( + name = "com_lyft_protoc_gen_validate", + remote = "https://github.com/lyft/protoc-gen-validate.git", + commit = PGV_GIT_SHA, + ) + native.new_http_archive( + name = "googleapis", + strip_prefix = "googleapis-" + GOOGLEAPIS_SHA, + url = "https://github.com/googleapis/googleapis/archive/" + GOOGLEAPIS_SHA + ".tar.gz", + build_file_content = """ +load("@com_google_protobuf//:protobuf.bzl", 
"cc_proto_library", "py_proto_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +filegroup( + name = "http_api_protos_src", + srcs = [ + "google/api/annotations.proto", + "google/api/http.proto", + ], + visibility = ["//visibility:public"], + ) + +go_proto_library( + name = "descriptor_go_proto", + importpath = "github.com/golang/protobuf/protoc-gen-go/descriptor", + proto = "@com_google_protobuf//:descriptor_proto", + visibility = ["//visibility:public"], +) + +proto_library( + name = "http_api_protos_proto", + srcs = [":http_api_protos_src"], + deps = ["@com_google_protobuf//:descriptor_proto"], + visibility = ["//visibility:public"], +) + +cc_proto_library( + name = "http_api_protos", + srcs = [ + "google/api/annotations.proto", + "google/api/http.proto", + ], + default_runtime = "@com_google_protobuf//:protobuf", + protoc = "@com_google_protobuf//:protoc", + deps = ["@com_google_protobuf//:cc_wkt_protos"], + visibility = ["//visibility:public"], +) + +py_proto_library( + name = "http_api_protos_py", + srcs = [ + "google/api/annotations.proto", + "google/api/http.proto", + ], + include = ".", + default_runtime = "@com_google_protobuf//:protobuf_python", + protoc = "@com_google_protobuf//:protoc", + visibility = ["//visibility:public"], + deps = ["@com_google_protobuf//:protobuf_python"], +) + +go_proto_library( + name = "http_api_go_proto", + importpath = "google.golang.org/genproto/googleapis/api/annotations", + proto = ":http_api_protos_proto", + visibility = ["//visibility:public"], + deps = [ + ":descriptor_go_proto", + ], +) + +filegroup( + name = "rpc_status_protos_src", + srcs = [ + "google/rpc/status.proto", + ], + visibility = ["//visibility:public"], +) + +proto_library( + name = "rpc_status_protos_lib", + srcs = [":rpc_status_protos_src"], + deps = ["@com_google_protobuf//:any_proto"], + visibility = ["//visibility:public"], +) +cc_proto_library( + name = "rpc_status_protos", + srcs = ["google/rpc/status.proto"], + 
default_runtime = "@com_google_protobuf//:protobuf", + protoc = "@com_google_protobuf//:protoc", + deps = [ + "@com_google_protobuf//:cc_wkt_protos" + ], + visibility = ["//visibility:public"], +) + +go_proto_library( + name = "rpc_status_go_proto", + importpath = "google.golang.org/genproto/googleapis/rpc/status", + proto = ":rpc_status_protos_lib", + visibility = ["//visibility:public"], + deps = [ + "@com_github_golang_protobuf//ptypes/any:go_default_library", + ], +) + +py_proto_library( + name = "rpc_status_protos_py", + srcs = [ + "google/rpc/status.proto", + ], + include = ".", + default_runtime = "@com_google_protobuf//:protobuf_python", + protoc = "@com_google_protobuf//:protoc", + visibility = ["//visibility:public"], + deps = ["@com_google_protobuf//:protobuf_python"], +) +""", + ) + + native.new_http_archive( + name = "com_github_gogo_protobuf", + strip_prefix = "protobuf-" + GOGOPROTO_SHA, + url = "https://github.com/gogo/protobuf/archive/" + GOGOPROTO_SHA + ".tar.gz", + build_file_content = """ +load("@com_google_protobuf//:protobuf.bzl", "cc_proto_library", "py_proto_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +proto_library( + name = "gogo_proto", + srcs = [ + "gogoproto/gogo.proto", + ], + deps = [ + "@com_google_protobuf//:descriptor_proto", + ], + visibility = ["//visibility:public"], +) + +go_proto_library( + name = "descriptor_go_proto", + importpath = "github.com/golang/protobuf/protoc-gen-go/descriptor", + proto = "@com_google_protobuf//:descriptor_proto", + visibility = ["//visibility:public"], +) + +cc_proto_library( + name = "gogo_proto_cc", + srcs = [ + "gogoproto/gogo.proto", + ], + default_runtime = "@com_google_protobuf//:protobuf", + protoc = "@com_google_protobuf//:protoc", + deps = ["@com_google_protobuf//:cc_wkt_protos"], + visibility = ["//visibility:public"], +) + +go_proto_library( + name = "gogo_proto_go", + importpath = "gogoproto", + proto = ":gogo_proto", + visibility = ["//visibility:public"], 
+ deps = [ + ":descriptor_go_proto", + ], +) + +py_proto_library( + name = "gogo_proto_py", + srcs = [ + "gogoproto/gogo.proto", + ], + default_runtime = "@com_google_protobuf//:protobuf_python", + protoc = "@com_google_protobuf//:protoc", + visibility = ["//visibility:public"], + deps = ["@com_google_protobuf//:protobuf_python"], +) + """, + ) + + native.new_http_archive( + name = "promotheus_metrics_model", + strip_prefix = "client_model-" + PROMETHEUS_SHA, + url = "https://github.com/prometheus/client_model/archive/" + PROMETHEUS_SHA + ".tar.gz", + build_file_content = """ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library") + +api_proto_library( + name = "client_model", + srcs = [ + "metrics.proto", + ], + visibility = ["//visibility:public"], +) + """, + ) + + native.new_http_archive( + name = "io_opencensus_trace", + strip_prefix = "opencensus-proto-" + OPENCENSUS_SHA + "/opencensus/proto/trace", + url = "https://github.com/census-instrumentation/opencensus-proto/archive/" + OPENCENSUS_SHA + ".tar.gz", + build_file_content = """ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_library") + +api_proto_library( + name = "trace_model", + srcs = [ + "trace.proto", + ], + visibility = ["//visibility:public"], +) + """, + ) + + diff --git a/api/diagrams/ads.svg b/api/diagrams/ads.svg new file mode 100644 index 000000000000..f2302abd8d32 --- /dev/null +++ b/api/diagrams/ads.svg @@ -0,0 +1,9 @@ +participant Envoy as E [color="black"] +participant Management Server as M [color="black"] + +E->M: (V=X,R={},N=A,T=CDS) [color="green"] +M->E: (V=Y,R={foo:...},N=B,T=CDS) [color="gray"] +E->M: (V=,R={foo},N=,T=EDS) [color="green"] +M->E: (V=M,R={foo:...},N=D,T=EDS) [color="gray"] +E->M: (V=M,R={foo},N=D,T=EDS) [color="green"] +E->M: (V=Y,R={},N=B,T=CDS) [color="green"]Created with Raphaël 2.2.0EnvoyEnvoyManagement ServerManagement 
Server(V=X,R={},N=A,T=CDS)(V=Y,R={foo:...},N=B,T=CDS)(V=,R={foo},N=,T=EDS)(V=M,R={foo:...},N=D,T=EDS)(V=M,R={foo},N=D,T=EDS)(V=Y,R={},N=B,T=CDS) \ No newline at end of file diff --git a/api/diagrams/cds-eds-resources.svg b/api/diagrams/cds-eds-resources.svg new file mode 100644 index 000000000000..7fbe958c8953 --- /dev/null +++ b/api/diagrams/cds-eds-resources.svg @@ -0,0 +1,8 @@ +participant Envoy as E [color="black"] +participant Management Server 0 as M0 [color="black"] +participant Management Server 1 as M1 [color="black"] + +E->M1: (V=..,R={},N=..,T=CDS) [color="green"] +E->M0: (V=X,R={foo},N=A,T=EDS) [color="green"] +M1->E: (V=M,R={foo:...,bar:...},N=D,T=CDS) [color="gray"] +E->M0: (V=X,R={foo,bar},N=A,T=EDS [color="green"]Created with Raphaël 2.2.0EnvoyEnvoyManagement Server 0Management Server 0Management Server 1Management Server 1(V=..,R={},N=..,T=CDS)(V=X,R={foo},N=A,T=EDS)(V=M,R={foo:...,bar:...},N=D,T=CDS)(V=X,R={foo,bar},N=A,T=EDS \ No newline at end of file diff --git a/api/diagrams/eds-distinct-stream.svg b/api/diagrams/eds-distinct-stream.svg new file mode 100644 index 000000000000..d00f0169d1c1 --- /dev/null +++ b/api/diagrams/eds-distinct-stream.svg @@ -0,0 +1,10 @@ +participant Envoy as E [color="black"] +participant Management Server 0 as M0 [color="black"] +participant Management Server 1 as M1 [color="black"] + +E->M0: (V=X,R={foo},N=A,T=EDS) [color="green"] +E->M1: (V=M,R={bar},N=D,T=EDS) [color="green"] +M0->E: (V=Y,R={foo:...,},N=B,T=EDS) [color="gray"] +E->M0: (V=Y,R={foo},N=B,T=EDS) [color="green"] +M1->E: (V=N,R={bar:...,},N=E,T=EDS) [color="gray"] +E->M1: (V=N,R={bar},N=E,T=EDS) [color="green"]Created with Raphaël 2.2.0EnvoyEnvoyManagement Server 0Management Server 0Management Server 1Management Server 1(V=X,R={foo},N=A,T=EDS)(V=M,R={bar},N=D,T=EDS)(V=Y,R={foo:...,},N=B,T=EDS)(V=Y,R={foo},N=B,T=EDS)(V=N,R={bar:...,},N=E,T=EDS)(V=N,R={bar},N=E,T=EDS) \ No newline at end of file diff --git a/api/diagrams/eds-same-stream.svg 
b/api/diagrams/eds-same-stream.svg new file mode 100644 index 000000000000..1720ed03a3bd --- /dev/null +++ b/api/diagrams/eds-same-stream.svg @@ -0,0 +1,6 @@ +participant Envoy as E [color="black"] +participant Management Server as M [color="black"] + +E->M: (V=X,R={foo,bar},N=A,T=EDS) [color="green"] +M->E: (V=Y,R={foo:...,bar:...},N=B,T=EDS) [color="gray"] +E->M: (V=Y,R={foo,bar},N=B,T=EDS) [color="green"]Created with Raphaël 2.2.0EnvoyEnvoyManagement ServerManagement Server(V=X,R={foo,bar},N=A,T=EDS)(V=Y,R={foo:...,bar:...},N=B,T=EDS)(V=Y,R={foo,bar},N=B,T=EDS) \ No newline at end of file diff --git a/api/diagrams/envoy-perf-script.svg b/api/diagrams/envoy-perf-script.svg new file mode 100644 index 000000000000..74759e14829d --- /dev/null +++ b/api/diagrams/envoy-perf-script.svg @@ -0,0 +1,4795 @@ + + + + + + + + + + + + + +Flame Graph + +Reset Zoom +Search + + +__ip_local_out (1 samples, 0.40%) + + + +inet_ehashfn (1 samples, 0.40%) + + + +sock_sendmsg (2 samples, 0.81%) + + + +skb_clone (1 samples, 0.40%) + + + +__netif_receive_skb (4 samples, 1.62%) + + + +ip_local_out (8 samples, 3.24%) +ip_.. + + +do_iter_readv_writev (1 samples, 0.40%) + + + +do_softirq_own_stack (1 samples, 0.40%) + + + +ipv4_mtu (1 samples, 0.40%) + + + +inet_recvmsg (3 samples, 1.21%) + + + +do_writev (19 samples, 7.69%) +do_writev + + +sk_reset_timer (1 samples, 0.40%) + + + +ip_finish_output (7 samples, 2.83%) +ip.. + + +__sk_dst_check (1 samples, 0.40%) + + + +ip_output (7 samples, 2.83%) +ip.. + + +tcmalloc::ThreadCache::ReleaseToCentralCache (1 samples, 0.40%) + + + +tcp_rcv_established (3 samples, 1.21%) + + + +sys_writev (8 samples, 3.24%) +sys.. + + +__libc_readv (5 samples, 2.02%) +_.. + + +tcp_transmit_skb (3 samples, 1.21%) + + + +release_sock (1 samples, 0.40%) + + + +__netif_receive_skb_core (1 samples, 0.40%) + + + +entry_SYSCALL_64_fastpath (2 samples, 0.81%) + + + +ip_queue_xmit (6 samples, 2.43%) +ip.. 
+ + +tcp_send_delayed_ack (1 samples, 0.40%) + + + +tcp_push (7 samples, 2.83%) +tc.. + + +dev_queue_xmit_nit (1 samples, 0.40%) + + + +entry_SYSCALL_64_fastpath (8 samples, 3.24%) +ent.. + + +do_readv_writev (3 samples, 1.21%) + + + +ip_rcv (2 samples, 0.81%) + + + +__netif_receive_skb_core (3 samples, 1.21%) + + + +__softirqentry_text_start (2 samples, 0.81%) + + + +tcp_rcv_established (1 samples, 0.40%) + + + +sock_def_readable (1 samples, 0.40%) + + + +do_readv_writev (7 samples, 2.83%) +do.. + + +tcp_recvmsg (2 samples, 0.81%) + + + +tcp_push (12 samples, 4.86%) +tcp_push + + +ip_rcv (1 samples, 0.40%) + + + +vfs_writev (3 samples, 1.21%) + + + +ip_local_deliver (4 samples, 1.62%) + + + +__local_bh_enable_ip (4 samples, 1.62%) + + + +Envoy::Buffer::WatermarkBuffer::write (65 samples, 26.32%) +Envoy::Buffer::WatermarkBuffer::write + + +tcp_v4_do_rcv (1 samples, 0.40%) + + + +ip_rcv_finish (3 samples, 1.21%) + + + +__tcp_push_pending_frames (3 samples, 1.21%) + + + +sys_writev (13 samples, 5.26%) +sys_wr.. + + +ip_output (2 samples, 0.81%) + + + +__libc_writev (13 samples, 5.26%) +__libc.. + + +tcp_rcv_established (1 samples, 0.40%) + + + +sock_sendmsg (12 samples, 4.86%) +sock_s.. + + +Envoy::Network::ConnectionImpl::doWriteToSocket (98 samples, 39.68%) +Envoy::Network::ConnectionImpl::doWriteToSocket + + +copy_user_enhanced_fast_string (5 samples, 2.02%) +c.. + + +dev_gro_receive (1 samples, 0.40%) + + + +__wake_up_sync_key (3 samples, 1.21%) + + + +dev_queue_xmit (1 samples, 0.40%) + + + +entry_SYSCALL_64_fastpath (2 samples, 0.81%) + + + +tcp_rcv_established (2 samples, 0.81%) + + + +do_iter_readv_writev (1 samples, 0.40%) + + + +do_readv_writev (2 samples, 0.81%) + + + +do_readv_writev (12 samples, 4.86%) +do_rea.. 
+ + +sock_sendmsg (2 samples, 0.81%) + + + +tcp_v4_rcv (4 samples, 1.62%) + + + +net_rx_action (1 samples, 0.40%) + + + +[libc-2.17.so] (1 samples, 0.40%) + + + +ip_queue_xmit (1 samples, 0.40%) + + + +ip_local_deliver_finish (3 samples, 1.21%) + + + +ip_output (6 samples, 2.43%) +ip.. + + +__netif_receive_skb (3 samples, 1.21%) + + + +do_readv_writev (5 samples, 2.02%) +d.. + + +sys_readv (5 samples, 2.02%) +s.. + + +ip_queue_xmit (3 samples, 1.21%) + + + +evbuffer_expand_fast_ (3 samples, 1.21%) + + + +__softirqentry_text_start (1 samples, 0.40%) + + + +netif_receive_skb_internal (1 samples, 0.40%) + + + +sock_read_iter (2 samples, 0.81%) + + + +do_readv_writev (15 samples, 6.07%) +do_readv.. + + +copy_user_enhanced_fast_string (1 samples, 0.40%) + + + +tcp_sendmsg (13 samples, 5.26%) +tcp_se.. + + +ip_finish_output2 (5 samples, 2.02%) +i.. + + +__netif_receive_skb (2 samples, 0.81%) + + + +ip_finish_output (4 samples, 1.62%) + + + +__libc_writev (8 samples, 3.24%) +__l.. + + +[unknown] (3 samples, 1.21%) + + + +__libc_readv (2 samples, 0.81%) + + + +do_readv (2 samples, 0.81%) + + + +__netif_receive_skb (4 samples, 1.62%) + + + +_raw_spin_lock_bh (1 samples, 0.40%) + + + +sock_def_readable (1 samples, 0.40%) + + + +ip_output (8 samples, 3.24%) +ip_.. + + +ipv4_dst_check (1 samples, 0.40%) + + + +tcp_sendmsg (11 samples, 4.45%) +tcp_s.. + + +tcp_recvmsg (1 samples, 0.40%) + + + +do_softirq (4 samples, 1.62%) + + + +[unknown] (19 samples, 7.69%) +[unknown] + + +skb_entail (1 samples, 0.40%) + + + +process_backlog (3 samples, 1.21%) + + + +__lock_text_start (1 samples, 0.40%) + + + +do_iter_readv_writev (2 samples, 0.81%) + + + +ip_finish_output (2 samples, 0.81%) + + + +__dev_queue_xmit (3 samples, 1.21%) + + + +ip_rcv_finish (2 samples, 0.81%) + + + +ip_output (1 samples, 0.40%) + + + +ip_finish_output2 (6 samples, 2.43%) +ip.. 
+ + +copy_from_iter (1 samples, 0.40%) + + + +inet_sendmsg (2 samples, 0.81%) + + + +entry_SYSCALL_64_fastpath (19 samples, 7.69%) +entry_SYSC.. + + +entry_SYSCALL_64_fastpath (13 samples, 5.26%) +entry_.. + + +tcp_send_ack (1 samples, 0.40%) + + + +inet_recvmsg (1 samples, 0.40%) + + + +__GI___ioctl (1 samples, 0.40%) + + + +ip_finish_output2 (4 samples, 1.62%) + + + +do_writev (4 samples, 1.62%) + + + +inet_sendmsg (1 samples, 0.40%) + + + +ip_local_out (10 samples, 4.05%) +ip_l.. + + +sys_writev (1 samples, 0.40%) + + + +__tcp_ack_snd_check (1 samples, 0.40%) + + + +tcp_recvmsg (1 samples, 0.40%) + + + +sock_sendmsg (1 samples, 0.40%) + + + +copy_user_enhanced_fast_string (2 samples, 0.81%) + + + +do_readv_writev (19 samples, 7.69%) +do_readv_w.. + + +sys_writev (33 samples, 13.36%) +sys_writev + + +tcp_push_one (2 samples, 0.81%) + + + +tcp_transmit_skb (1 samples, 0.40%) + + + +fsnotify (1 samples, 0.40%) + + + +netif_rx_internal (1 samples, 0.40%) + + + +dev_hard_start_xmit (1 samples, 0.40%) + + + +ip_queue_xmit (1 samples, 0.40%) + + + +process_backlog (2 samples, 0.81%) + + + +do_softirq_own_stack (6 samples, 2.43%) +do.. + + +ip_local_deliver_finish (1 samples, 0.40%) + + + +do_writev (15 samples, 6.07%) +do_writev + + +__dev_queue_xmit (2 samples, 0.81%) + + + +copy_user_enhanced_fast_string (11 samples, 4.45%) +copy_.. + + +process_backlog (2 samples, 0.81%) + + + +Envoy::Network::ClientConnectionImpl::~ClientConnectionImpl (1 samples, 0.40%) + + + +ip_rcv_finish (3 samples, 1.21%) + + + +loopback_xmit (3 samples, 1.21%) + + + +do_writev (17 samples, 6.88%) +do_writev + + +do_readv_writev (17 samples, 6.88%) +do_readv_.. + + +__local_bh_enable_ip (1 samples, 0.40%) + + + +ip_local_deliver (3 samples, 1.21%) + + + +do_readv (2 samples, 0.81%) + + + +entry_SYSCALL_64_fastpath (2 samples, 0.81%) + + + +tcp_transmit_skb (6 samples, 2.43%) +tc.. 
+ + +__netif_receive_skb_core (2 samples, 0.81%) + + + +tcp_sendmsg (17 samples, 6.88%) +tcp_sendmsg + + +sk_page_frag_refill (1 samples, 0.40%) + + + +__tcp_push_pending_frames (3 samples, 1.21%) + + + +tcp_transmit_skb (1 samples, 0.40%) + + + +sock_read_iter (3 samples, 1.21%) + + + +ip_rcv_finish (3 samples, 1.21%) + + + +ip_rcv (3 samples, 1.21%) + + + +__tcp_ack_snd_check (1 samples, 0.40%) + + + +__dev_queue_xmit (1 samples, 0.40%) + + + +sys_writev (15 samples, 6.07%) +sys_writev + + +queued_spin_lock_slowpath (1 samples, 0.40%) + + + +ip_local_deliver (1 samples, 0.40%) + + + +tcp_push (7 samples, 2.83%) +tc.. + + +do_iter_readv_writev (4 samples, 1.62%) + + + +sock_recvmsg (4 samples, 1.62%) + + + +xen_clocksource_read (1 samples, 0.40%) + + + +process_backlog (1 samples, 0.40%) + + + +do_readv (2 samples, 0.81%) + + + +__libc_writev (7 samples, 2.83%) +__.. + + +sys_readv (1 samples, 0.40%) + + + +__dev_queue_xmit (1 samples, 0.40%) + + + +ip_finish_output2 (1 samples, 0.40%) + + + +copy_user_enhanced_fast_string (2 samples, 0.81%) + + + +__skb_clone (1 samples, 0.40%) + + + +tcp_rcv_established (1 samples, 0.40%) + + + +ip_finish_output (1 samples, 0.40%) + + + +ip_rcv_finish (2 samples, 0.81%) + + + +pvclock_clocksource_read (1 samples, 0.40%) + + + +skb_release_all (1 samples, 0.40%) + + + +entry_SYSCALL_64_fastpath (1 samples, 0.40%) + + + +vfs_readv (3 samples, 1.21%) + + + +ip_finish_output (1 samples, 0.40%) + + + +do_softirq_own_stack (1 samples, 0.40%) + + + +__lock_text_start (1 samples, 0.40%) + + + +copy_from_iter (1 samples, 0.40%) + + + +vfs_writev (17 samples, 6.88%) +vfs_writev + + +evbuffer_get_length (1 samples, 0.40%) + + + +__softirqentry_text_start (1 samples, 0.40%) + + + +inet_sendmsg (6 samples, 2.43%) +in.. 
+ + +evbuffer_drain (3 samples, 1.21%) + + + +sock_recvmsg (3 samples, 1.21%) + + + +do_softirq_own_stack (3 samples, 1.21%) + + + +netif_rx (1 samples, 0.40%) + + + +ip_finish_output (1 samples, 0.40%) + + + +__tcp_push_pending_frames (7 samples, 2.83%) +__.. + + +__tcp_v4_send_check (1 samples, 0.40%) + + + +__softirqentry_text_start (1 samples, 0.40%) + + + +sock_sendmsg (6 samples, 2.43%) +so.. + + +[unknown] (19 samples, 7.69%) +[unknown] + + +sk_stream_alloc_skb (1 samples, 0.40%) + + + +netif_rx (1 samples, 0.40%) + + + +tcp_v4_do_rcv (5 samples, 2.02%) +t.. + + +tcp_transmit_skb (1 samples, 0.40%) + + + +__tcp_push_pending_frames (8 samples, 3.24%) +__t.. + + +ip_finish_output2 (2 samples, 0.81%) + + + +ip_output (2 samples, 0.81%) + + + +skb_page_frag_refill (2 samples, 0.81%) + + + +ip_local_out (6 samples, 2.43%) +ip.. + + +Envoy::Network::ConnectionImpl::onFileEvent (164 samples, 66.40%) +Envoy::Network::ConnectionImpl::onFileEvent + + +tcp_write_xmit (1 samples, 0.40%) + + + +process_backlog (3 samples, 1.21%) + + + +dev_hard_start_xmit (1 samples, 0.40%) + + + +entry_SYSCALL_64_fastpath (3 samples, 1.21%) + + + +tcp_rcv_established (2 samples, 0.81%) + + + +net_rx_action (3 samples, 1.21%) + + + +tcp_transmit_skb (10 samples, 4.05%) +tcp_.. + + +tcp_rcv_established (1 samples, 0.40%) + + + +__lock_text_start (1 samples, 0.40%) + + + +process_backlog (1 samples, 0.40%) + + + +tcp_v4_rcv (2 samples, 0.81%) + + + +tcp_push (5 samples, 2.02%) +t.. + + +do_softirq_own_stack (1 samples, 0.40%) + + + +tcp_push (5 samples, 2.02%) +t.. + + +entry_SYSCALL_64_fastpath (1 samples, 0.40%) + + + +ip_local_out (1 samples, 0.40%) + + + +sock_def_readable (2 samples, 0.81%) + + + +do_softirq_own_stack (2 samples, 0.81%) + + + +__lock_text_start (1 samples, 0.40%) + + + +__lock_text_start (4 samples, 1.62%) + + + +tcp_write_xmit (10 samples, 4.05%) +tcp_.. 
+ + +__libc_writev (2 samples, 0.81%) + + + +ip_finish_output2 (1 samples, 0.40%) + + + +do_iter_readv_writev (1 samples, 0.40%) + + + +tcp_stream_memory_free (1 samples, 0.40%) + + + +__lock_text_start (3 samples, 1.21%) + + + +do_softirq_own_stack (2 samples, 0.81%) + + + +ip_finish_output (1 samples, 0.40%) + + + +__tcp_push_pending_frames (1 samples, 0.40%) + + + +do_iter_readv_writev (3 samples, 1.21%) + + + +tcp_transmit_skb (1 samples, 0.40%) + + + +skb_entail (1 samples, 0.40%) + + + +tcp_send_delayed_ack (1 samples, 0.40%) + + + +ip_finish_output (5 samples, 2.02%) +i.. + + +tcp_v4_rcv (4 samples, 1.62%) + + + +sock_write_iter (13 samples, 5.26%) +sock_w.. + + +sock_write_iter (3 samples, 1.21%) + + + +ip_local_deliver (4 samples, 1.62%) + + + +ip_local_deliver (1 samples, 0.40%) + + + +process_backlog (3 samples, 1.21%) + + + +tcp_send_ack (1 samples, 0.40%) + + + +ip_local_deliver_finish (3 samples, 1.21%) + + + +ip_queue_xmit (1 samples, 0.40%) + + + +enqueue_to_backlog (1 samples, 0.40%) + + + +dev_hard_start_xmit (2 samples, 0.81%) + + + +do_softirq (2 samples, 0.81%) + + + +do_iter_readv_writev (17 samples, 6.88%) +do_iter_r.. + + +ip_rcv (6 samples, 2.43%) +ip.. 
+ + +xen_hvm_callback_vector (1 samples, 0.40%) + + + +sock_def_readable (1 samples, 0.40%) + + + +tcp_v4_do_rcv (3 samples, 1.21%) + + + +__bpf_prog_run (1 samples, 0.40%) + + + +__alloc_pages_nodemask (1 samples, 0.40%) + + + +__dev_queue_xmit (4 samples, 1.62%) + + + +do_softirq (1 samples, 0.40%) + + + +do_iter_readv_writev (1 samples, 0.40%) + + + +net_rx_action (1 samples, 0.40%) + + + +inet_sendmsg (2 samples, 0.81%) + + + +sys_readv (1 samples, 0.40%) + + + +sk_filter_trim_cap (1 samples, 0.40%) + + + +sock_write_iter (3 samples, 1.21%) + + + +__lock_text_start (1 samples, 0.40%) + + + +ip_output (3 samples, 1.21%) + + + +tcp_data_queue (1 samples, 0.40%) + + + +__lock_text_start (1 samples, 0.40%) + + + +ip_output (1 samples, 0.40%) + + + +ip_local_deliver (3 samples, 1.21%) + + + +dev_queue_xmit (1 samples, 0.40%) + + + +ep_poll (1 samples, 0.40%) + + + +ip_rcv (3 samples, 1.21%) + + + +dev_hard_start_xmit (2 samples, 0.81%) + + + +vfs_writev (3 samples, 1.21%) + + + +skb_copy_datagram_iter (1 samples, 0.40%) + + + +__lock_text_start (2 samples, 0.81%) + + + +__libc_writev (6 samples, 2.43%) +__.. + + +ip_output (1 samples, 0.40%) + + + +ip_rcv_finish (5 samples, 2.02%) +i.. + + +sched_clock_cpu (1 samples, 0.40%) + + + +__GI___ioctl (1 samples, 0.40%) + + + +napi_gro_receive (1 samples, 0.40%) + + + +dev_queue_xmit (1 samples, 0.40%) + + + +tcp_write_xmit (1 samples, 0.40%) + + + +sys_writev (3 samples, 1.21%) + + + +ip_local_deliver (2 samples, 0.81%) + + + +__dev_queue_xmit (2 samples, 0.81%) + + + +tcp_write_xmit (7 samples, 2.83%) +tc.. + + +__local_bh_enable_ip (6 samples, 2.43%) +__.. 
+ + +do_softirq_own_stack (2 samples, 0.81%) + + + +event_base_loop (189 samples, 76.52%) +event_base_loop + + +__local_bh_enable_ip (2 samples, 0.81%) + + + +dev_hard_start_xmit (1 samples, 0.40%) + + + +xen_evtchn_do_upcall (1 samples, 0.40%) + + + +dev_queue_xmit (1 samples, 0.40%) + + + +__netif_receive_skb (2 samples, 0.81%) + + + +sk_stream_alloc_skb (1 samples, 0.40%) + + + +Envoy::Network::ConnectionImpl::onWriteReady (141 samples, 57.09%) +Envoy::Network::ConnectionImpl::onWriteReady + + +do_readv_writev (2 samples, 0.81%) + + + +ip_queue_xmit (8 samples, 3.24%) +ip_.. + + +ip_finish_output (1 samples, 0.40%) + + + +entry_SYSCALL_64_fastpath (1 samples, 0.40%) + + + +do_writev (19 samples, 7.69%) +do_writev + + +ip_finish_output (8 samples, 3.24%) +ip_.. + + +sock_def_readable (4 samples, 1.62%) + + + +tcp_sendmsg (1 samples, 0.40%) + + + +tcp_transmit_skb (1 samples, 0.40%) + + + +Envoy::Network::FilterManagerImpl::onContinueReading (4 samples, 1.62%) + + + +process_backlog (4 samples, 1.62%) + + + +tcp_v4_rcv (1 samples, 0.40%) + + + +do_softirq (1 samples, 0.40%) + + + +sock_def_readable (1 samples, 0.40%) + + + +do_readv_writev (1 samples, 0.40%) + + + +ip_local_out (3 samples, 1.21%) + + + +epoll_dispatch (1 samples, 0.40%) + + + +__xfrm_policy_check2.constprop.43 (1 samples, 0.40%) + + + +vfs_writev (24 samples, 9.72%) +vfs_writev + + +__tcp_ack_snd_check (1 samples, 0.40%) + + + +ip_queue_xmit (1 samples, 0.40%) + + + +ip_queue_xmit (8 samples, 3.24%) +ip_.. + + +__tcp_ack_snd_check (1 samples, 0.40%) + + + +tcp_write_xmit (11 samples, 4.45%) +tcp_w.. + + +ip_finish_output2 (1 samples, 0.40%) + + + +tcp_v4_rcv (3 samples, 1.21%) + + + +ip_queue_xmit (4 samples, 1.62%) + + + +__libc_writev (1 samples, 0.40%) + + + +process_backlog (2 samples, 0.81%) + + + +tcp_v4_do_rcv (1 samples, 0.40%) + + + +process_backlog (5 samples, 2.02%) +p.. 
+ + +__libc_readv (2 samples, 0.81%) + + + +ip_local_deliver_finish (1 samples, 0.40%) + + + +do_readv_writev (19 samples, 7.69%) +do_readv_w.. + + +[unknown] (1 samples, 0.40%) + + + +tcp_transmit_skb (1 samples, 0.40%) + + + +tcp_rcv_established (3 samples, 1.21%) + + + +__netif_receive_skb (1 samples, 0.40%) + + + +sock_read_iter (1 samples, 0.40%) + + + +tcp_rcv_established (2 samples, 0.81%) + + + +entry_SYSCALL_64_fastpath (2 samples, 0.81%) + + + +tcp_sendmsg (4 samples, 1.62%) + + + +Envoy::Network::ConnectionImplUtility::updateBufferStats (1 samples, 0.40%) + + + +loopback_xmit (1 samples, 0.40%) + + + +entry_SYSCALL_64_fastpath (13 samples, 5.26%) +entry_.. + + +tcp_transmit_skb (1 samples, 0.40%) + + + +do_writev (3 samples, 1.21%) + + + +[unknown] (36 samples, 14.57%) +[unknown] + + +process_backlog (5 samples, 2.02%) +p.. + + +tcp_push_one (1 samples, 0.40%) + + + +__skb_clone (1 samples, 0.40%) + + + +sock_write_iter (2 samples, 0.81%) + + + +xen_clocksource_get_cycles (1 samples, 0.40%) + + + +__netif_receive_skb (1 samples, 0.40%) + + + +tcp_transmit_skb (10 samples, 4.05%) +tcp_.. + + +do_readv_writev (8 samples, 3.24%) +do_.. + + +tcp_sendmsg (6 samples, 2.43%) +tc.. + + +_ZZN5Envoy6Thread6ThreadC4ESt8functionIFvvEEENUlPvE_4_FUNES5_ (211 samples, 85.43%) +_ZZN5Envoy6Thread6ThreadC4ESt8functionIFvvEEENUlPvE_4_FUNES5_ + + +__netif_receive_skb (6 samples, 2.43%) +__.. + + +skb_page_frag_refill (1 samples, 0.40%) + + + +ip_rcv_finish (3 samples, 1.21%) + + + +tcp_transmit_skb (7 samples, 2.83%) +tc.. + + +sock_recvmsg (2 samples, 0.81%) + + + +skb_clone (1 samples, 0.40%) + + + +__softirqentry_text_start (4 samples, 1.62%) + + + +entry_SYSCALL_64_fastpath (6 samples, 2.43%) +en.. 
+ + +tcp_v4_do_rcv (2 samples, 0.81%) + + + +tcp_v4_do_rcv (3 samples, 1.21%) + + + +vfs_writev (2 samples, 0.81%) + + + +__lock_text_start (1 samples, 0.40%) + + + +ep_scan_ready_list.isra.10 (1 samples, 0.40%) + + + +net_rx_action (2 samples, 0.81%) + + + +tcp_sendmsg (1 samples, 0.40%) + + + +ip_local_deliver (2 samples, 0.81%) + + + +tcp_v4_do_rcv (1 samples, 0.40%) + + + +ip_local_deliver (1 samples, 0.40%) + + + +irq_exit (1 samples, 0.40%) + + + +security_socket_sendmsg (1 samples, 0.40%) + + + +entry_SYSCALL_64_fastpath (19 samples, 7.69%) +entry_SYSC.. + + +sys_writev (19 samples, 7.69%) +sys_writev + + +__wake_up_sync_key (1 samples, 0.40%) + + + +inet_recvmsg (2 samples, 0.81%) + + + +do_iter_readv_writev (32 samples, 12.96%) +do_iter_readv_writev + + +sk_reset_timer (1 samples, 0.40%) + + + +swiotlb_dma_mapping_error (1 samples, 0.40%) + + + +sys_writev (13 samples, 5.26%) +sys_wr.. + + +do_readv_writev (4 samples, 1.62%) + + + +copy_user_enhanced_fast_string (2 samples, 0.81%) + + + +ip_finish_output2 (4 samples, 1.62%) + + + +dev_queue_xmit (1 samples, 0.40%) + + + +ip_rcv (2 samples, 0.81%) + + + +do_writev (1 samples, 0.40%) + + + +__libc_readv (1 samples, 0.40%) + + + +tcp_recvmsg (4 samples, 1.62%) + + + +__alloc_skb (1 samples, 0.40%) + + + +__netif_receive_skb (3 samples, 1.21%) + + + +tcp_sendmsg (12 samples, 4.86%) +tcp_se.. 
+ + +__netif_receive_skb_core (2 samples, 0.81%) + + + +entry_SYSCALL_64_fastpath (2 samples, 0.81%) + + + +__netif_receive_skb_core (3 samples, 1.21%) + + + +tcp_init_tso_segs (1 samples, 0.40%) + + + +do_readv_writev (2 samples, 0.81%) + + + +event_process_active_single_queue.isra.29 (167 samples, 67.61%) +event_process_active_single_queue.isra.29 + + +do_readv (1 samples, 0.40%) + + + +rw_verify_area (1 samples, 0.40%) + + + +ip_queue_xmit (4 samples, 1.62%) + + + +tcp_write_xmit (3 samples, 1.21%) + + + +tcp_init_tso_segs (1 samples, 0.40%) + + + +ip_local_out (2 samples, 0.81%) + + + +__kmalloc_reserve.isra.37 (1 samples, 0.40%) + + + +__kfree_skb (1 samples, 0.40%) + + + +sock_recvmsg (2 samples, 0.81%) + + + +loopback_xmit (1 samples, 0.40%) + + + +ip_rcv_finish (3 samples, 1.21%) + + + +tcp_tso_segs (1 samples, 0.40%) + + + +copy_from_iter (1 samples, 0.40%) + + + +__softirqentry_text_start (5 samples, 2.02%) +_.. + + +rw_verify_area (1 samples, 0.40%) + + + +sock_write_iter (7 samples, 2.83%) +so.. + + +[unknown] (3 samples, 1.21%) + + + +sock_write_iter (6 samples, 2.43%) +so.. + + +ip_local_out (1 samples, 0.40%) + + + +__release_sock (1 samples, 0.40%) + + + +event_active (18 samples, 7.29%) +event_active + + +sock_def_readable (1 samples, 0.40%) + + + +inet_sendmsg (8 samples, 3.24%) +ine.. + + +net_rx_action (5 samples, 2.02%) +n.. + + +tcp_write_xmit (7 samples, 2.83%) +tc.. + + +tcp_push (15 samples, 6.07%) +tcp_push + + +loopback_xmit (2 samples, 0.81%) + + + +lock_sock_nested (1 samples, 0.40%) + + + +do_softirq (4 samples, 1.62%) + + + +sock_read_iter (4 samples, 1.62%) + + + +tcp_sendmsg (8 samples, 3.24%) +tcp.. + + +do_softirq (3 samples, 1.21%) + + + +ip_local_out (2 samples, 0.81%) + + + +napi_gro_complete (1 samples, 0.40%) + + + +evbuffer_chain_new (3 samples, 1.21%) + + + +tcp_transmit_skb (10 samples, 4.05%) +tcp_.. 
+ + +vfs_readv (2 samples, 0.81%) + + + +sched_clock_local (1 samples, 0.40%) + + + +tcp_v4_rcv (1 samples, 0.40%) + + + +vfs_writev (12 samples, 4.86%) +vfs_wr.. + + +sock_sendmsg (4 samples, 1.62%) + + + +sys_writev (4 samples, 1.62%) + + + +do_readv_writev (1 samples, 0.40%) + + + +irq_exit (1 samples, 0.40%) + + + +tcp_transmit_skb (2 samples, 0.81%) + + + +dev_queue_xmit (3 samples, 1.21%) + + + +Envoy::Event::FileEventImpl::assignEvents (164 samples, 66.40%) +Envoy::Event::FileEventImpl::assignEvents + + +tcp_sendmsg (15 samples, 6.07%) +tcp_send.. + + +ip_finish_output2 (1 samples, 0.40%) + + + +ip_output (4 samples, 1.62%) + + + +do_readv_writev (13 samples, 5.26%) +do_rea.. + + +do_softirq_own_stack (1 samples, 0.40%) + + + +__netif_receive_skb_core (1 samples, 0.40%) + + + +copy_user_enhanced_fast_string (2 samples, 0.81%) + + + +ip_queue_xmit (6 samples, 2.43%) +ip.. + + +tcp_send_delayed_ack (1 samples, 0.40%) + + + +copy_user_enhanced_fast_string (1 samples, 0.40%) + + + +dev_queue_xmit (1 samples, 0.40%) + + + +lock_timer_base (1 samples, 0.40%) + + + +sys_readv (2 samples, 0.81%) + + + +__local_bh_enable_ip (4 samples, 1.62%) + + + +ktime_get_with_offset (1 samples, 0.40%) + + + +tcp_transmit_skb (1 samples, 0.40%) + + + +__softirqentry_text_start (4 samples, 1.62%) + + + +skb_clone (1 samples, 0.40%) + + + +tcp_sendmsg (24 samples, 9.72%) +tcp_sendmsg + + +entry_SYSCALL_64_fastpath (3 samples, 1.21%) + + + +__wake_up_sync_key (1 samples, 0.40%) + + + +tcp_push (15 samples, 6.07%) +tcp_push + + +dev_hard_start_xmit (2 samples, 0.81%) + + + +copy_user_enhanced_fast_string (1 samples, 0.40%) + + + +Envoy::Filter::TcpProxy::onData (1 samples, 0.40%) + + + +ip_finish_output (10 samples, 4.05%) +ip_f.. + + +tcp_wfree (1 samples, 0.40%) + + + +__local_bh_enable_ip (4 samples, 1.62%) + + + +__put_page (1 samples, 0.40%) + + + +ip_local_out (7 samples, 2.83%) +ip.. 
+ + +ip_local_deliver_finish (1 samples, 0.40%) + + + +ip_output (1 samples, 0.40%) + + + +ip_finish_output2 (6 samples, 2.43%) +ip.. + + +process_backlog (1 samples, 0.40%) + + + +do_readv (5 samples, 2.02%) +d.. + + +do_writev (24 samples, 9.72%) +do_writev + + +__netif_receive_skb_core (3 samples, 1.21%) + + + +ip_finish_output2 (7 samples, 2.83%) +ip.. + + +tcp_rcv_established (1 samples, 0.40%) + + + +tcp_sendmsg (27 samples, 10.93%) +tcp_sendmsg + + +ip_local_deliver (5 samples, 2.02%) +i.. + + +inet_sendmsg (15 samples, 6.07%) +inet_sen.. + + +copy_user_enhanced_fast_string (2 samples, 0.81%) + + + +ip_rcv (2 samples, 0.81%) + + + +ixgbevf_clean_rx_irq (1 samples, 0.40%) + + + +tcp_recvmsg (3 samples, 1.21%) + + + +tcp_v4_do_rcv (3 samples, 1.21%) + + + +dev_queue_xmit (4 samples, 1.62%) + + + +tcp_rcv_established (1 samples, 0.40%) + + + +[unknown] (3 samples, 1.21%) + + + +__netif_receive_skb_core (5 samples, 2.02%) +_.. + + +tcp_write_xmit (3 samples, 1.21%) + + + +__softirqentry_text_start (4 samples, 1.62%) + + + +ip_finish_output (8 samples, 3.24%) +ip_.. + + +__tcp_push_pending_frames (15 samples, 6.07%) +__tcp_pu.. + + +sock_def_readable (3 samples, 1.21%) + + + +tcp_nagle_check (1 samples, 0.40%) + + + +net_rx_action (4 samples, 1.62%) + + + +tcp_rcv_established (4 samples, 1.62%) + + + +ip_finish_output (2 samples, 0.81%) + + + +vfs_readv (1 samples, 0.40%) + + + +tcp_v4_do_rcv (3 samples, 1.21%) + + + +tcp_transmit_skb (1 samples, 0.40%) + + + +loopback_xmit (2 samples, 0.81%) + + + +netif_rx (1 samples, 0.40%) + + + +skb_put (1 samples, 0.40%) + + + +sched_clock_cpu (1 samples, 0.40%) + + + +tcp_v4_rcv (1 samples, 0.40%) + + + +__softirqentry_text_start (1 samples, 0.40%) + + + +net_rx_action (3 samples, 1.21%) + + + +tcp_transmit_skb (14 samples, 5.67%) +tcp_tra.. 
+ + +net_rx_action (4 samples, 1.62%) + + + +tcp_rcv_established (1 samples, 0.40%) + + + +__lock_text_start (2 samples, 0.81%) + + + +xen_evtchn_do_upcall (1 samples, 0.40%) + + + +do_iter_readv_writev (24 samples, 9.72%) +do_iter_readv_.. + + +tcp_v4_do_rcv (4 samples, 1.62%) + + + +__tcp_push_pending_frames (11 samples, 4.45%) +__tcp.. + + +ip_finish_output2 (1 samples, 0.40%) + + + +entry_SYSCALL_64_fastpath (7 samples, 2.83%) +en.. + + +ip_finish_output2 (10 samples, 4.05%) +ip_f.. + + +__local_bh_enable_ip (2 samples, 0.81%) + + + +do_softirq (4 samples, 1.62%) + + + +vfs_readv (2 samples, 0.81%) + + + +ip_rcv_finish (1 samples, 0.40%) + + + +sys_writev (17 samples, 6.88%) +sys_writev + + +__netif_receive_skb_core (1 samples, 0.40%) + + + +tcp_sendmsg (3 samples, 1.21%) + + + +__wake_up_sync_key (1 samples, 0.40%) + + + +__free_pages_ok (1 samples, 0.40%) + + + +inet_recvmsg (1 samples, 0.40%) + + + +ip_local_deliver (2 samples, 0.81%) + + + +vfs_writev (13 samples, 5.26%) +vfs_wr.. + + +Envoy::Network::ConnectionImpl::write (1 samples, 0.40%) + + + +ip_rcv (4 samples, 1.62%) + + + +spdlog::logger::log<unsigned long> (1 samples, 0.40%) + + + +vfs_writev (7 samples, 2.83%) +vf.. + + +vfs_writev (6 samples, 2.43%) +vf.. + + +gettime (1 samples, 0.40%) + + + +tcp_v4_rcv (2 samples, 0.81%) + + + +do_softirq (1 samples, 0.40%) + + + +__softirqentry_text_start (2 samples, 0.81%) + + + +__tcp_push_pending_frames (1 samples, 0.40%) + + + +copy_user_enhanced_fast_string (1 samples, 0.40%) + + + +sock_def_readable (4 samples, 1.62%) + + + +ip_local_deliver_finish (3 samples, 1.21%) + + + +sk_free (1 samples, 0.40%) + + + +ip_queue_xmit (11 samples, 4.45%) +ip_qu.. + + +tcp_v4_rcv (3 samples, 1.21%) + + + +do_writev (12 samples, 4.86%) +do_wri.. 
+ + +tcp_rcv_established (1 samples, 0.40%) + + + +sched_clock (1 samples, 0.40%) + + + +ip_finish_output2 (2 samples, 0.81%) + + + +__release_sock (1 samples, 0.40%) + + + +__wake_up_sync_key (1 samples, 0.40%) + + + +__libc_readv (2 samples, 0.81%) + + + +__netif_receive_skb (1 samples, 0.40%) + + + +alloc_pages_current (2 samples, 0.81%) + + + +clock_gettime (1 samples, 0.40%) + + + +tcp_v4_do_rcv (2 samples, 0.81%) + + + +tcp_tso_segs (1 samples, 0.40%) + + + +sock_sendmsg (15 samples, 6.07%) +sock_sen.. + + +tcp_rcv_established (1 samples, 0.40%) + + + +__softirqentry_text_start (1 samples, 0.40%) + + + +ip_local_out (8 samples, 3.24%) +ip_.. + + +start_thread (211 samples, 85.43%) +start_thread + + +__softirqentry_text_start (1 samples, 0.40%) + + + +__softirqentry_text_start (3 samples, 1.21%) + + + +vfs_readv (2 samples, 0.81%) + + + +[unknown] (3 samples, 1.21%) + + + +sock_sendmsg (8 samples, 3.24%) +soc.. + + +do_softirq (1 samples, 0.40%) + + + +ip_queue_xmit (10 samples, 4.05%) +ip_q.. + + +Envoy::Network::ConnectionImpl::write (21 samples, 8.50%) +Envoy::Netwo.. + + +__local_bh_enable_ip (5 samples, 2.02%) +_.. + + +do_iter_readv_writev (19 samples, 7.69%) +do_iter_re.. + + +vfs_writev (1 samples, 0.40%) + + + +ixgbevf_clean_rx_irq (1 samples, 0.40%) + + + +pvclock_clocksource_read (1 samples, 0.40%) + + + +vfs_writev (19 samples, 7.69%) +vfs_writev + + +do_readv (2 samples, 0.81%) + + + +__alloc_skb (1 samples, 0.40%) + + + +do_writev (2 samples, 0.81%) + + + +__netif_receive_skb_core (1 samples, 0.40%) + + + +do_iter_readv_writev (12 samples, 4.86%) +do_ite.. + + +sched_clock_cpu (1 samples, 0.40%) + + + +__copy_skb_header (1 samples, 0.40%) + + + +do_iter_readv_writev (8 samples, 3.24%) +do_.. 
+ + +[unknown] (2 samples, 0.81%) + + + +__softirqentry_text_start (3 samples, 1.21%) + + + +ip_local_deliver_finish (4 samples, 1.62%) + + + +ip_output (4 samples, 1.62%) + + + +skb_release_data (1 samples, 0.40%) + + + +process_backlog (6 samples, 2.43%) +pr.. + + +vfs_writev (8 samples, 3.24%) +vfs.. + + +__skb_clone (1 samples, 0.40%) + + + +do_softirq_own_stack (5 samples, 2.02%) +d.. + + +ip_finish_output2 (1 samples, 0.40%) + + + +sock_write_iter (12 samples, 4.86%) +sock_w.. + + +[unknown] (17 samples, 6.88%) +[unknown] + + +_raw_spin_lock_bh (1 samples, 0.40%) + + + +net_rx_action (3 samples, 1.21%) + + + +entry_SYSCALL_64_fastpath (17 samples, 6.88%) +entry_SYS.. + + +net_rx_action (3 samples, 1.21%) + + + +sock_write_iter (24 samples, 9.72%) +sock_write_iter + + +ip_queue_xmit (1 samples, 0.40%) + + + +do_softirq_own_stack (1 samples, 0.40%) + + + +sock_def_readable (1 samples, 0.40%) + + + +do_iter_readv_writev (2 samples, 0.81%) + + + +mod_timer (1 samples, 0.40%) + + + +tcp_push (8 samples, 3.24%) +tcp.. + + +sk_free (1 samples, 0.40%) + + + +inet_sendmsg (13 samples, 5.26%) +inet_s.. + + +__libc_disable_asynccancel (1 samples, 0.40%) + + + +ip_local_out (1 samples, 0.40%) + + + +spdlog::logger::log<unsigned long, unsigned long> (1 samples, 0.40%) + + + +sock_write_iter (1 samples, 0.40%) + + + +__libc_writev (3 samples, 1.21%) + + + +netif_rx_internal (1 samples, 0.40%) + + + +entry_SYSCALL_64_fastpath (1 samples, 0.40%) + + + +tcp_rcv_established (4 samples, 1.62%) + + + +ip_output (2 samples, 0.81%) + + + +__pv_queued_spin_lock_slowpath (1 samples, 0.40%) + + + +dev_hard_start_xmit (1 samples, 0.40%) + + + +sched_clock (1 samples, 0.40%) + + + +ip_finish_output2 (1 samples, 0.40%) + + + +ip_finish_output (6 samples, 2.43%) +ip.. + + +__release_sock (1 samples, 0.40%) + + + +do_writev (6 samples, 2.43%) +do.. 
+ + +ip_local_deliver_finish (3 samples, 1.21%) + + + +netif_rx_internal (1 samples, 0.40%) + + + +ip_queue_xmit (2 samples, 0.81%) + + + +__kfree_skb (1 samples, 0.40%) + + + +ip_local_deliver (2 samples, 0.81%) + + + +__wake_up_sync_key (1 samples, 0.40%) + + + +tcp_write_xmit (10 samples, 4.05%) +tcp_.. + + +__dev_queue_xmit (1 samples, 0.40%) + + + +vfs_readv (5 samples, 2.02%) +v.. + + +__local_bh_enable_ip (1 samples, 0.40%) + + + +sock_def_readable (2 samples, 0.81%) + + + +sys_readv (2 samples, 0.81%) + + + +__softirqentry_text_start (4 samples, 1.62%) + + + +ip_queue_xmit (2 samples, 0.81%) + + + +__tcp_push_pending_frames (2 samples, 0.81%) + + + +inet_sendmsg (12 samples, 4.86%) +inet_s.. + + +tcp_rcv_established (1 samples, 0.40%) + + + +tcp_rcv_established (2 samples, 0.81%) + + + +net_rx_action (6 samples, 2.43%) +ne.. + + +ip_finish_output2 (6 samples, 2.43%) +ip.. + + +bictcp_cwnd_event (1 samples, 0.40%) + + + +evbuffer_expand_fast_ (12 samples, 4.86%) +evbuff.. + + +tcp_v4_do_rcv (1 samples, 0.40%) + + + +__local_bh_enable_ip (1 samples, 0.40%) + + + +inet_sendmsg (12 samples, 4.86%) +inet_s.. + + +ip_local_out (1 samples, 0.40%) + + + +sock_sendmsg (32 samples, 12.96%) +sock_sendmsg + + +eth_type_trans (1 samples, 0.40%) + + + +[unknown] (1 samples, 0.40%) + + + +__libc_writev (19 samples, 7.69%) +__libc_wri.. + + +__libc_writev (24 samples, 9.72%) +__libc_writev + + +copy_user_enhanced_fast_string (5 samples, 2.02%) +c.. + + +ip_local_deliver_finish (2 samples, 0.81%) + + + +tcp_transmit_skb (6 samples, 2.43%) +tc.. + + +tcp_transmit_skb (1 samples, 0.40%) + + + +do_readv (2 samples, 0.81%) + + + +do_writev (13 samples, 5.26%) +do_wri.. 
+ + +do_readv (1 samples, 0.40%) + + + +ip_finish_output2 (3 samples, 1.21%) + + + +skb_clone (1 samples, 0.40%) + + + +Envoy::Network::FilterManagerImpl::onWrite (1 samples, 0.40%) + + + +ip_local_deliver (1 samples, 0.40%) + + + +skb_copy_datagram_iter (2 samples, 0.81%) + + + +__lock_text_start (1 samples, 0.40%) + + + +sys_writev (24 samples, 9.72%) +sys_writev + + +tcp_write_xmit (6 samples, 2.43%) +tc.. + + +packet_rcv (1 samples, 0.40%) + + + +__netif_receive_skb_core (1 samples, 0.40%) + + + +free_compound_page (1 samples, 0.40%) + + + +rw_copy_check_uvector (1 samples, 0.40%) + + + +tcp_v4_rcv (3 samples, 1.21%) + + + +sock_def_readable (1 samples, 0.40%) + + + +net_rx_action (2 samples, 0.81%) + + + +ip_rcv_finish (1 samples, 0.40%) + + + +sys_readv (2 samples, 0.81%) + + + +__release_sock (1 samples, 0.40%) + + + +ip_local_out (1 samples, 0.40%) + + + +copy_user_enhanced_fast_string (4 samples, 1.62%) + + + +ip_finish_output (6 samples, 2.43%) +ip.. + + +arch_local_irq_save (1 samples, 0.40%) + + + +_raw_spin_lock (1 samples, 0.40%) + + + +__sk_dst_check (1 samples, 0.40%) + + + +do_softirq (1 samples, 0.40%) + + + +ip_local_out (2 samples, 0.81%) + + + +tcp_v4_do_rcv (1 samples, 0.40%) + + + +do_softirq_own_stack (4 samples, 1.62%) + + + +xen_evtchn_do_upcall (1 samples, 0.40%) + + + +tcp_push (1 samples, 0.40%) + + + +tcp_sendmsg (17 samples, 6.88%) +tcp_sendmsg + + +do_iter_readv_writev (6 samples, 2.43%) +do.. + + +tcp_transmit_skb (3 samples, 1.21%) + + + +dev_queue_xmit (1 samples, 0.40%) + + + +event_queue_remove_active (1 samples, 0.40%) + + + +__dev_queue_xmit (1 samples, 0.40%) + + + +__local_bh_enable_ip (1 samples, 0.40%) + + + +__softirqentry_text_start (2 samples, 0.81%) + + + +__alloc_skb (1 samples, 0.40%) + + + +do_iter_readv_writev (2 samples, 0.81%) + + + +skb_release_all (1 samples, 0.40%) + + + +__local_bh_enable_ip (3 samples, 1.21%) + + + +tcp_write_xmit (1 samples, 0.40%) + + + +__libc_writev (15 samples, 6.07%) +__libc_w.. 
+ + +tcp_v4_rcv (1 samples, 0.40%) + + + +tcp_wfree (1 samples, 0.40%) + + + +vfs_writev (13 samples, 5.26%) +vfs_wr.. + + +tcp_transmit_skb (7 samples, 2.83%) +tc.. + + +sys_readv (2 samples, 0.81%) + + + +__netif_receive_skb (1 samples, 0.40%) + + + +__skb_clone (1 samples, 0.40%) + + + +evbuffer_read (3 samples, 1.21%) + + + +netif_rx_internal (1 samples, 0.40%) + + + +do_readv (3 samples, 1.21%) + + + +sock_write_iter (32 samples, 12.96%) +sock_write_iter + + +tcp_push (7 samples, 2.83%) +tc.. + + +inet_sendmsg (19 samples, 7.69%) +inet_sendmsg + + +ip_local_deliver (3 samples, 1.21%) + + + +entry_SYSCALL_64_fastpath (4 samples, 1.62%) + + + +__softirqentry_text_start (1 samples, 0.40%) + + + +tcp_transmit_skb (1 samples, 0.40%) + + + +sys_writev (2 samples, 0.81%) + + + +tcp_transmit_skb (10 samples, 4.05%) +tcp_.. + + +do_readv_writev (2 samples, 0.81%) + + + +ip_output (1 samples, 0.40%) + + + +[unknown] (3 samples, 1.21%) + + + +release_sock (1 samples, 0.40%) + + + +__lock_text_start (1 samples, 0.40%) + + + +__netif_receive_skb (2 samples, 0.81%) + + + +ip_local_deliver_finish (3 samples, 1.21%) + + + +ip_queue_xmit (1 samples, 0.40%) + + + +sys_readv (3 samples, 1.21%) + + + +__ip_local_out (1 samples, 0.40%) + + + +skb_entail (3 samples, 1.21%) + + + +do_readv_writev (13 samples, 5.26%) +do_rea.. + + +tcp_write_xmit (5 samples, 2.02%) +t.. + + +skb_clone (1 samples, 0.40%) + + + +inet_sendmsg (17 samples, 6.88%) +inet_send.. + + +__libc_readv (2 samples, 0.81%) + + + +tcp_v4_rcv (2 samples, 0.81%) + + + +sys_writev (7 samples, 2.83%) +sy.. 
+ + +do_readv_writev (33 samples, 13.36%) +do_readv_writev + + +pthread_mutex_unlock (1 samples, 0.40%) + + + +tcp_write_xmit (1 samples, 0.40%) + + + +tcp_v4_do_rcv (1 samples, 0.40%) + + + +mod_timer (1 samples, 0.40%) + + + +ip_local_deliver_finish (1 samples, 0.40%) + + + +ip_rcv_finish (1 samples, 0.40%) + + + +__put_compound_page (1 samples, 0.40%) + + + +tcp_v4_do_rcv (2 samples, 0.81%) + + + +ip_finish_output2 (1 samples, 0.40%) + + + +inet_sendmsg (32 samples, 12.96%) +inet_sendmsg + + +__local_bh_enable_ip (6 samples, 2.43%) +__.. + + +__netif_receive_skb (5 samples, 2.02%) +_.. + + +ip_queue_xmit (7 samples, 2.83%) +ip.. + + +sock_write_iter (19 samples, 7.69%) +sock_write.. + + +do_readv_writev (24 samples, 9.72%) +do_readv_writev + + +inet_recvmsg (2 samples, 0.81%) + + + +loopback_xmit (1 samples, 0.40%) + + + +sock_write_iter (19 samples, 7.69%) +sock_write.. + + +ktime_get_with_offset (1 samples, 0.40%) + + + +evbuffer_write_atmost (1 samples, 0.40%) + + + +__tcp_ack_snd_check (1 samples, 0.40%) + + + +__libc_readv (2 samples, 0.81%) + + + +tcp_rcv_established (1 samples, 0.40%) + + + +do_softirq (3 samples, 1.21%) + + + +do_iter_readv_writev (1 samples, 0.40%) + + + +__netif_receive_skb_core (2 samples, 0.81%) + + + +__local_bh_enable_ip (1 samples, 0.40%) + + + +tcp_write_xmit (2 samples, 0.81%) + + + +tcp_v4_do_rcv (2 samples, 0.81%) + + + +ip_local_out (5 samples, 2.02%) +i.. + + +sock_write_iter (15 samples, 6.07%) +sock_wri.. + + +tcp_v4_rcv (3 samples, 1.21%) + + + +ip_finish_output2 (8 samples, 3.24%) +ip_.. + + +ip_finish_output (3 samples, 1.21%) + + + +ip_local_deliver_finish (2 samples, 0.81%) + + + +__libc_readv (2 samples, 0.81%) + + + +do_softirq_own_stack (6 samples, 2.43%) +do.. + + +do_softirq_own_stack (4 samples, 1.62%) + + + +ip_finish_output2 (1 samples, 0.40%) + + + +get_page_from_freelist (1 samples, 0.40%) + + + +__tcp_push_pending_frames (10 samples, 4.05%) +__tc.. 
+ + +ip_finish_output (1 samples, 0.40%) + + + +tcp_v4_send_check (1 samples, 0.40%) + + + +tcp_v4_rcv (5 samples, 2.02%) +t.. + + +sys_writev (12 samples, 4.86%) +sys_wr.. + + +net_rx_action (1 samples, 0.40%) + + + +sock_sendmsg (3 samples, 1.21%) + + + +epoll_dispatch (1 samples, 0.40%) + + + +do_readv_writev (1 samples, 0.40%) + + + +sock_sendmsg (12 samples, 4.86%) +sock_s.. + + +copy_user_enhanced_fast_string (3 samples, 1.21%) + + + +__local_bh_enable_ip (4 samples, 1.62%) + + + +__sk_flush_backlog (1 samples, 0.40%) + + + +inet_recvmsg (1 samples, 0.40%) + + + +Envoy::Filter::TcpProxy::onData (3 samples, 1.21%) + + + +tcp_write_xmit (4 samples, 1.62%) + + + +__kfree_skb (1 samples, 0.40%) + + + +sock_read_iter (2 samples, 0.81%) + + + +validate_xmit_skb (1 samples, 0.40%) + + + +dev_hard_start_xmit (3 samples, 1.21%) + + + +ip_finish_output (4 samples, 1.62%) + + + +sk_page_frag_refill (1 samples, 0.40%) + + + +vfs_writev (15 samples, 6.07%) +vfs_writev + + +xen_clocksource_get_cycles (1 samples, 0.40%) + + + +ip_rcv_finish (4 samples, 1.62%) + + + +ip_local_out (8 samples, 3.24%) +ip_.. + + +dev_hard_start_xmit (1 samples, 0.40%) + + + +sock_write_iter (1 samples, 0.40%) + + + +__netif_receive_skb (3 samples, 1.21%) + + + +sock_recvmsg (1 samples, 0.40%) + + + +__libc_writev (2 samples, 0.81%) + + + +tcp_write_xmit (1 samples, 0.40%) + + + +ip_local_deliver (3 samples, 1.21%) + + + +entry_SYSCALL_64_fastpath (24 samples, 9.72%) +entry_SYSCALL_.. + + +entry_SYSCALL_64_fastpath (33 samples, 13.36%) +entry_SYSCALL_64_fas.. + + +ip_rcv_finish (2 samples, 0.81%) + + + +do_softirq (6 samples, 2.43%) +do.. + + +sys_readv (2 samples, 0.81%) + + + +sch_direct_xmit (1 samples, 0.40%) + + + +evbuffer_write_atmost (36 samples, 14.57%) +evbuffer_write_atmost + + +do_iter_readv_writev (4 samples, 1.62%) + + + +vfs_writev (33 samples, 13.36%) +vfs_writev + + +ip_queue_xmit (9 samples, 3.64%) +ip_q.. 
+ + +skb_copy_datagram_iter (1 samples, 0.40%) + + + +tcp_push_one (1 samples, 0.40%) + + + +dev_hard_start_xmit (2 samples, 0.81%) + + + +tcp_v4_do_rcv (1 samples, 0.40%) + + + +tcp_nagle_check (1 samples, 0.40%) + + + +__lock_text_start (1 samples, 0.40%) + + + +ip_rcv (1 samples, 0.40%) + + + +do_writev (33 samples, 13.36%) +do_writev + + +vfs_writev (4 samples, 1.62%) + + + +ip_local_deliver_finish (5 samples, 2.02%) +i.. + + +__netif_receive_skb (1 samples, 0.40%) + + + +inet_sendmsg (18 samples, 7.29%) +inet_sendmsg + + +__wake_up_sync_key (1 samples, 0.40%) + + + +tcp_recvmsg (1 samples, 0.40%) + + + +tcp_queue_rcv (1 samples, 0.40%) + + + +tcp_rcv_established (1 samples, 0.40%) + + + +do_readv_writev (1 samples, 0.40%) + + + +ip_local_out (1 samples, 0.40%) + + + +ip_rcv (4 samples, 1.62%) + + + +inet_sendmsg (3 samples, 1.21%) + + + +tcp_push (1 samples, 0.40%) + + + +sock_read_iter (1 samples, 0.40%) + + + +release_sock (1 samples, 0.40%) + + + +tcp_write_xmit (7 samples, 2.83%) +tc.. + + +tcp_schedule_loss_probe (1 samples, 0.40%) + + + +sock_sendmsg (13 samples, 5.26%) +sock_s.. + + +copy_user_enhanced_fast_string (1 samples, 0.40%) + + + +inet_sendmsg (4 samples, 1.62%) + + + +do_iter_readv_writev (15 samples, 6.07%) +do_iter_.. + + +ip_rcv_finish (2 samples, 0.81%) + + + +tcp_ack (1 samples, 0.40%) + + + +sys_writev (3 samples, 1.21%) + + + +ip_local_deliver_finish (4 samples, 1.62%) + + + +event_changelist_remove_all_ (1 samples, 0.40%) + + + +ixgbevf_poll (1 samples, 0.40%) + + + +tcp_wfree (1 samples, 0.40%) + + + +dev_queue_xmit (4 samples, 1.62%) + + + +skb_release_data (1 samples, 0.40%) + + + +do_iter_readv_writev (3 samples, 1.21%) + + + +ip_output (6 samples, 2.43%) +ip.. 
+ + +tcp_push_one (1 samples, 0.40%) + + + +sched_clock_local (1 samples, 0.40%) + + + +ip_output (1 samples, 0.40%) + + + +do_softirq (4 samples, 1.62%) + + + +sk_stream_alloc_skb (1 samples, 0.40%) + + + +Envoy::Network::ConnectionImpl::getReadBuffer (1 samples, 0.40%) + + + +do_iter_readv_writev (7 samples, 2.83%) +do.. + + +do_iter_readv_writev (19 samples, 7.69%) +do_iter_re.. + + +tcp_recvmsg (2 samples, 0.81%) + + + +sk_stream_alloc_skb (1 samples, 0.40%) + + + +envoy (247 samples, 100.00%) +envoy + + +__libc_writev (4 samples, 1.62%) + + + +sock_write_iter (8 samples, 3.24%) +soc.. + + +sock_recvmsg (1 samples, 0.40%) + + + +__libc_writev (12 samples, 4.86%) +__libc.. + + +__tcp_push_pending_frames (5 samples, 2.02%) +_.. + + +ksize (1 samples, 0.40%) + + + +__wake_up_sync_key (2 samples, 0.81%) + + + +__netif_receive_skb (4 samples, 1.62%) + + + +tcp_event_data_recv (1 samples, 0.40%) + + + +irq_exit (1 samples, 0.40%) + + + +copy_user_enhanced_fast_string (1 samples, 0.40%) + + + +enqueue_to_backlog (1 samples, 0.40%) + + + +__wake_up_sync_key (1 samples, 0.40%) + + + +Envoy::Network::ConnectionImpl::onReadReady (16 samples, 6.48%) +Envoy::N.. + + +copy_user_enhanced_fast_string (4 samples, 1.62%) + + + +__dev_queue_xmit (1 samples, 0.40%) + + + +tcp_write_xmit (14 samples, 5.67%) +tcp_wri.. + + +netif_rx (1 samples, 0.40%) + + + +do_iter_readv_writev (12 samples, 4.86%) +do_ite.. + + +sock_recvmsg (1 samples, 0.40%) + + + +vfs_readv (1 samples, 0.40%) + + + +sock_def_readable (1 samples, 0.40%) + + + +tcp_push (3 samples, 1.21%) + + + +entry_SYSCALL_64_fastpath (3 samples, 1.21%) + + + +dev_queue_xmit (2 samples, 0.81%) + + + +tcp_v4_rcv (3 samples, 1.21%) + + + +tcp_push (2 samples, 0.81%) + + + +sock_sendmsg (24 samples, 9.72%) +sock_sendmsg + + +tcp_rcv_established (1 samples, 0.40%) + + + +__netif_receive_skb_core (4 samples, 1.62%) + + + +evbuffer_chain_new (12 samples, 4.86%) +evbuff.. 
+ + +vfs_writev (19 samples, 7.69%) +vfs_writev + + +ip_output (8 samples, 3.24%) +ip_.. + + +do_softirq (5 samples, 2.02%) +d.. + + +__local_bh_enable_ip (3 samples, 1.21%) + + + +do_iter_readv_writev (13 samples, 5.26%) +do_ite.. + + +ixgbevf_poll (1 samples, 0.40%) + + + +import_iovec (1 samples, 0.40%) + + + +do_softirq_own_stack (4 samples, 1.62%) + + + +tcp_v4_do_rcv (1 samples, 0.40%) + + + +ip_local_out (1 samples, 0.40%) + + + +__lock_text_start (2 samples, 0.81%) + + + +ip_finish_output (6 samples, 2.43%) +ip.. + + +__lock_text_start (4 samples, 1.62%) + + + +all (247 samples, 100%) + + + +loopback_xmit (2 samples, 0.81%) + + + +tcp_v4_rcv (1 samples, 0.40%) + + + +Envoy::Network::ConnectionImplUtility::updateBufferStats (1 samples, 0.40%) + + + +sch_direct_xmit (1 samples, 0.40%) + + + +xen_hvm_callback_vector (1 samples, 0.40%) + + + +__libc_writev (3 samples, 1.21%) + + + +netif_rx_internal (1 samples, 0.40%) + + + +__netif_receive_skb_core (4 samples, 1.62%) + + + +vfs_readv (2 samples, 0.81%) + + + +Envoy::Network::ConnectionImpl::~ConnectionImpl (2 samples, 0.81%) + + + +ip_rcv (2 samples, 0.81%) + + + +tcp_write_xmit (2 samples, 0.81%) + + + +[unknown] (2 samples, 0.81%) + + + +ip_rcv (3 samples, 1.21%) + + + +__tcp_push_pending_frames (12 samples, 4.86%) +__tcp_.. + + +ip_local_deliver_finish (2 samples, 0.81%) + + + +tcp_write_xmit (15 samples, 6.07%) +tcp_writ.. + + +do_writev (13 samples, 5.26%) +do_wri.. + + +do_iter_readv_writev (3 samples, 1.21%) + + + +do_softirq_own_stack (4 samples, 1.62%) + + + +tcp_write_xmit (1 samples, 0.40%) + + + +evbuffer_drain (3 samples, 1.21%) + + + +process_backlog (1 samples, 0.40%) + + + +entry_SYSCALL_64_fastpath (15 samples, 6.07%) +entry_SY.. + + +__softirqentry_text_start (6 samples, 2.43%) +__.. + + +__wake_up_sync_key (4 samples, 1.62%) + + + +ip_output (8 samples, 3.24%) +ip_.. + + +__wake_up_sync_key (2 samples, 0.81%) + + + +do_writev (7 samples, 2.83%) +do.. 
+ + +copy_user_enhanced_fast_string (4 samples, 1.62%) + + + +sys_writev (2 samples, 0.81%) + + + +ip_local_deliver (3 samples, 1.21%) + + + +ip_rcv (3 samples, 1.21%) + + + +net_rx_action (1 samples, 0.40%) + + + +__tcp_push_pending_frames (5 samples, 2.02%) +_.. + + +sock_sendmsg (17 samples, 6.88%) +sock_send.. + + +xen_clocksource_read (1 samples, 0.40%) + + + +[unknown] (2 samples, 0.81%) + + + +__softirqentry_text_start (6 samples, 2.43%) +__.. + + +pvclock_clocksource_read (1 samples, 0.40%) + + + +__local_bh_enable_ip (2 samples, 0.81%) + + + +__libc_writev (33 samples, 13.36%) +__libc_writev + + +dev_queue_xmit (3 samples, 1.21%) + + + +__skb_clone (1 samples, 0.40%) + + + +__netif_receive_skb_core (4 samples, 1.62%) + + + +skb_entail (1 samples, 0.40%) + + + +sock_sendmsg (7 samples, 2.83%) +so.. + + +ktime_get_with_offset (1 samples, 0.40%) + + + +ip_rcv (1 samples, 0.40%) + + + +sk_page_frag_refill (2 samples, 0.81%) + + + +tcp_send_ack (1 samples, 0.40%) + + + +ip_local_out (6 samples, 2.43%) +ip.. + + +__tcp_push_pending_frames (7 samples, 2.83%) +__.. + + +entry_SYSCALL_64_fastpath (2 samples, 0.81%) + + + +inet_recvmsg (2 samples, 0.81%) + + + +entry_SYSCALL_64_fastpath (5 samples, 2.02%) +e.. + + +process_backlog (4 samples, 1.62%) + + + +net_rx_action (5 samples, 2.02%) +n.. + + +__inet_lookup_established (1 samples, 0.40%) + + + +__libc_writev (13 samples, 5.26%) +__libc.. + + +Envoy::Network::ConnectionImpl::doReadFromSocket (7 samples, 2.83%) +En.. + + +sock_write_iter (17 samples, 6.88%) +sock_writ.. + + +do_writev (8 samples, 3.24%) +do_.. + + +inet_sendmsg (24 samples, 9.72%) +inet_sendmsg + + +do_readv_writev (3 samples, 1.21%) + + + +netif_rx (2 samples, 0.81%) + + + +__netif_receive_skb_core (6 samples, 2.43%) +__.. + + +__dev_queue_xmit (3 samples, 1.21%) + + + +entry_SYSCALL_64_fastpath (12 samples, 4.86%) +entry_.. 
+ + +do_softirq (2 samples, 0.81%) + + + +sys_epoll_wait (1 samples, 0.40%) + + + +__wake_up_sync_key (2 samples, 0.81%) + + + +ip_output (10 samples, 4.05%) +ip_o.. + + +sock_def_readable (2 samples, 0.81%) + + + +__libc_writev (17 samples, 6.88%) +__libc_wr.. + + +sock_write_iter (12 samples, 4.86%) +sock_w.. + + +do_iter_readv_writev (2 samples, 0.81%) + + + +ip_finish_output2 (1 samples, 0.40%) + + + +do_softirq_own_stack (3 samples, 1.21%) + + + +import_iovec (1 samples, 0.40%) + + + +do_readv_writev (6 samples, 2.43%) +do.. + + +do_writev (3 samples, 1.21%) + + + +do_readv_writev (2 samples, 0.81%) + + + +__netif_receive_skb_core (2 samples, 0.81%) + + + +net_rx_action (1 samples, 0.40%) + + + +__softirqentry_text_start (1 samples, 0.40%) + + + +tc_deletearray_nothrow (2 samples, 0.81%) + + + +sock_recvmsg (2 samples, 0.81%) + + + +do_readv_writev (3 samples, 1.21%) + + + +__netif_receive_skb (2 samples, 0.81%) + + + +ip_output (5 samples, 2.02%) +i.. + + +ip_rcv_finish (4 samples, 1.62%) + + + +pthread_mutex_unlock (1 samples, 0.40%) + + + +process_backlog (2 samples, 0.81%) + + + +tcp_rate_check_app_limited (1 samples, 0.40%) + + + +tcp_push_one (1 samples, 0.40%) + + + +ip_rcv (4 samples, 1.62%) + + + +inet_sendmsg (7 samples, 2.83%) +in.. + + +tcp_v4_do_rcv (1 samples, 0.40%) + + + +tcp_v4_do_rcv (1 samples, 0.40%) + + + +skb_release_data (1 samples, 0.40%) + + + +entry_SYSCALL_64_fastpath (1 samples, 0.40%) + + + +tcp_sendmsg (18 samples, 7.29%) +tcp_sendmsg + + +__alloc_skb (1 samples, 0.40%) + + + +__libc_writev (19 samples, 7.69%) +__libc_wri.. + + +ip_rcv_finish (1 samples, 0.40%) + + + +do_softirq (6 samples, 2.43%) +do.. 
+ + +[unknown] (3 samples, 1.21%) + + + +dev_queue_xmit (2 samples, 0.81%) + + + +ip_local_deliver_finish (2 samples, 0.81%) + + + +[unknown] (3 samples, 1.21%) + + + +skb_copy_datagram_iter (1 samples, 0.40%) + + + +xen_hvm_callback_vector (1 samples, 0.40%) + + + +__dev_queue_xmit (1 samples, 0.40%) + + + +sock_sendmsg (18 samples, 7.29%) +sock_sendmsg + + +__dev_queue_xmit (3 samples, 1.21%) + + + +ip_queue_xmit (3 samples, 1.21%) + + + +tcp_recvmsg (2 samples, 0.81%) + + + +ip_finish_output (1 samples, 0.40%) + + + +__wake_up_sync_key (4 samples, 1.62%) + + + +__copy_skb_header (1 samples, 0.40%) + + + +ip_finish_output2 (8 samples, 3.24%) +ip_.. + + +tcmalloc::CentralFreeList::InsertRange (1 samples, 0.40%) + + + +Envoy::Server::WorkerImpl::threadRoutine (211 samples, 85.43%) +Envoy::Server::WorkerImpl::threadRoutine + + +__tcp_push_pending_frames (15 samples, 6.07%) +__tcp_pu.. + + +vfs_readv (2 samples, 0.81%) + + + +loopback_xmit (2 samples, 0.81%) + + + +__wake_up_sync_key (1 samples, 0.40%) + + + +copy_user_enhanced_fast_string (1 samples, 0.40%) + + + +sock_write_iter (4 samples, 1.62%) + + + +ip_rcv (1 samples, 0.40%) + + + +ip_output (1 samples, 0.40%) + + + +__wake_up_sync_key (1 samples, 0.40%) + + + +do_softirq (2 samples, 0.81%) + + + +ip_finish_output (1 samples, 0.40%) + + + +ip_local_out (4 samples, 1.62%) + + + +__fsnotify_parent (1 samples, 0.40%) + + + +[unknown] (3 samples, 1.21%) + + + +tcp_push (11 samples, 4.45%) +tcp_p.. + + +tcp_push (10 samples, 4.05%) +tcp_.. 
+ + +sock_sendmsg (1 samples, 0.40%) + + + +inet_sendmsg (1 samples, 0.40%) + + + +__kmalloc_node_track_caller (1 samples, 0.40%) + + + +do_iter_readv_writev (1 samples, 0.40%) + + + +vfs_writev (2 samples, 0.81%) + + + +entry_SYSCALL_64_fastpath (2 samples, 0.81%) + + + +net_rx_action (2 samples, 0.81%) + + + +entry_SYSCALL_64_fastpath (2 samples, 0.81%) + + + +tc_malloc (1 samples, 0.40%) + + + +ip_queue_xmit (1 samples, 0.40%) + + + +tcp_transmit_skb (3 samples, 1.21%) + + + +skb_release_all (1 samples, 0.40%) + + + +sock_read_iter (2 samples, 0.81%) + + + +do_writev (2 samples, 0.81%) + + + +tcp_push (3 samples, 1.21%) + + + +tcp_sendmsg (6 samples, 2.43%) +tc.. + + +sock_read_iter (1 samples, 0.40%) + + + +kmem_cache_alloc_node (1 samples, 0.40%) + + + +sys_writev (6 samples, 2.43%) +sy.. + + +sock_def_readable (1 samples, 0.40%) + + + +sock_sendmsg (19 samples, 7.69%) +sock_sendmsg + + +inet_recvmsg (4 samples, 1.62%) + + + +tcp_sendmsg (2 samples, 0.81%) + + + +sk_stream_alloc_skb (1 samples, 0.40%) + + + +ip_local_out (4 samples, 1.62%) + + + +__libc_readv (3 samples, 1.21%) + + + +sys_writev (19 samples, 7.69%) +sys_writev + + +net_rx_action (1 samples, 0.40%) + + + +skb_copy_datagram_iter (3 samples, 1.21%) + + + +skb_clone (1 samples, 0.40%) + + + +eth_type_trans (1 samples, 0.40%) + + + +ip_queue_xmit (1 samples, 0.40%) + + + +__lock_text_start (1 samples, 0.40%) + + + +__tcp_push_pending_frames (7 samples, 2.83%) +__.. + + +sock_def_readable (1 samples, 0.40%) + + + +tcp_transmit_skb (5 samples, 2.02%) +t.. 
+ + diff --git a/api/diagrams/later-ack.svg b/api/diagrams/later-ack.svg new file mode 100644 index 000000000000..7584c54f036a --- /dev/null +++ b/api/diagrams/later-ack.svg @@ -0,0 +1,8 @@ +participant Envoy as E [color="black"] +participant Management Server as M [color="black"] + +E->M: (V=,R={foo},N=,T=EDS) [color="green"] +M->E: (V=X,R={foo:...},N=A,T=EDS) [color="gray"] +E->M: (V=,R={foo},N=A,T=EDS) [color="red"] +M->E: (V=Y,R={foo:...},N=B,T=EDS) [color="gray"] +E->M: (V=Y,R={foo},N=B,T=EDS) [color="green"]Created with Raphaël 2.2.0EnvoyEnvoyManagement ServerManagement Server(V=,R={foo},N=,T=EDS)(V=X,R={foo:...},N=A,T=EDS)(V=,R={foo},N=A,T=EDS)(V=Y,R={foo:...},N=B,T=EDS)(V=Y,R={foo},N=B,T=EDS) \ No newline at end of file diff --git a/api/diagrams/simple-ack.svg b/api/diagrams/simple-ack.svg new file mode 100644 index 000000000000..148c26170f60 --- /dev/null +++ b/api/diagrams/simple-ack.svg @@ -0,0 +1,6 @@ +participant Envoy as E [color="black"] +participant Management Server as M [color="black"] + +E->M: (V=,R={foo},N=,T=EDS) [color="green"] +M->E: (V=X,R={foo:...},N=A,T=EDS) [color="gray"] +E->M: (V=X,R={foo},N=A,T=EDS) [color="green"]Created with Raphaël 2.2.0EnvoyEnvoyManagement ServerManagement Server(V=,R={foo},N=,T=EDS)(V=X,R={foo:...},N=A,T=EDS)(V=X,R={foo},N=A,T=EDS) \ No newline at end of file diff --git a/api/diagrams/simple-nack.svg b/api/diagrams/simple-nack.svg new file mode 100644 index 000000000000..f2d4138319e3 --- /dev/null +++ b/api/diagrams/simple-nack.svg @@ -0,0 +1,6 @@ +participant Envoy as E [color="black"] +participant Management Server as M [color="black"] + +E->M: (V=,R={foo},N=,T=EDS) [color="green"] +M->E: (V=X,R={foo:...},N=A,T=EDS) [color="gray"] +E->M: (V=,R={foo},N=A,T=EDS) [color="red"]Created with Raphaël 2.2.0EnvoyEnvoyManagement ServerManagement Server(V=,R={foo},N=,T=EDS)(V=X,R={foo:...},N=A,T=EDS)(V=,R={foo},N=A,T=EDS) \ No newline at end of file diff --git a/api/diagrams/stale-requests.svg 
b/api/diagrams/stale-requests.svg new file mode 100644 index 000000000000..0b440c974624 --- /dev/null +++ b/api/diagrams/stale-requests.svg @@ -0,0 +1,11 @@ +participant Envoy as E [color="black"] +participant Management Server as M [color="black"] + +E->M: (V=X,R={foo},N=A,T=EDS) [color="green"] +Note right of M: Stale +E->M: (V=X,R={foo,bar},N=A,T=EDS) [color="green"] +M->E: (V=Y,R={foo:...,bar:...},N=B,T=EDS) [color="gray"] +E->M: (V=X,R={foo,baz},N=A,T=EDS) [color="green"] +Note right of M: Stale +E->M: (V=Y,R={foo,baz},N=B,T=EDS) [color="green"] +M->E: (V=Z,R={foo:...,baz:...},N=C,T=EDS) [color="gray"]Created with Raphaël 2.2.0EnvoyEnvoyManagement ServerManagement Server(V=X,R={foo},N=A,T=EDS)Stale(V=X,R={foo,bar},N=A,T=EDS)(V=Y,R={foo:...,bar:...},N=B,T=EDS)(V=X,R={foo,baz},N=A,T=EDS)Stale(V=Y,R={foo,baz},N=B,T=EDS)(V=Z,R={foo:...,baz:...},N=C,T=EDS) \ No newline at end of file diff --git a/api/diagrams/update-race.svg b/api/diagrams/update-race.svg new file mode 100644 index 000000000000..1428bc7a744c --- /dev/null +++ b/api/diagrams/update-race.svg @@ -0,0 +1,11 @@ +participant Envoy as E [color="black"] +participant Management Server 0 as M0 [color="black"] +participant Management Server 1 as M1 [color="black"] + +E->M1: (V=..,R={},N=..,T=CDS) [color="green"] +E->M0: (V=X,R={foo},N=A,T=EDS) [color="green"] +M1->E: (V=M,R={foo:...,bar:...},N=D,T=CDS) [color="gray"] +E->M0: (V=X,R={foo,bar},N=A,T=EDS [color="green"] +Note right of M0: Management server 0 replies\nbefore processing resource update at X +M0->E: (V=Y,R={foo:...,},N=B,T=EDS) [color="gray"] +E->M0: (V=Y,R={foo,bar},N=B,T=EDS [color="green"]Created with Raphaël 2.2.0EnvoyEnvoyManagement Server 0Management Server 0Management Server 1Management Server 1(V=..,R={},N=..,T=CDS)(V=X,R={foo},N=A,T=EDS)(V=M,R={foo:...,bar:...},N=D,T=CDS)(V=X,R={foo,bar},N=A,T=EDSManagement server 0 repliesbefore processing resource update at X(V=Y,R={foo:...,},N=B,T=EDS)(V=Y,R={foo,bar},N=B,T=EDS \ No newline at end of 
file diff --git a/api/docs/BUILD b/api/docs/BUILD new file mode 100644 index 000000000000..149f241069e5 --- /dev/null +++ b/api/docs/BUILD @@ -0,0 +1,57 @@ +licenses(["notice"]) # Apache 2 + +package_group( + name = "docs", + packages = [ + "//docs", + ], +) + +# TODO(htuch): Grow this to cover everything we want to generate docs for, so we can just invoke +# bazel build //docs:protos --aspects tools/protodoc/protodoc.bzl%proto_doc_aspect --output_groups=rst +proto_library( + name = "protos", + deps = [ + "//envoy/api/v2:cds", + "//envoy/api/v2:discovery", + "//envoy/api/v2:eds", + "//envoy/api/v2:lds", + "//envoy/api/v2:rds", + "//envoy/api/v2/cluster:circuit_breaker", + "//envoy/api/v2/cluster:outlier_detection", + "//envoy/api/v2/core:protocol", + "//envoy/api/v2/listener", + "//envoy/api/v2/ratelimit", + "//envoy/api/v2/route", + "//envoy/config/bootstrap/v2:bootstrap", + "//envoy/config/filter/accesslog/v2:accesslog", + "//envoy/config/filter/http/buffer/v2:buffer", + "//envoy/config/filter/http/ext_authz/v2alpha:ext_authz", + "//envoy/config/filter/http/fault/v2:fault", + "//envoy/config/filter/http/gzip/v2:gzip", + "//envoy/config/filter/http/health_check/v2:health_check", + "//envoy/config/filter/http/ip_tagging/v2:ip_tagging", + "//envoy/config/filter/http/lua/v2:lua", + "//envoy/config/filter/http/rate_limit/v2:rate_limit", + "//envoy/config/filter/http/router/v2:router", + "//envoy/config/filter/http/squash/v2:squash", + "//envoy/config/filter/http/transcoder/v2:transcoder", + "//envoy/config/filter/network/client_ssl_auth/v2:client_ssl_auth", + "//envoy/config/filter/network/ext_authz/v2:ext_authz", + "//envoy/config/filter/network/http_connection_manager/v2:http_connection_manager", + "//envoy/config/filter/network/mongo_proxy/v2:mongo_proxy", + "//envoy/config/filter/network/rate_limit/v2:rate_limit", + "//envoy/config/filter/network/redis_proxy/v2:redis_proxy", + "//envoy/config/filter/network/tcp_proxy/v2:tcp_proxy", + 
"//envoy/config/health_checker/redis/v2:redis", + "//envoy/config/metrics/v2:metrics_service", + "//envoy/config/metrics/v2:stats", + "//envoy/config/ratelimit/v2:rls", + "//envoy/config/trace/v2:trace", + "//envoy/service/discovery/v2:ads", + "//envoy/service/load_stats/v2:lrs", + "//envoy/service/metrics/v2:metrics_service", + "//envoy/type:percent", + "//envoy/type:range", + ], +) diff --git a/api/envoy/admin/v2/BUILD b/api/envoy/admin/v2/BUILD new file mode 100644 index 000000000000..0e1ab5f23665 --- /dev/null +++ b/api/envoy/admin/v2/BUILD @@ -0,0 +1,12 @@ +load("//bazel:api_build_system.bzl", "api_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "config_dump", + srcs = ["config_dump.proto"], + visibility = ["//visibility:public"], + deps = [ + "//envoy/api/v2:rds", + ], +) diff --git a/api/envoy/admin/v2/config_dump.proto b/api/envoy/admin/v2/config_dump.proto new file mode 100644 index 000000000000..5ed8ee0b01e1 --- /dev/null +++ b/api/envoy/admin/v2/config_dump.proto @@ -0,0 +1,35 @@ +syntax = "proto3"; + +package envoy.admin.v2; + +import "google/protobuf/any.proto"; +import "envoy/api/v2/rds.proto"; + +import "gogoproto/gogo.proto"; + +// [#protodoc-title: ConfigDump] +// [#proto-status: draft] + +// The /config_dump admin endpoint uses this wrapper message to maintain and serve arbitrary +// configuration information from any component in Envoy. +// TODO(jsedgwick) In the future, we may want to formalize this further with an RPC for config_dump, +// and perhaps even with an RPC per config type. That strategy across all endpoints will allow for +// more flexibility w.r.t. protocol, serialization, parameters, etc. +message ConfigDump { + // This map is serialized and dumped in its entirety at the /config_dump endpoint. + // + // Keys should be a short descriptor of the config object they map to. 
For example, envoy's HTTP + // routing subsystem might use "routes" as the key for its config, for which it uses the message + // RouteConfigDump as defined below. In the future, the key will also be used to filter the output + // of the /config_dump endpoint. + map configs = 1 [(gogoproto.nullable) = false]; +} + +// Envoy's RDS implementation fills this message with all currently loaded routes, as described by +// their RouteConnfiguration objects. Static routes configured in the bootstrap configuration are +// separated from those configured dynamically via RDS. This message is available at the +// /config_dump admin endpoint. +message RouteConfigDump { + repeated envoy.api.v2.RouteConfiguration static_route_configs = 1 [(gogoproto.nullable) = false]; + repeated envoy.api.v2.RouteConfiguration dynamic_route_configs = 2 [(gogoproto.nullable) = false]; +} diff --git a/api/envoy/api/v2/BUILD b/api/envoy/api/v2/BUILD new file mode 100644 index 000000000000..42a89c91e18c --- /dev/null +++ b/api/envoy/api/v2/BUILD @@ -0,0 +1,137 @@ +load("//bazel:api_build_system.bzl", "api_proto_library", "api_go_proto_library", "api_go_grpc_library") + +licenses(["notice"]) # Apache 2 + +# Friends of core API packages - filters, services, service configs. +# Package //envoy/api/v2 contains xDS and discovery definitions that should +# be in //envoy/service/discovery, but remain here for backwards compatibility. 
+package_group( + name = "friends", + packages = [ + "//envoy/admin/...", + "//envoy/api/v2", + "//envoy/config/...", + "//envoy/service/...", + ], +) + +api_proto_library( + name = "discovery", + srcs = ["discovery.proto"], + visibility = [":friends"], + deps = ["//envoy/api/v2/core:base"], +) + +api_go_proto_library( + name = "discovery", + proto = ":discovery", + deps = ["//envoy/api/v2/core:base_go_proto"], +) + +api_proto_library( + name = "eds", + srcs = ["eds.proto"], + has_services = 1, + visibility = [":friends"], + deps = [ + ":discovery", + "//envoy/api/v2/core:address", + "//envoy/api/v2/core:base", + "//envoy/api/v2/core:health_check", + "//envoy/api/v2/endpoint", + ], +) + +api_go_grpc_library( + name = "eds", + proto = ":eds", + deps = [ + ":discovery_go_proto", + "//envoy/api/v2/core:address_go_proto", + "//envoy/api/v2/core:base_go_proto", + "//envoy/api/v2/core:health_check_go_proto", + "//envoy/api/v2/endpoint:endpoint_go_proto", + ], +) + +api_proto_library( + name = "cds", + srcs = ["cds.proto"], + has_services = 1, + visibility = [":friends"], + deps = [ + ":discovery", + "//envoy/api/v2/auth:cert", + "//envoy/api/v2/cluster:circuit_breaker", + "//envoy/api/v2/cluster:outlier_detection", + "//envoy/api/v2/core:address", + "//envoy/api/v2/core:base", + "//envoy/api/v2/core:config_source", + "//envoy/api/v2/core:health_check", + "//envoy/api/v2/core:protocol", + "//envoy/type:percent", + ], +) + +api_go_grpc_library( + name = "cds", + proto = ":cds", + deps = [ + ":discovery_go_proto", + "//envoy/api/v2/auth:cert_go_proto", + "//envoy/api/v2/cluster:circuit_breaker_go_proto", + "//envoy/api/v2/cluster:outlier_detection_go_proto", + "//envoy/api/v2/core:address_go_proto", + "//envoy/api/v2/core:base_go_proto", + "//envoy/api/v2/core:config_source_go_proto", + "//envoy/api/v2/core:health_check_go_proto", + "//envoy/api/v2/core:protocol_go_proto", + "//envoy/type:percent_go_proto", + ], +) + +api_proto_library( + name = "lds", + srcs = 
["lds.proto"], + has_services = 1, + visibility = [":friends"], + deps = [ + ":discovery", + "//envoy/api/v2/core:address", + "//envoy/api/v2/core:base", + "//envoy/api/v2/listener", + ], +) + +api_go_grpc_library( + name = "lds", + proto = ":lds", + deps = [ + ":discovery_go_proto", + "//envoy/api/v2/core:address_go_proto", + "//envoy/api/v2/core:base_go_proto", + "//envoy/api/v2/listener:listener_go_proto", + ], +) + +api_proto_library( + name = "rds", + srcs = ["rds.proto"], + has_services = 1, + visibility = [":friends"], + deps = [ + ":discovery", + "//envoy/api/v2/core:base", + "//envoy/api/v2/route", + ], +) + +api_go_grpc_library( + name = "rds", + proto = ":rds", + deps = [ + ":discovery_go_proto", + "//envoy/api/v2/core:base_go_proto", + "//envoy/api/v2/route:route_go_proto", + ], +) diff --git a/api/envoy/api/v2/README.md b/api/envoy/api/v2/README.md new file mode 100644 index 000000000000..984be690a103 --- /dev/null +++ b/api/envoy/api/v2/README.md @@ -0,0 +1,9 @@ +Protocol buffer definitions for xDS and top-level resource API messages. + +Package group `//envoy/api/v2:friends` enumerates all consumers of the shared +API messages. That includes package envoy.api.v2 itself, which contains several +xDS definitions. Default visibility for all shared definitions should be set to +`//envoy/api/v2:friends`. + +Additionally, packages envoy.api.v2.core and envoy.api.v2.auth are also +consumed throughout the subpackages of `//envoy/api/v2`. 
diff --git a/api/envoy/api/v2/auth/BUILD b/api/envoy/api/v2/auth/BUILD new file mode 100644 index 000000000000..461360c6c48f --- /dev/null +++ b/api/envoy/api/v2/auth/BUILD @@ -0,0 +1,52 @@ +load("//bazel:api_build_system.bzl", "api_proto_library", "api_go_proto_library") + +licenses(["notice"]) # Apache 2 + +package_group( + name = "friends", + includes = [ + "//envoy/api/v2:friends", + ], + packages = [ + "//envoy/api/v2/cluster", + "//envoy/api/v2/endpoint", + "//envoy/api/v2/listener", + "//envoy/api/v2/route", + ], +) + +api_proto_library( + name = "auth", + srcs = ["auth.proto"], + visibility = [":friends"], + deps = [ + ":cert", + ], +) + +api_go_proto_library( + name = "auth", + proto = ":auth", + deps = [ + ":cert_go_proto", + ], +) + +api_proto_library( + name = "cert", + srcs = ["cert.proto"], + visibility = [":friends"], + deps = [ + "//envoy/api/v2/core:base", + "//envoy/api/v2/core:config_source", + ], +) + +api_go_proto_library( + name = "cert", + proto = ":cert", + deps = [ + "//envoy/api/v2/core:base_go_proto", + "//envoy/api/v2/core:config_source_go_proto", + ], +) diff --git a/api/envoy/api/v2/auth/auth.proto b/api/envoy/api/v2/auth/auth.proto new file mode 100644 index 000000000000..34b1e2b7edb7 --- /dev/null +++ b/api/envoy/api/v2/auth/auth.proto @@ -0,0 +1,53 @@ +syntax = "proto3"; + +// [#proto-status: draft] + +package envoy.api.v2.auth; +option go_package = "auth"; + +import "envoy/api/v2/auth/cert.proto"; + +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +message AuthAction { + // Should we do white-list or black-list style access control. + enum ActionType { + // Request matches all rules are allowed, otherwise denied. + ALLOW = 0; + // Request matches all rules or missing required auth fields are denied, + // otherwise allowed. + DENY = 1; + } + + ActionType action_type = 1; + + // Logic AND that requires all rules match. 
+ message AndRule { + repeated Rule rules = 1; + } + + // Logic OR that requires at least one rule matches. + message OrRule { + repeated Rule rules = 1; + } + + // Check peer identity using X.509 certificate. + message X509Rule { + // How to validate peer certificates. + CertificateValidationContext validation_context = 3; + } + + // Element type of AndRule/OrRule, it chooses among different type of rule. + message Rule { + oneof rule_specifier { + AndRule and_rule = 1; + OrRule or_rule = 2; + X509Rule x509_rule = 3; + } + } + + // List of rules + repeated Rule rules = 2; +} diff --git a/api/envoy/api/v2/auth/cert.proto b/api/envoy/api/v2/auth/cert.proto new file mode 100644 index 000000000000..5703c483f35f --- /dev/null +++ b/api/envoy/api/v2/auth/cert.proto @@ -0,0 +1,260 @@ +syntax = "proto3"; + +package envoy.api.v2.auth; + +import "envoy/api/v2/core/base.proto"; +import "envoy/api/v2/core/config_source.proto"; + +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: Common TLS configuration] + +message TlsParameters { + enum TlsProtocol { + // Envoy will choose the optimal TLS version. + TLS_AUTO = 0; + + // TLS 1.0 + TLSv1_0 = 1; + + // TLS 1.1 + TLSv1_1 = 2; + + // TLS 1.2 + TLSv1_2 = 3; + + // TLS 1.3 + TLSv1_3 = 4; + } + + // Minimum TLS protocol version. + TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum.defined_only = true]; + + // Maximum TLS protocol version. + TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum.defined_only = true]; + + // If specified, the TLS listener will only support the specified `cipher list + // `_. + // If not specified, the default list: + // + // .. 
code-block:: none + // + // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] + // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] + // ECDHE-ECDSA-AES128-SHA256 + // ECDHE-RSA-AES128-SHA256 + // ECDHE-ECDSA-AES128-SHA + // ECDHE-RSA-AES128-SHA + // AES128-GCM-SHA256 + // AES128-SHA256 + // AES128-SHA + // ECDHE-ECDSA-AES256-GCM-SHA384 + // ECDHE-RSA-AES256-GCM-SHA384 + // ECDHE-ECDSA-AES256-SHA384 + // ECDHE-RSA-AES256-SHA384 + // ECDHE-ECDSA-AES256-SHA + // ECDHE-RSA-AES256-SHA + // AES256-GCM-SHA384 + // AES256-SHA256 + // AES256-SHA + // + // will be used. + repeated string cipher_suites = 3; + + // If specified, the TLS connection will only support the specified ECDH + // curves. If not specified, the default curves (X25519, P-256) will be used. + repeated string ecdh_curves = 4; +} + +message TlsCertificate { + // The TLS certificate chain. + core.DataSource certificate_chain = 1; + + // The TLS private key. + core.DataSource private_key = 2; + + // [#not-implemented-hide:] + core.DataSource password = 3; + + // [#not-implemented-hide:] + core.DataSource ocsp_staple = 4; + + // [#not-implemented-hide:] + repeated core.DataSource signed_certificate_timestamp = 5; +} + +message TlsSessionTicketKeys { + // Keys for encrypting and decrypting TLS session tickets. The + // first key in the array contains the key to encrypt all new sessions created by this context. + // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys + // by, for example, putting the new key first, and the previous key second. + // + // If :ref:`session_ticket_keys ` + // is not specified, the TLS library will still support resuming sessions via tickets, but it will + // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts + // or on different hosts. + // + // Each key must contain exactly 80 bytes of cryptographically-secure random data. For + // example, the output of ``openssl rand 80``. 
+ // + // .. attention:: + // + // Using this feature has serious security considerations and risks. Improper handling of keys + // may result in loss of secrecy in connections, even if ciphers supporting perfect forward + // secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some + // discussion. To minimize the risk, you must: + // + // * Keep the session ticket keys at least as secure as your TLS certificate private keys + // * Rotate session ticket keys at least daily, and preferably hourly + // * Always generate keys using a cryptographically-secure random data source + repeated core.DataSource keys = 1 [(validate.rules).repeated .min_items = 1]; +} + +message CertificateValidationContext { + // TLS certificate data containing certificate authority certificates to use in verifying + // a presented peer certificate (e.g. server certificate for clusters or client certificate + // for listeners). If not specified and a peer certificate is presented it will not be + // verified. By default, a client certificate is optional, unless one of the additional + // options (:ref:`require_client_certificate + // `, + // :ref:`verify_certificate_hash + // `, or + // :ref:`verify_subject_alt_name + // `) is also + // specified. + // + // See :ref:`the TLS overview ` for a list of common + // system CA locations. + core.DataSource trusted_ca = 1; + + // If specified, Envoy will verify (pin) the hex-encoded SHA-256 fingerprint of + // the presented certificate. + // + // For example, ``openssl`` can produce a SHA-256 fingerprint of an x509 certificate + // with the following command: + // + // .. code-block:: bash + // + // $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 + repeated string verify_certificate_hash = 2; + + // If specified, Envoy will verify (pin) base64-encoded SHA-256 hash of + // the Subject Public Key Information (SPKI) of the presented certificate. 
+ // This is the same format as used in HTTP Public Key Pinning. + // [#not-implemented-hide:] + repeated string verify_spki_sha256 = 3; + + // An optional list of subject alternative names. If specified, Envoy will verify that + // the certificate’s subject alternative name matches one of the specified values. + repeated string verify_subject_alt_name = 4; + + // [#not-implemented-hide:] Must present a signed time-stamped OCSP response. + google.protobuf.BoolValue require_ocsp_staple = 5; + + // [#not-implemented-hide:] Must present signed certificate time-stamp. + google.protobuf.BoolValue require_signed_certificate_timestamp = 6; + + // An optional `certificate revocation list + // `_ + // (in PEM format). If specified, Envoy will verify that the presented peer + // certificate has not been revoked by this CRL. If this DataSource contains + // multiple CRLs, all of them will be used. + core.DataSource crl = 7; +} + +// TLS context shared by both client and server TLS contexts. +message CommonTlsContext { + // TLS protocol versions, cipher suites etc. + TlsParameters tls_params = 1; + + // Multiple TLS certificates can be associated with the same context. + // E.g. to allow both RSA and ECDSA certificates, two TLS certificates can be configured. + // + // .. attention:: + // + // Although this is a list, currently only a single certificate is supported. This will be + // relaxed in the future. + repeated TlsCertificate tls_certificates = 2 [(validate.rules).repeated .max_items = 1]; + + // [#not-implemented-hide:] + repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6; + + // How to validate peer certificates. + CertificateValidationContext validation_context = 3; + + // Supplies the list of ALPN protocols that the listener should expose. 
In + // practice this is likely to be set to one of two values (see the + // :ref:`codec_type ` parameter in the HTTP connection + // manager for more information): + // + // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. + // * "http/1.1" If the listener is only going to support HTTP/1.1. + // + // There is no default for this parameter. If empty, Envoy will not expose ALPN. + repeated string alpn_protocols = 4; + + // These fields are deprecated and only are used during the interim v1 -> v2 + // transition period for internal purposes. They should not be used outside of + // the Envoy binary. [#not-implemented-hide:] + message DeprecatedV1 { + string alt_alpn_protocols = 1; + } + + // [#not-implemented-hide:] + DeprecatedV1 deprecated_v1 = 5 [deprecated = true]; +} + +message UpstreamTlsContext { + // Common TLS context settings. + CommonTlsContext common_tls_context = 1; + + // SNI string to use when creating TLS backend connections. + string sni = 2; +} + +message DownstreamTlsContext { + // Common TLS context settings. + CommonTlsContext common_tls_context = 1; + + // If specified, Envoy will reject connections without a valid client + // certificate. + google.protobuf.BoolValue require_client_certificate = 2; + + // If specified, Envoy will reject connections without a valid and matching SNI. + // [#not-implemented-hide:] + google.protobuf.BoolValue require_sni = 3; + + oneof session_ticket_keys_type { + // TLS session ticket key settings. + TlsSessionTicketKeys session_ticket_keys = 4; + + // [#not-implemented-hide:] + SdsSecretConfig session_ticket_keys_sds_secret_config = 5; + } +} + +// [#proto-status: experimental] +// [#not-implemented-hide:] +message SdsSecretConfig { + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. + // When both name and config are specified, then secret can be fetched and/or reloaded via SDS. 
+ // When only name is specified, then secret will be loaded from static resources [V2-API-DIFF]. + string name = 1; + core.ConfigSource sds_config = 2; +} + +// [#proto-status: experimental] +// [#not-implemented-hide:] +message Secret { + // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. + string name = 1; + oneof type { + TlsCertificate tls_certificate = 2; + TlsSessionTicketKeys session_ticket_keys = 3; + } +} diff --git a/api/envoy/api/v2/cds.proto b/api/envoy/api/v2/cds.proto new file mode 100644 index 000000000000..7d5c201605a8 --- /dev/null +++ b/api/envoy/api/v2/cds.proto @@ -0,0 +1,437 @@ +syntax = "proto3"; + +package envoy.api.v2; + +option java_generic_services = true; + +import "envoy/api/v2/core/address.proto"; +import "envoy/api/v2/auth/cert.proto"; +import "envoy/api/v2/core/base.proto"; +import "envoy/api/v2/core/config_source.proto"; +import "envoy/api/v2/discovery.proto"; +import "envoy/api/v2/core/health_check.proto"; +import "envoy/api/v2/core/protocol.proto"; +import "envoy/api/v2/cluster/circuit_breaker.proto"; +import "envoy/api/v2/cluster/outlier_detection.proto"; +import "envoy/type/percent.proto"; + +import "google/api/annotations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// Return list of all clusters this proxy will load balance to. +service ClusterDiscoveryService { + rpc StreamClusters(stream DiscoveryRequest) returns (stream DiscoveryResponse) { + } + + rpc FetchClusters(DiscoveryRequest) returns (DiscoveryResponse) { + option (google.api.http) = { + post: "/v2/discovery:clusters" + body: "*" + }; + } +} + +// [#protodoc-title: Clusters] + +// Configuration for a single upstream cluster. 
+// [#comment:next free field: 30] +message Cluster { + // Supplies the name of the cluster which must be unique across all clusters. + // The cluster name is used when emitting + // :ref:`statistics ` if :ref:`alt_stat_name + // ` is not provided. + // Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics. + // By default, the maximum length of a cluster name is limited to 60 + // characters. This limit can be increased by setting the + // :option:`--max-obj-name-len` command line argument to the desired value. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // An optional alternative to the cluster name to be used while emitting stats. + // Any ``:`` in the name will be converted to ``_`` when emitting statistics. This should not be + // confused with :ref:`Router Filter Header + // `. + string alt_stat_name = 28; + + // Refer to :ref:`service discovery type ` + // for an explanation on each type. + enum DiscoveryType { + // Refer to the :ref:`static discovery type` + // for an explanation. + STATIC = 0; + + // Refer to the :ref:`strict DNS discovery + // type` + // for an explanation. + STRICT_DNS = 1; + + // Refer to the :ref:`logical DNS discovery + // type` + // for an explanation. + LOGICAL_DNS = 2; + + // Refer to the :ref:`service discovery type` + // for an explanation. + EDS = 3; + + // Refer to the :ref:`original destination discovery + // type` + // for an explanation. + ORIGINAL_DST = 4; + } + // The :ref:`service discovery type ` + // to use for resolving the cluster. + DiscoveryType type = 2 [(validate.rules).enum.defined_only = true]; + + // Only valid when discovery type is EDS. + message EdsClusterConfig { + // Configuration for the source of EDS updates for this Cluster. + core.ConfigSource eds_config = 1; + + // Optional alternative to cluster name to present to EDS. This does not + // have the same restrictions as cluster name, i.e. it may be arbitrary + // length. 
+ string service_name = 2; + } + // Configuration to use for EDS updates for the Cluster. + EdsClusterConfig eds_cluster_config = 3; + + // The timeout for new network connections to hosts in the cluster. + google.protobuf.Duration connect_timeout = 4 [ + (validate.rules).duration.gt = {}, + (gogoproto.stdduration) = true, + (gogoproto.nullable) = false + ]; + + // Soft limit on size of the cluster’s connections read and write buffers. If + // unspecified, an implementation defined default is applied (1MiB). + google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; + + // Refer to :ref:`load balancer type ` architecture + // overview section for information on each type. + enum LbPolicy { + + // Refer to the :ref:`round robin load balancing + // policy` + // for an explanation. + ROUND_ROBIN = 0; + + // Refer to the :ref:`least request load balancing + // policy` + // for an explanation. + LEAST_REQUEST = 1; + + // Refer to the :ref:`ring hash load balancing + // policy` + // for an explanation. + RING_HASH = 2; + + // Refer to the :ref:`random load balancing + // policy` + // for an explanation. + RANDOM = 3; + + // Refer to the :ref:`original destination load balancing + // policy` + // for an explanation. + ORIGINAL_DST_LB = 4; + + // Refer to the :ref:`Maglev load balancing policy` + // for an explanation. + MAGLEV = 5; + } + // The :ref:`load balancer type ` to use + // when picking a host in the cluster. + LbPolicy lb_policy = 6 [(validate.rules).enum.defined_only = true]; + + // If the service discovery type is + // :ref:`STATIC`, + // :ref:`STRICT_DNS` + // or :ref:`LOGICAL_DNS`, + // then hosts is required. + repeated core.Address hosts = 7; + + // Optional :ref:`active health checking ` + // configuration for the cluster. If no + // configuration is specified no health checking will be done and all cluster + // members will be considered healthy at all times. 
+ repeated core.HealthCheck health_checks = 8; + + // Optional maximum requests for a single upstream connection. This parameter + // is respected by both the HTTP/1.1 and HTTP/2 connection pool + // implementations. If not specified, there is no limit. Setting this + // parameter to 1 will effectively disable keep alive. + google.protobuf.UInt32Value max_requests_per_connection = 9; + + // Optional :ref:`circuit breaking ` for the cluster. + cluster.CircuitBreakers circuit_breakers = 10; + + // The TLS configuration for connections to the upstream cluster. If no TLS + // configuration is specified, TLS will not be used for new connections. + // + // .. attention:: + // + // Server certificate verification is not enabled by default. Configure + // :ref:`trusted_ca` to enable + // verification. + auth.UpstreamTlsContext tls_context = 11; + + reserved 12; + + // Additional options when handling HTTP requests. These options will be applicable to both + // HTTP1 and HTTP2 requests. + core.HttpProtocolOptions common_http_protocol_options = 29; + + // Additional options when handling HTTP1 requests. + core.Http1ProtocolOptions http_protocol_options = 13; + + // Even if default HTTP2 protocol options are desired, this field must be + // set so that Envoy will assume that the upstream supports HTTP/2 when + // making new HTTP connection pool connections. Currently, Envoy only + // supports prior knowledge for upstream connections. Even if TLS is used + // with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2 + // connections to happen over plain text. + core.Http2ProtocolOptions http2_protocol_options = 14; + + reserved 15; + + // If the DNS refresh rate is specified and the cluster type is either + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`, + // this value is used as the cluster’s DNS refresh + // rate. If this setting is not specified, the value defaults to 5000. 
For + // cluster types other than + // :ref:`STRICT_DNS` + // and :ref:`LOGICAL_DNS` + // this setting is ignored. + google.protobuf.Duration dns_refresh_rate = 16 + [(validate.rules).duration.gt = {}, (gogoproto.stdduration) = true]; + + // When V4_ONLY is selected, the DNS resolver will only perform a lookup for + // addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will + // only perform a lookup for addresses in the IPv6 family. If AUTO is + // specified, the DNS resolver will first perform a lookup for addresses in + // the IPv6 family and fallback to a lookup for addresses in the IPv4 family. + // For cluster types other than + // :ref:`STRICT_DNS` and + // :ref:`LOGICAL_DNS`, + // this setting is + // ignored. + enum DnsLookupFamily { + AUTO = 0; + V4_ONLY = 1; + V6_ONLY = 2; + } + + // The DNS IP address resolution policy. If this setting is not specified, the + // value defaults to + // :ref:`AUTO`. + DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum.defined_only = true]; + + // If DNS resolvers are specified and the cluster type is either + // :ref:`STRICT_DNS`, + // or :ref:`LOGICAL_DNS`, + // this value is used to specify the cluster’s dns resolvers. + // If this setting is not specified, the value defaults to the default + // resolver, which uses /etc/resolv.conf for configuration. For cluster types + // other than + // :ref:`STRICT_DNS` + // and :ref:`LOGICAL_DNS` + // this setting is ignored. + repeated core.Address dns_resolvers = 18; + + // If specified, outlier detection will be enabled for this upstream cluster. + // Each of the configuration values can be overridden via + // :ref:`runtime values `. + cluster.OutlierDetection outlier_detection = 19; + + // The interval for removing stale hosts from a cluster type + // :ref:`ORIGINAL_DST`. + // Hosts are considered stale if they have not been used + // as upstream destinations during this interval. 
New hosts are added + // to original destination clusters on demand as new connections are + // redirected to Envoy, causing the number of hosts in the cluster to + // grow over time. Hosts that are not stale (they are actively used as + // destinations) are kept in the cluster, which allows connections to + // them remain open, saving the latency that would otherwise be spent + // on opening new connections. If this setting is not specified, the + // value defaults to 5000ms. For cluster types other than + // :ref:`ORIGINAL_DST` + // this setting is ignored. + google.protobuf.Duration cleanup_interval = 20 + [(validate.rules).duration.gt = {}, (gogoproto.stdduration) = true]; + + // Optional configuration used to bind newly established upstream connections. + // This overrides any bind_config specified in the bootstrap proto. + // If the address and port are empty, no bind will be performed. + core.BindConfig upstream_bind_config = 21; + + // Optionally divide the endpoints in this cluster into subsets defined by + // endpoint metadata and selected by route and weighted cluster metadata. + message LbSubsetConfig { + + // If NO_FALLBACK is selected, a result + // equivalent to no healthy hosts is reported. If ANY_ENDPOINT is selected, + // any cluster endpoint may be returned (subject to policy, health checks, + // etc). If DEFAULT_SUBSET is selected, load balancing is performed over the + // endpoints matching the values from the default_subset field. + enum LbSubsetFallbackPolicy { + NO_FALLBACK = 0; + ANY_ENDPOINT = 1; + DEFAULT_SUBSET = 2; + } + + // The behavior used when no endpoint subset matches the selected route's + // metadata. The value defaults to + // :ref:`NO_FALLBACK`. + LbSubsetFallbackPolicy fallback_policy = 1 [(validate.rules).enum.defined_only = true]; + + // Specifies the default subset of endpoints used during fallback if + // fallback_policy is + // :ref:`DEFAULT_SUBSET`. 
+ // Each field in default_subset is + // compared to the matching LbEndpoint.Metadata under the *envoy.lb* + // namespace. It is valid for no hosts to match, in which case the behavior + // is the same as a fallback_policy of + // :ref:`NO_FALLBACK`. + google.protobuf.Struct default_subset = 2; + + // Specifications for subsets. + message LbSubsetSelector { + // List of keys to match with the weighted cluster metadata. + repeated string keys = 1; + } + + // For each entry, LbEndpoint.Metadata's + // *envoy.lb* namespace is traversed and a subset is created for each unique + // combination of key and value. For example: + // + // .. code-block:: json + // + // { "subset_selectors": [ + // { "keys": [ "version" ] }, + // { "keys": [ "stage", "hardware_type" ] } + // ]} + // + // A subset is matched when the metadata from the selected route and + // weighted cluster contains the same keys and values as the subset's + // metadata. The same host may appear in multiple subsets. + repeated LbSubsetSelector subset_selectors = 3; + } + + // Configuration for load balancing subsetting. + LbSubsetConfig lb_subset_config = 22; + + // Specific configuration for the :ref:`RingHash` + // load balancing policy. + message RingHashLbConfig { + // Minimum hash ring size, i.e. total virtual nodes. A larger size + // will provide better request distribution since each host in the + // cluster will have more virtual nodes. Defaults to 1024. In the case + // that total number of hosts is greater than the minimum, each host will + // be allocated a single virtual node. + google.protobuf.UInt64Value minimum_ring_size = 1; + + // [#not-implemented-hide:] Hide from docs. + message DeprecatedV1 { + // Defaults to true, meaning that std::hash is used to hash hosts onto + // the ketama ring. std::hash can vary by platform. For this reason, + // Envoy will eventually use `xxHash `_ + // by default. This field exists for + // migration purposes and will eventually be deprecated. 
Set it to false + // to use `xxHash `_ now. + google.protobuf.BoolValue use_std_hash = 1; + } + + // Deprecated settings from v1 config. + // [#not-implemented-hide:] Hide from docs. + DeprecatedV1 deprecated_v1 = 2 [deprecated = true]; + } + + // Optional configuration for the load balancing algorithm selected by + // LbPolicy. Currently only + // :ref:`RING_HASH` + // has additional configuration options. + // Specifying ring_hash_lb_config without setting the LbPolicy to + // :ref:`RING_HASH` + // will generate an error at runtime. + oneof lb_config { + // Optional configuration for the Ring Hash load balancing policy. + RingHashLbConfig ring_hash_lb_config = 23; + } + + // Common configuration for all load balancer implementations. + message CommonLbConfig { + // Configures the :ref:`healthy panic threshold `. + // If not specified, the default is 50%. + // + // .. note:: + // The specified percent will be truncated to the nearest 1%. + envoy.type.Percent healthy_panic_threshold = 1; + // Configuration for :ref:`zone aware routing + // `. + // [#not-implemented-hide:] + message ZoneAwareLbConfig { + // [#not-implemented-hide:] + // Configures percentage of requests that will be considered for zone aware routing + // if zone aware routing is configured. If not specified, the default is 100%. + // * :ref:`runtime values `. + // * :ref:`Zone aware routing support `. + envoy.type.Percent routing_enabled = 1; + // [#not-implemented-hide:] + // Configures minimum upstream cluster size required for zone aware routing + // If upstream cluster size is less than specified, zone aware routing is not performed + // even if zone aware routing is configured. If not specified, the default is 6. + // * :ref:`runtime values `. + // * :ref:`Zone aware routing support `. 
+ google.protobuf.UInt64Value min_cluster_size = 2; + } + // Configuration for :ref:`locality weighted load balancing + // ` + message LocalityWeightedLbConfig { + } + oneof locality_config_specifier { + // [#not-implemented-hide:] + ZoneAwareLbConfig zone_aware_lb_config = 2; + LocalityWeightedLbConfig locality_weighted_lb_config = 3; + } + } + + // Common configuration for all load balancer implementations. + CommonLbConfig common_lb_config = 27; + + // Optional custom transport socket implementation to use for upstream connections. + core.TransportSocket transport_socket = 24; + + // The Metadata field can be used to provide additional information about the + // cluster. It can be used for stats, logging, and varying filter behavior. + // Fields should use reverse DNS notation to denote which entity within Envoy + // will need the information. For instance, if the metadata is intended for + // the Router filter, the filter name should be specified as *envoy.router*. + core.Metadata metadata = 25; + + enum ClusterProtocolSelection { + // Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2). + // If :ref:`http2_protocol_options ` are + // present, HTTP2 will be used, otherwise HTTP1.1 will be used. + USE_CONFIGURED_PROTOCOL = 0; + // Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection. + USE_DOWNSTREAM_PROTOCOL = 1; + } + + // Determines how Envoy selects the protocol used to speak to upstream hosts. + ClusterProtocolSelection protocol_selection = 26; +} + +// An extensible structure containing the address Envoy should bind to when +// establishing upstream connections. +message UpstreamBindConfig { + // The address Envoy should bind to when establishing upstream connections. 
+ core.Address source_address = 1; +} diff --git a/api/envoy/api/v2/cluster/BUILD b/api/envoy/api/v2/cluster/BUILD new file mode 100644 index 000000000000..193a3af57b71 --- /dev/null +++ b/api/envoy/api/v2/cluster/BUILD @@ -0,0 +1,35 @@ +load("//bazel:api_build_system.bzl", "api_proto_library", "api_go_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "circuit_breaker", + srcs = ["circuit_breaker.proto"], + visibility = [ + "//envoy/api/v2:__pkg__", + ], + deps = [ + "//envoy/api/v2/core:base", + ], +) + +api_go_proto_library( + name = "circuit_breaker", + proto = ":circuit_breaker", + deps = [ + "//envoy/api/v2/core:base_go_proto", + ], +) + +api_proto_library( + name = "outlier_detection", + srcs = ["outlier_detection.proto"], + visibility = [ + "//envoy/api/v2:__pkg__", + ], +) + +api_go_proto_library( + name = "outlier_detection", + proto = ":outlier_detection", +) diff --git a/api/envoy/api/v2/cluster/circuit_breaker.proto b/api/envoy/api/v2/cluster/circuit_breaker.proto new file mode 100644 index 000000000000..19e378d779de --- /dev/null +++ b/api/envoy/api/v2/cluster/circuit_breaker.proto @@ -0,0 +1,52 @@ +syntax = "proto3"; + +package envoy.api.v2.cluster; +option go_package = "cluster"; + +import "envoy/api/v2/core/base.proto"; + +import "google/protobuf/wrappers.proto"; + +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: Circuit breakers] + +// :ref:`Circuit breaking` settings can be +// specified individually for each defined priority. +message CircuitBreakers { + + // A Thresholds defines CircuitBreaker settings for a + // :ref:`RoutingPriority`. + message Thresholds { + // The :ref:`RoutingPriority` + // the specified CircuitBreaker settings apply to. + // [#comment:TODO(htuch): add (validate.rules).enum.defined_only = true once + // https://github.com/lyft/protoc-gen-validate/issues/42 is resolved.] 
+ core.RoutingPriority priority = 1; + + // The maximum number of connections that Envoy will make to the upstream + // cluster. If not specified, the default is 1024. + google.protobuf.UInt32Value max_connections = 2; + + // The maximum number of pending requests that Envoy will allow to the + // upstream cluster. If not specified, the default is 1024. + google.protobuf.UInt32Value max_pending_requests = 3; + + // The maximum number of parallel requests that Envoy will make to the + // upstream cluster. If not specified, the default is 1024. + google.protobuf.UInt32Value max_requests = 4; + + // The maximum number of parallel retries that Envoy will allow to the + // upstream cluster. If not specified, the default is 3. + google.protobuf.UInt32Value max_retries = 5; + } + + // If multiple :ref:`Thresholds` + // are defined with the same :ref:`RoutingPriority`, + // the first one in the list is used. If no Thresholds is defined for a given + // :ref:`RoutingPriority`, the default values + // are used. + repeated Thresholds thresholds = 1; +} diff --git a/api/envoy/api/v2/cluster/outlier_detection.proto b/api/envoy/api/v2/cluster/outlier_detection.proto new file mode 100644 index 000000000000..bab817adfb08 --- /dev/null +++ b/api/envoy/api/v2/cluster/outlier_detection.proto @@ -0,0 +1,78 @@ +syntax = "proto3"; + +package envoy.api.v2.cluster; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: Outlier detection] + +// See the :ref:`architecture overview ` for +// more information on outlier detection. +message OutlierDetection { + // The number of consecutive 5xx responses before a consecutive 5xx ejection + // occurs. Defaults to 5. + google.protobuf.UInt32Value consecutive_5xx = 1; + + // The time interval between ejection analysis sweeps. 
This can result in + // both new ejections as well as hosts being returned to service. Defaults + // to 10000ms or 10s. + google.protobuf.Duration interval = 2 [(validate.rules).duration.gt = {}]; + + // The base time that a host is ejected for. The real time is equal to the + // base time multiplied by the number of times the host has been ejected. + // Defaults to 30000ms or 30s. + google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration.gt = {}]; + + // The maximum % of an upstream cluster that can be ejected due to outlier + // detection. Defaults to 10%. + google.protobuf.UInt32Value max_ejection_percent = 4 [(validate.rules).uint32.lte = 100]; + + // The % chance that a host will be actually ejected when an outlier status + // is detected through consecutive 5xx. This setting can be used to disable + // ejection or to ramp it up slowly. Defaults to 100. + google.protobuf.UInt32Value enforcing_consecutive_5xx = 5 [(validate.rules).uint32.lte = 100]; + + // The % chance that a host will be actually ejected when an outlier status + // is detected through success rate statistics. This setting can be used to + // disable ejection or to ramp it up slowly. Defaults to 100. + google.protobuf.UInt32Value enforcing_success_rate = 6 [(validate.rules).uint32.lte = 100]; + + // The number of hosts in a cluster that must have enough request volume to + // detect success rate outliers. If the number of hosts is less than this + // setting, outlier detection via success rate statistics is not performed + // for any host in the cluster. Defaults to 5. + google.protobuf.UInt32Value success_rate_minimum_hosts = 7; + + // The minimum number of total requests that must be collected in one + // interval (as defined by the interval duration above) to include this host + // in success rate based outlier detection. If the volume is lower than this + // setting, outlier detection via success rate statistics is not performed + // for that host. Defaults to 100. 
+ google.protobuf.UInt32Value success_rate_request_volume = 8; + + // This factor is used to determine the ejection threshold for success rate + // outlier ejection. The ejection threshold is the difference between the + // mean success rate, and the product of this factor and the standard + // deviation of the mean success rate: mean - (stdev * + // success_rate_stdev_factor). This factor is divided by a thousand to get a + // double. That is, if the desired factor is 1.9, the runtime value should + // be 1900. Defaults to 1900. + google.protobuf.UInt32Value success_rate_stdev_factor = 9; + + // The number of consecutive gateway failures (502, 503, 504 status or + // connection errors that are mapped to one of those status codes) before a + // consecutive gateway failure ejection occurs. Defaults to 5. + google.protobuf.UInt32Value consecutive_gateway_failure = 10; + + // The % chance that a host will be actually ejected when an outlier status + // is detected through consecutive gateway failures. This setting can be + // used to disable ejection or to ramp it up slowly. Defaults to 0. 
+ google.protobuf.UInt32Value enforcing_consecutive_gateway_failure = 11 + [(validate.rules).uint32.lte = 100]; +} diff --git a/api/envoy/api/v2/core/BUILD b/api/envoy/api/v2/core/BUILD new file mode 100644 index 000000000000..4054213a2fb2 --- /dev/null +++ b/api/envoy/api/v2/core/BUILD @@ -0,0 +1,121 @@ +load("//bazel:api_build_system.bzl", "api_proto_library", "api_go_proto_library", "api_go_grpc_library") + +licenses(["notice"]) # Apache 2 + +package_group( + name = "friends", + includes = [ + "//envoy/api/v2:friends", + ], + packages = [ + "//envoy/api/v2/auth", + "//envoy/api/v2/cluster", + "//envoy/api/v2/endpoint", + "//envoy/api/v2/listener", + "//envoy/api/v2/route", + ], +) + +api_proto_library( + name = "address", + srcs = ["address.proto"], + visibility = [ + ":friends", + ], +) + +api_go_proto_library( + name = "address", + proto = ":address", +) + +api_proto_library( + name = "base", + srcs = ["base.proto"], + visibility = [ + ":friends", + ], +) + +api_go_proto_library( + name = "base", + proto = ":base", + deps = [":address_go_proto"], +) + +api_proto_library( + name = "health_check", + srcs = ["health_check.proto"], + visibility = [ + ":friends", + ], + deps = [":base"], +) + +api_go_proto_library( + name = "health_check", + proto = ":health_check", + deps = [":base_go_proto"], +) + +api_proto_library( + name = "config_source", + srcs = ["config_source.proto"], + visibility = [ + ":friends", + ], + deps = [ + ":base", + ":grpc_service", + ], +) + +api_go_proto_library( + name = "config_source", + proto = ":config_source", + deps = [ + ":base_go_proto", + ":grpc_service_go_proto", + ], +) + +api_proto_library( + name = "http_uri", + srcs = ["http_uri.proto"], + visibility = [ + ":friends", + ], +) + +api_go_proto_library( + name = "http_uri", + proto = ":http_uri", +) + +api_proto_library( + name = "grpc_service", + srcs = ["grpc_service.proto"], + visibility = [ + ":friends", + ], + deps = [":base"], +) + +api_go_proto_library( + name = 
"grpc_service", + proto = ":grpc_service", + deps = [":base_go_proto"], +) + +api_proto_library( + name = "protocol", + srcs = ["protocol.proto"], + visibility = [ + ":friends", + ], +) + +api_go_proto_library( + name = "protocol", + proto = ":protocol", +) diff --git a/api/envoy/api/v2/core/address.proto b/api/envoy/api/v2/core/address.proto new file mode 100644 index 000000000000..bf9051a89a7d --- /dev/null +++ b/api/envoy/api/v2/core/address.proto @@ -0,0 +1,95 @@ +syntax = "proto3"; + +package envoy.api.v2.core; + +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: Network addresses] + +message Pipe { + // Unix Domain Socket path. On Linux, paths starting with '@' will use the + // abstract namespace. The starting '@' is replaced by a null byte by Envoy. + // Paths starting with '@' will result in an error in environments other than + // Linux. + string path = 1 [(validate.rules).string.min_bytes = 1]; +} + +message SocketAddress { + enum Protocol { + option (gogoproto.goproto_enum_prefix) = false; + TCP = 0; + // [#not-implemented-hide:] + UDP = 1; + } + Protocol protocol = 1 [(validate.rules).enum.defined_only = true]; + // The address for this socket. :ref:`Listeners ` will bind + // to the address or outbound connections will be made. An empty address is + // not allowed, specify ``0.0.0.0`` or ``::`` to bind any. It's still possible to + // distinguish on an address via the prefix/suffix matching in + // FilterChainMatch after connection. For :ref:`clusters + // `, an address may be either an IP or + // hostname to be resolved via DNS. If it is a hostname, :ref:`resolver_name + // ` should be set unless default + // (i.e. DNS) resolution is expected. 
+ string address = 2 [(validate.rules).string.min_bytes = 1]; + oneof port_specifier { + option (validate.required) = true; + uint32 port_value = 3; + // This is only valid if :ref:`resolver_name + // ` is specified below and the + // named resolver is capable of named port resolution. + string named_port = 4; + } + // The name of the resolver. This must have been registered with Envoy. If this is + // empty, a context dependent default applies. If address is a hostname this + // should be set for resolution other than DNS. If the address is a concrete + // IP address, no resolution will occur. + string resolver_name = 5; + + // When binding to an IPv6 address above, this enables `IPv4 compatibity + // `_. Binding to ``::`` will + // allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into + // IPv6 space as ``::FFFF:``. + bool ipv4_compat = 6; +} + +message BindConfig { + // The address to bind to when creating a socket. + SocketAddress source_address = 1 + [(validate.rules).message.required = true, (gogoproto.nullable) = false]; + + // Whether to set the *IP_FREEBIND* option when creating the socket. When this + // flag is set to true, allows the :ref:`source_address + // ` to be an IP address + // that is not configured on the system running Envoy. When this flag is set + // to false, the option *IP_FREEBIND* is disabled on the socket. When this + // flag is not set (default), the socket is not modified, i.e. the option is + // neither enabled nor disabled. + google.protobuf.BoolValue freebind = 2; +} + +// Addresses specify either a logical or physical address and port, which are +// used to tell Envoy where to bind/listen, connect to upstream and find +// management servers. +message Address { + oneof address { + option (validate.required) = true; + + SocketAddress socket_address = 1; + Pipe pipe = 2; + } +} + +// CidrRange specifies an IP Address and a prefix length to construct +// the subnet mask for a `CIDR `_ range. 
+message CidrRange { + // IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``. + string address_prefix = 1 [(validate.rules).string.min_bytes = 1]; + // Length of prefix, e.g. 0, 32. + google.protobuf.UInt32Value prefix_len = 2 [(validate.rules).uint32.lte = 128]; +} diff --git a/api/envoy/api/v2/core/base.proto b/api/envoy/api/v2/core/base.proto new file mode 100644 index 000000000000..ade2f025845e --- /dev/null +++ b/api/envoy/api/v2/core/base.proto @@ -0,0 +1,184 @@ +syntax = "proto3"; + +package envoy.api.v2.core; +option go_package = "core"; + +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: Common types] + +// Identifies location of where either Envoy runs or where upstream hosts run. +message Locality { + // Region this :ref:`zone ` belongs to. + string region = 1; + + // Defines the local service zone where Envoy is running. Though optional, it + // should be set if discovery service routing is used and the discovery + // service exposes :ref:`zone data `, + // either in this message or via :option:`--service-zone`. The meaning of zone + // is context dependent, e.g. `Availability Zone (AZ) + // `_ + // on AWS, `Zone `_ on + // GCP, etc. + string zone = 2; + + // When used for locality of upstream hosts, this field further splits zone + // into smaller chunks of sub-zones so they can be load balanced + // independently. + string sub_zone = 3; +} + +// Identifies a specific Envoy instance. The node identifier is presented to the +// management server, which may use this identifier to distinguish per Envoy +// configuration for serving. +message Node { + // An opaque node identifier for the Envoy node. This also provides the local + // service node name. 
It should be set if any of the following features are + // used: :ref:`statsd `, :ref:`CDS + // `, and :ref:`HTTP tracing + // `, either in this message or via + // :option:`--service-node`. + string id = 1; + + // Defines the local service cluster name where Envoy is running. Though + // optional, it should be set if any of the following features are used: + // :ref:`statsd `, :ref:`health check cluster + // verification `, + // :ref:`runtime override directory `, + // :ref:`user agent addition `, + // :ref:`HTTP global rate limiting `, + // :ref:`CDS `, and :ref:`HTTP tracing + // `, either in this message or via + // :option:`--service-cluster`. + string cluster = 2; + + // Opaque metadata extending the node identifier. Envoy will pass this + // directly to the management server. + google.protobuf.Struct metadata = 3; + + // Locality specifying where the Envoy instance is running. + Locality locality = 4; + + // This is motivated by informing a management server during canary which + // version of Envoy is being tested in a heterogeneous fleet. This will be set + // by Envoy in management server RPCs. + string build_version = 5; +} + +// Metadata provides additional inputs to filters based on matched listeners, +// filter chains, routes and endpoints. It is structured as a map from filter +// name (in reverse DNS format) to metadata specific to the filter. Metadata +// key-values for a filter are merged as connection and request handling occurs, +// with later values for the same key overriding earlier values. +// +// An example use of metadata is providing additional values to +// http_connection_manager in the envoy.http_connection_manager.access_log +// namespace. +// +// For load balancing, Metadata provides a means to subset cluster endpoints. +// Endpoints have a Metadata object associated and routes contain a Metadata +// object to match against. 
There are some well defined metadata used today for +// this purpose: +// +// * ``{"envoy.lb": {"canary": }}`` This indicates the canary status of an +// endpoint and is also used during header processing +// (x-envoy-upstream-canary) and for stats purposes. +message Metadata { + // Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.* + // namespace is reserved for Envoy's built-in filters. + map filter_metadata = 1; +} + +// Runtime derived uint32 with a default when not specified. +message RuntimeUInt32 { + // Default value if runtime value is not available. + uint32 default_value = 2; + + // Runtime key to get value for comparison. This value is used if defined. + string runtime_key = 3 [(validate.rules).string.min_bytes = 1]; +} + +// Envoy supports :ref:`upstream priority routing +// ` both at the route and the virtual +// cluster level. The current priority implementation uses different connection +// pool and circuit breaking settings for each priority level. This means that +// even for HTTP/2 requests, two physical connections will be used to an +// upstream host. In the future Envoy will likely support true HTTP/2 priority +// over a single upstream connection. +enum RoutingPriority { + DEFAULT = 0; + HIGH = 1; +} + +// HTTP request method. +enum RequestMethod { + option (gogoproto.goproto_enum_prefix) = false; + METHOD_UNSPECIFIED = 0; + GET = 1; + HEAD = 2; + POST = 3; + PUT = 4; + DELETE = 5; + CONNECT = 6; + OPTIONS = 7; + TRACE = 8; +} + +// Header name/value pair. +message HeaderValue { + // Header name. + string key = 1; + + // Header value. + // + // The same :ref:`format specifier ` as used for + // :ref:`HTTP access logging ` applies here, however + // unknown header values are replaced with the empty string instead of `-`. + string value = 2; +} + +// Header name/value pair plus option to control append behavior. +message HeaderValueOption { + // Header name/value pair that this option applies to. 
+ HeaderValue header = 1; + + // Should the value be appended? If true (default), the value is appended to + // existing values. + google.protobuf.BoolValue append = 2; +} + +// Data source consisting of either a file or an inline value. +message DataSource { + oneof specifier { + option (validate.required) = true; + + // Local filesystem data source. + string filename = 1 [(validate.rules).string.min_bytes = 1]; + + // Bytes inlined in the configuration. + bytes inline_bytes = 2 [(validate.rules).bytes.min_len = 1]; + + // String inlined in the configuration. + string inline_string = 3 [(validate.rules).string.min_bytes = 1]; + } +} + +// Configuration for transport socket in :ref:`listeners ` and +// :ref:`clusters `. If the configuration is +// empty, a default transport socket implementation and configuration will be +// chosen based on the platform and existence of tls_context. +message TransportSocket { + // The name of the transport socket to instantiate. The name must match a supported transport + // socket implementation. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // Implementation specific configuration which depends on the implementation being instantiated. + // See the supported transport socket implementations for further documentation. + google.protobuf.Struct config = 2; +} diff --git a/api/envoy/api/v2/core/config_source.proto b/api/envoy/api/v2/core/config_source.proto new file mode 100644 index 000000000000..17bdbbeb28d9 --- /dev/null +++ b/api/envoy/api/v2/core/config_source.proto @@ -0,0 +1,87 @@ +syntax = "proto3"; + +package envoy.api.v2.core; + +import "envoy/api/v2/core/grpc_service.proto"; + +import "google/protobuf/duration.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: Configuration sources] + +// API configuration source. This identifies the API type and cluster that Envoy +// will use to fetch an xDS API. 
+message ApiConfigSource { + // APIs may be fetched via either REST or gRPC. + enum ApiType { + // REST-JSON legacy corresponds to the v1 API. + REST_LEGACY = 0; + // REST-JSON v2 API. The `canonical JSON encoding + // `_ for + // the v2 protos is used. + REST = 1; + // gRPC v2 API. + GRPC = 2; + } + ApiType api_type = 1 [(validate.rules).enum.defined_only = true]; + // Multiple cluster names may be provided for REST_LEGACY/REST. If > 1 + // cluster is defined, clusters will be cycled through if any kind of failure + // occurs. + // + // .. note:: + // + // The cluster with name ``cluster_name`` must be statically defined and its + // type must not be ``EDS``. + repeated string cluster_names = 2; + + // Multiple gRPC services be provided for GRPC. If > 1 cluster is defined, + // services will be cycled through if any kind of failure occurs. + // + // .. note:: + // + // If a gRPC service points to a ``cluster_name``, it must be statically + // defined and its type must not be ``EDS``. + repeated GrpcService grpc_services = 4; + + // For REST APIs, the delay between successive polls. + google.protobuf.Duration refresh_delay = 3 [(gogoproto.stdduration) = true]; +} + +// Aggregated Discovery Service (ADS) options. This is currently empty, but when +// set in :ref:`ConfigSource ` can be used to +// specify that ADS is to be used. +message AggregatedConfigSource { +} + +// Configuration for :ref:`listeners `, :ref:`clusters +// `, :ref:`routes +// `, :ref:`endpoints +// ` etc. may either be sourced from the +// filesystem or from an xDS API source. Filesystem configs are watched with +// inotify for updates. +message ConfigSource { + oneof config_source_specifier { + option (validate.required) = true; + // Path on the filesystem to source and watch for configuration updates. + // + // .. note:: + // + // The path to the source must exist at config load time. + // + // .. 
note:: + // + // Envoy will only watch the file path for *moves.* This is because in general only moves + // are atomic. The same method of swapping files as is demonstrated in the + // :ref:`runtime documentation ` can be used here also. + string path = 1; + // API configuration source. + ApiConfigSource api_config_source = 2; + // When set, ADS will be used to fetch resources. The ADS API configuration + // source in the bootstrap configuration is used. + AggregatedConfigSource ads = 3; + } +} diff --git a/api/envoy/api/v2/core/grpc_service.proto b/api/envoy/api/v2/core/grpc_service.proto new file mode 100644 index 000000000000..91c2595198f3 --- /dev/null +++ b/api/envoy/api/v2/core/grpc_service.proto @@ -0,0 +1,105 @@ +syntax = "proto3"; + +package envoy.api.v2.core; + +import "envoy/api/v2/core/base.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: gRPC services] +// [#proto-status: draft] + +// gRPC service configuration. This is used by :ref:`ApiConfigSource +// ` and filter configurations. +message GrpcService { + message EnvoyGrpc { + // The name of the upstream gRPC cluster. SSL credentials will be supplied + // in the :ref:`Cluster ` :ref:`tls_context + // `. + string cluster_name = 1 [(validate.rules).string.min_bytes = 1]; + } + + message GoogleGrpc { + // The target URI when using the `Google C++ gRPC client + // `_. SSL credentials will be supplied in + // :ref:`credentials `. + string target_uri = 1 [(validate.rules).string.min_bytes = 1]; + + // See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html. + message SslCredentials { + // PEM encoded server root certificates. + DataSource root_certs = 1; + + // PEM encoded client private key. + DataSource private_key = 2; + + // PEM encoded client certificate chain. 
+ DataSource cert_chain = 3; + } + SslCredentials ssl_credentials = 2; + + // The human readable prefix to use when emitting statistics for the gRPC + // service. + // + // .. csv-table:: + // :header: Name, Type, Description + // :widths: 1, 1, 2 + // + // streams_total, Counter, Total number of streams opened + // streams_closed_, Counter, Total streams closed with + string stat_prefix = 3 [(validate.rules).string.min_bytes = 1]; + + // Additional configuration for site-specific customizations of the Google + // gRPC library. + google.protobuf.Struct config = 4; + } + + oneof target_specifier { + option (validate.required) = true; + + // Envoy's in-built gRPC client. + // See the :ref:`gRPC services overview ` + // documentation for discussion on gRPC client selection. + EnvoyGrpc envoy_grpc = 1; + + // `Google C++ gRPC client `_ + // See the :ref:`gRPC services overview ` + // documentation for discussion on gRPC client selection. + GoogleGrpc google_grpc = 2; + } + + // The timeout for the gRPC request. This is the timeout for a specific + // request. + google.protobuf.Duration timeout = 3; + + // gRPC credentials as described at + // https://grpc.io/docs/guides/auth.html#credential-types. + // + // .. note:: + // + // Credentials are only currently implemented for the Google gRPC client. + message Credentials { + oneof credential_specifier { + option (validate.required) = true; + + // OAuth2 access token, see + // https://grpc.io/grpc/cpp/namespacegrpc.html#ad3a80da696ffdaea943f0f858d7a360d. + string access_token = 1; + // [#comment: TODO(htuch): other gRPC auth types, e.g. IAM credentials, JWT, etc.] + } + } + // A set of credentials that will be composed to form the `channel credentials + // `_. + repeated Credentials credentials = 4; + + // Additional metadata to include in streams initiated to the GrpcService. + // This can be used for scenarios in which additional ad hoc authorization + // headers (e.g. `x-foo-bar: baz-key`) are to be injected. 
+ repeated HeaderValue initial_metadata = 5; +} diff --git a/api/envoy/api/v2/core/health_check.proto b/api/envoy/api/v2/core/health_check.proto new file mode 100644 index 000000000000..90d15b7357a9 --- /dev/null +++ b/api/envoy/api/v2/core/health_check.proto @@ -0,0 +1,202 @@ +syntax = "proto3"; + +package envoy.api.v2.core; + +import "envoy/api/v2/core/base.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: Health check] +// * Health checking :ref:`architecture overview `. +// * If health checking is configured for a cluster, additional statistics are emitted. They are +// documented :ref:`here `. + +message HealthCheck { + // The time to wait for a health check response. If the timeout is reached the + // health check attempt will be considered a failure. + google.protobuf.Duration timeout = 1 [(validate.rules).duration.required = true]; + + // The interval between health checks. + google.protobuf.Duration interval = 2 [(validate.rules).duration.required = true]; + + // An optional jitter amount in millseconds. If specified, during every + // internal Envoy will add 0 to interval_jitter to the wait time. + google.protobuf.Duration interval_jitter = 3; + + // The number of unhealthy health checks required before a host is marked + // unhealthy. Note that for *http* health checking if a host responds with 503 + // this threshold is ignored and the host is considered unhealthy immediately. + google.protobuf.UInt32Value unhealthy_threshold = 4; + + // The number of healthy health checks required before a host is marked + // healthy. Note that during startup, only a single successful health check is + // required to mark a host healthy. 
+ google.protobuf.UInt32Value healthy_threshold = 5; + + // [#not-implemented-hide:] Non-serving port for health checking. + google.protobuf.UInt32Value alt_port = 6; + + // Reuse health check connection between health checks. Default is true. + google.protobuf.BoolValue reuse_connection = 7; + + // Describes the encoding of the payload bytes in the payload. + message Payload { + oneof payload { + option (validate.required) = true; + + // Hex encoded payload. E.g., "000000FF". + string text = 1 [(validate.rules).string.min_bytes = 1]; + + // [#not-implemented-hide:] Binary payload. + bytes binary = 2; + } + } + + message HttpHealthCheck { + // The value of the host header in the HTTP health check request. If + // left empty (default value), the IP on behalf of which this health check is performed will be + // used. + string host = 1; + + // Specifies the HTTP path that will be requested during health checking. For example + // */healthcheck*. + string path = 2 [(validate.rules).string.min_bytes = 1]; + + // [#not-implemented-hide:] HTTP specific payload. + Payload send = 3; + + // [#not-implemented-hide:] HTTP specific response. + Payload receive = 4; + + // An optional service name parameter which is used to validate the identity of + // the health checked cluster. See the :ref:`architecture overview + // ` for more information. + string service_name = 5; + + // Specifies a list of HTTP headers that should be added to each request that is sent to the + // health checked cluster. + repeated core.HeaderValueOption request_headers_to_add = 6; + } + + message TcpHealthCheck { + // Empty payloads imply a connect-only health check. + Payload send = 1; + + // When checking the response, “fuzzy” matching is performed such that each + // binary block must be found, and in the order specified, but not + // necessarily contiguous. + repeated Payload receive = 2; + } + + message RedisHealthCheck { + // If set, optionally perform ``EXISTS `` instead of ``PING``. 
A return value + // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other + // than 0 is considered a failure. This allows the user to mark a Redis instance for maintenance + // by setting the specified key to any value and waiting for traffic to drain. + string key = 1; + } + + // `grpc.health.v1.Health + // `_-based + // healthcheck. See `gRPC doc `_ + // for details. + message GrpcHealthCheck { + // An optional service name parameter which will be sent to gRPC service in + // `grpc.health.v1.HealthCheckRequest + // `_. + // message. See `gRPC health-checking overview + // `_ for more information. + string service_name = 1; + } + + // [#not-implemented-hide:] Custom health check. + message CustomHealthCheck { + // The registered name of the custom health check. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // A custom health checker specific configuration which depends on the custom health checker + // being instantiated. See :api:`envoy/config/health_checker` for reference. + google.protobuf.Struct config = 2; + } + + oneof health_checker { + option (validate.required) = true; + + // HTTP health check. + HttpHealthCheck http_health_check = 8; + + // TCP health check. + TcpHealthCheck tcp_health_check = 9; + + // Redis health check. + RedisHealthCheck redis_health_check = 10; + + // gRPC health check. + GrpcHealthCheck grpc_health_check = 11; + + // [#not-implemented-hide:] Custom health check. + CustomHealthCheck custom_health_check = 13; + } + + // The "no traffic interval" is a special health check interval that is used when a cluster has + // never had traffic routed to it. This lower interval allows cluster information to be kept up to + // date, without sending a potentially large amount of active health checking traffic for no + // reason. Once a cluster has been used for traffic routing, Envoy will shift back to using the + // standard health check interval that is defined. 
Note that this interval takes precedence over + // any other. + // + // The default value for "no traffic interval" is 60 seconds. + google.protobuf.Duration no_traffic_interval = 12; + + // The "unhealthy interval" is a health check interval that is used for hosts that are marked as + // unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the + // standard health check interval that is defined. + // + // The default value for "unhealthy interval" is the same as "interval". + google.protobuf.Duration unhealthy_interval = 14; + + // The "unhealthy edge interval" is a special health check interval that is used for the first + // health check right after a host is marked as unhealthy. For subsequent health checks + // Envoy will shift back to using either "unhealthy interval" if present or the standard health + // check interval that is defined. + // + // The default value for "unhealthy edge interval" is the same as "unhealthy interval". + google.protobuf.Duration unhealthy_edge_interval = 15; + + // The "healthy edge interval" is a special health check interval that is used for the first + // health check right after a host is marked as healthy. For subsequent health checks + // Envoy will shift back to using the standard health check interval that is defined. + // + // The default value for "healthy edge interval" is the same as the default interval. + google.protobuf.Duration healthy_edge_interval = 16; +} + +// Endpoint health status. +enum HealthStatus { + // The health status is not known. This is interpreted by Envoy as *HEALTHY*. + UNKNOWN = 0; + + // Healthy. + HEALTHY = 1; + + // Unhealthy. + UNHEALTHY = 2; + + // Connection draining in progress. E.g., + // ``_ + // or + // ``_. + // This is interpreted by Envoy as *UNHEALTHY*. + DRAINING = 3; + + // Health check timed out. This is part of HDS and is interpreted by Envoy as + // *UNHEALTHY*. 
+ TIMEOUT = 4; +} diff --git a/api/envoy/api/v2/core/http_uri.proto b/api/envoy/api/v2/core/http_uri.proto new file mode 100644 index 000000000000..92097778b448 --- /dev/null +++ b/api/envoy/api/v2/core/http_uri.proto @@ -0,0 +1,47 @@ +syntax = "proto3"; + +package envoy.api.v2.core; + +import "google/protobuf/duration.proto"; +import "gogoproto/gogo.proto"; + +import "validate/validate.proto"; + +// Envoy external URI descriptor +// [#not-implemented-hide:] +message HttpUri { + // The HTTP server URI. It should be a full FQDN with protocol, host and path. + // + // Example: + // + // .. code-block:: yaml + // + // uri: https://www.googleapis.com/oauth2/v1/certs + // + string uri = 1 [(validate.rules).string.min_bytes = 1]; + + // Specify how `uri` is to be fetched. Today, this requires an explicit + // cluster, but in the future we may support dynamic cluster creation or + // inline DNS resolution. See `issue + // `_. + oneof http_upstream_type { + option (validate.required) = true; + // A cluster is created in the Envoy "cluster_manager" config + // section. This field specifies the cluster name. + // + // Example: + // + // .. code-block:: yaml + // + // cluster: jwks_cluster + // + string cluster = 2 [(validate.rules).string.min_bytes = 1]; + } + + // Sets the maximum duration in milliseconds that a response can take to arrive upon request. 
+ google.protobuf.Duration timeout = 3 [ + (validate.rules).duration.gte = {}, + (validate.rules).duration.required = true, + (gogoproto.stdduration) = true + ]; +} diff --git a/api/envoy/api/v2/core/protocol.proto b/api/envoy/api/v2/core/protocol.proto new file mode 100644 index 000000000000..2e7948787dfe --- /dev/null +++ b/api/envoy/api/v2/core/protocol.proto @@ -0,0 +1,84 @@ +// [#protodoc-title: Protocol options] + +syntax = "proto3"; + +package envoy.api.v2.core; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: Protocol options] + +// [#not-implemented-hide:] +message TcpProtocolOptions { +} + +message HttpProtocolOptions { + // The idle timeout for upstream connection pool connections. The idle timeout is defined as the + // period in which there are no active requests. If not set, there is no idle timeout. When the + // idle timeout is reached the connection will be closed. Note that request based timeouts mean + // that HTTP/2 PINGs will not keep the connection alive. + google.protobuf.Duration idle_timeout = 1 [(gogoproto.stdduration) = true]; +} + +message Http1ProtocolOptions { + // Handle HTTP requests with absolute URLs in the requests. These requests + // are generally sent by clients to forward/explicit proxies. This allows clients to configure + // envoy as their HTTP proxy. In Unix, for example, this is typically done by setting the + // *http_proxy* environment variable. + google.protobuf.BoolValue allow_absolute_url = 1; + + // Handle incoming HTTP/1.0 and HTTP 0.9 requests. + // This is off by default, and not fully standards compliant. There is support for pre-HTTP/1.1 + // style connect logic, dechunking, and handling lack of client host iff + // *default_host_for_http_10* is configured. + bool accept_http_10 = 2; + + // A default host for HTTP/1.0 requests. 
This is highly suggested if *accept_http_10* is true as + // Envoy does not otherwise support HTTP/1.0 without a Host header. + // This is a no-op if *accept_http_10* is not true. + string default_host_for_http_10 = 3; +} + +message Http2ProtocolOptions { + // `Maximum table size `_ + // (in octets) that the encoder is permitted to use for the dynamic HPACK table. Valid values + // range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively disables header + // compression. + google.protobuf.UInt32Value hpack_table_size = 1; + + // `Maximum concurrent streams `_ + // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1) + // and defaults to 2147483647. + google.protobuf.UInt32Value max_concurrent_streams = 2 + [(validate.rules).uint32 = {gte: 1, lte: 2147483647}]; + + // `Initial stream-level flow-control window + // `_ size. Valid values range from 65535 + // (2^16 - 1, HTTP/2 default) to 2147483647 (2^31 - 1, HTTP/2 maximum) and defaults to 268435456 + // (256 * 1024 * 1024). + // + // NOTE: 65535 is the initial window size from HTTP/2 spec. We only support increasing the default + // window size now, so it's also the minimum. + + // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the + // HTTP/2 codec buffers. Once the buffer reaches this pointer, watermark callbacks will fire to + // stop the flow of data to the codec buffers. + google.protobuf.UInt32Value initial_stream_window_size = 3 + [(validate.rules).uint32 = {gte: 65535, lte: 2147483647}]; + + // Similar to *initial_stream_window_size*, but for connection-level flow-control + // window. Currently, this has the same minimum/maximum/default as *initial_stream_window_size*. 
+ google.protobuf.UInt32Value initial_connection_window_size = 4 + [(validate.rules).uint32 = {gte: 65535, lte: 2147483647}]; +} + +// [#not-implemented-hide:] +message GrpcProtocolOptions { + Http2ProtocolOptions http2_protocol_options = 1; +} diff --git a/api/envoy/api/v2/discovery.proto b/api/envoy/api/v2/discovery.proto new file mode 100644 index 000000000000..74e7c5a2be96 --- /dev/null +++ b/api/envoy/api/v2/discovery.proto @@ -0,0 +1,95 @@ +syntax = "proto3"; + +package envoy.api.v2; +option go_package = "v2"; + +import "envoy/api/v2/core/base.proto"; + +import "google/protobuf/any.proto"; +import "google/rpc/status.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: Common discovery API components] + +// A DiscoveryRequest requests a set of versioned resources of the same type for +// a given Envoy node on some API. +message DiscoveryRequest { + // The version_info provided in the request messages will be the version_info + // received with the most recent successfully processed response or empty on + // the first request. It is expected that no new request is sent after a + // response is received until the Envoy instance is ready to ACK/NACK the new + // configuration. ACK/NACK takes place by returning the new API config version + // as applied or the previous API config version respectively. Each type_url + // (see below) has an independent version associated with it. + string version_info = 1; + + // The node making the request. + core.Node node = 2; + + // List of resources to subscribe to, e.g. list of cluster names or a route + // configuration name. If this is empty, all resources for the API are + // returned. LDS/CDS expect empty resource_names, since this is global + // discovery for the Envoy instance. The LDS and CDS responses will then imply + // a number of resources that need to be fetched via EDS/RDS, which will be + // explicitly enumerated in resource_names. 
+ repeated string resource_names = 3; + + // Type of the resource that is being requested, e.g. + // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". This is implicit + // in requests made via singleton xDS APIs such as CDS, LDS, etc. but is + // required for ADS. + string type_url = 4; + + // nonce corresponding to DiscoveryResponse being ACK/NACKed. See above + // discussion on version_info and the DiscoveryResponse nonce comment. This + // may be empty if no nonce is available, e.g. at startup or for non-stream + // xDS implementations. + string response_nonce = 5; + + // This is populated when the previous :ref:`DiscoveryResponse ` + // failed to update configuration. The *message* field in *error_details* provides the Envoy + // internal exception related to the failure. It is only intended for consumption during manual + // debugging, the string provided is not guaranteed to be stable across Envoy versions. + google.rpc.Status error_detail = 6; +} + +message DiscoveryResponse { + // The version of the response data. + string version_info = 1; + + // The response resources. These resources are typed and depend on the API being called. + repeated google.protobuf.Any resources = 2 [(gogoproto.nullable) = false]; + + // [#not-implemented-hide:] + // Canary is used to support two Envoy command line flags: + // + // * --terminate-on-canary-transition-failure. When set, Envoy is able to + // terminate if it detects that configuration is stuck at canary. Consider + // this example sequence of updates: + // - Management server applies a canary config successfully. + // - Management server rolls back to a production config. + // - Envoy rejects the new production config. + // Since there is no sensible way to continue receiving configuration + // updates, Envoy will then terminate and apply production config from a + // clean slate. + // * --dry-run-canary. When set, a canary response will never be applied, only + // validated via a dry run. 
+ bool canary = 3; + + // Type URL for resources. This must be consistent with the type_url in the + // Any messages for resources if resources is non-empty. This effectively + // identifies the xDS API when muxing over ADS. + string type_url = 4; + + // For gRPC based subscriptions, the nonce provides a way to explicitly ack a + // specific DiscoveryResponse in a following DiscoveryRequest. Additional + // messages may have been sent by Envoy to the management server for the + // previous version on the stream prior to this DiscoveryResponse, that were + // unprocessed at response send time. The nonce allows the management server + // to ignore any further DiscoveryRequests for the previous version until a + // DiscoveryRequest bearing the nonce. The nonce is optional and is not + // required for non-stream based xDS implementations. + string nonce = 5; +} diff --git a/api/envoy/api/v2/eds.proto b/api/envoy/api/v2/eds.proto new file mode 100644 index 000000000000..0c63fbaa5848 --- /dev/null +++ b/api/envoy/api/v2/eds.proto @@ -0,0 +1,63 @@ +syntax = "proto3"; + +package envoy.api.v2; + +option java_generic_services = true; + +import "envoy/api/v2/discovery.proto"; +import "envoy/api/v2/endpoint/endpoint.proto"; + +import "google/api/annotations.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: EDS] + +service EndpointDiscoveryService { + // The resource_names field in DiscoveryRequest specifies a list of clusters + // to subscribe to updates for. + rpc StreamEndpoints(stream DiscoveryRequest) returns (stream DiscoveryResponse) { + } + + rpc FetchEndpoints(DiscoveryRequest) returns (DiscoveryResponse) { + option (google.api.http) = { + post: "/v2/discovery:endpoints" + body: "*" + }; + } +} + +// Each route from RDS will map to a single cluster or traffic split across +// clusters using weights expressed in the RDS WeightedCluster. 
+// +// With EDS, each cluster is treated independently from a LB perspective, with +// LB taking place between the Localities within a cluster and at a finer +// granularity between the hosts within a locality. For a given cluster, the +// effective weight of a host is its load_balancing_weight multiplied by the +// load_balancing_weight of its Locality. +message ClusterLoadAssignment { + // Name of the cluster. This will be the :ref:`service_name + // ` value if specified + // in the cluster :ref:`EdsClusterConfig + // `. + string cluster_name = 1 [(validate.rules).string.min_bytes = 1]; + + // List of endpoints to load balance to. + repeated endpoint.LocalityLbEndpoints endpoints = 2 [(gogoproto.nullable) = false]; + + // Load balancing policy settings. + message Policy { + // Percentage of traffic (0-100) that should be dropped. This + // action allows protection of upstream hosts should they unable to + // recover from an outage or should they be unable to autoscale and hence + // overall incoming traffic volume need to be trimmed to protect them. + // [#v2-api-diff: This is known as maintenance mode in v1.] + double drop_overload = 1 [(validate.rules).double = {gte: 0, lte: 100}]; + } + + // Load balancing policy settings. 
+ Policy policy = 4; +} diff --git a/api/envoy/api/v2/endpoint/BUILD b/api/envoy/api/v2/endpoint/BUILD new file mode 100644 index 000000000000..0ae7d994e9d4 --- /dev/null +++ b/api/envoy/api/v2/endpoint/BUILD @@ -0,0 +1,47 @@ +load("//bazel:api_build_system.bzl", "api_proto_library", "api_go_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "endpoint", + srcs = ["endpoint.proto"], + visibility = ["//envoy/api/v2:friends"], + deps = [ + "//envoy/api/v2/auth:cert", + "//envoy/api/v2/core:address", + "//envoy/api/v2/core:base", + "//envoy/api/v2/core:config_source", + "//envoy/api/v2/core:health_check", + "//envoy/api/v2/core:protocol", + ], +) + +api_go_proto_library( + name = "endpoint", + proto = ":endpoint", + deps = [ + "//envoy/api/v2/auth:cert_go_proto", + "//envoy/api/v2/core:address_go_proto", + "//envoy/api/v2/core:base_go_proto", + "//envoy/api/v2/core:config_source_go_proto", + "//envoy/api/v2/core:health_check_go_proto", + "//envoy/api/v2/core:protocol_go_proto", + ], +) + +api_proto_library( + name = "load_report", + srcs = ["load_report.proto"], + visibility = ["//envoy/api/v2:friends"], + deps = [ + "//envoy/api/v2/core:base", + ], +) + +api_go_proto_library( + name = "load_report", + proto = ":load_report", + deps = [ + "//envoy/api/v2/core:base_go_proto", + ], +) diff --git a/api/envoy/api/v2/endpoint/endpoint.proto b/api/envoy/api/v2/endpoint/endpoint.proto new file mode 100644 index 000000000000..0095dd40a3fd --- /dev/null +++ b/api/envoy/api/v2/endpoint/endpoint.proto @@ -0,0 +1,119 @@ +syntax = "proto3"; + +package envoy.api.v2.endpoint; +option go_package = "endpoint"; + +import "envoy/api/v2/core/address.proto"; +import "envoy/api/v2/core/base.proto"; +import "envoy/api/v2/core/health_check.proto"; + +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: Endpoints] + +// Upstream host 
identifier. +message Endpoint { + // The upstream host address. + core.Address address = 1; + + // [#not-implemented-hide:] The optional health check configuration. + message HealthCheckConfig { + // Optional alternative health check port value. + // + // By default the health check address port of an upstream host is the same + // as the host's serving address port. This provides an alternative health + // check port. Setting this with a non-zero value allows an upstream host + // to have different health check address port. + uint32 port_value = 1; + } + + // [#not-implemented-hide:] The optional health check configuration is used as + // configuration for the health checker to contact the health checked host. + // + // .. attention:: + // + // This takes into effect only for upstream clusters with + // :ref:`active health checking ` enabled. + HealthCheckConfig health_check_config = 2; +} + +// An Endpoint that Envoy can route traffic to. +message LbEndpoint { + // Upstream host identifier + Endpoint endpoint = 1; + + // Optional health status when known and supplied by EDS server. + core.HealthStatus health_status = 2; + + // The endpoint metadata specifies values that may be used by the load + // balancer to select endpoints in a cluster for a given request. The filter + // name should be specified as *envoy.lb*. An example boolean key-value pair + // is *canary*, providing the optional canary status of the upstream host. + // This may be matched against in a route's ForwardAction metadata_match field + // to subset the endpoints considered in cluster load balancing. + core.Metadata metadata = 3; + + // The optional load balancing weight of the upstream host, in the range 1 - + // 128. Envoy uses the load balancing weight in some of the built in load + // balancers. The load balancing weight for an endpoint is divided by the sum + // of the weights of all endpoints in the endpoint's locality to produce a + // percentage of traffic for the endpoint. 
This percentage is then further + // weighted by the endpoint's locality's load balancing weight from + // LocalityLbEndpoints. If unspecified, each host is presumed to have equal + // weight in a locality. + // + // .. attention:: + // + // The limit of 128 is somewhat arbitrary, but is applied due to performance + // concerns with the current implementation and can be removed when + // `this issue `_ is fixed. + google.protobuf.UInt32Value load_balancing_weight = 4 + [(validate.rules).uint32 = {gte: 1, lte: 128}]; +} + +// A group of endpoints belonging to a Locality. +// One can have multiple LocalityLbEndpoints for a locality, but this is +// generally only done if the different groups need to have different load +// balancing weights or different priorities. +message LocalityLbEndpoints { + // Identifies location of where the upstream hosts run. + core.Locality locality = 1; + + // The group of endpoints belonging to the locality specified. + repeated LbEndpoint lb_endpoints = 2 [(gogoproto.nullable) = false]; + + // Optional: Per priority/region/zone/sub_zone weight - range 1-128. The load + // balancing weight for a locality is divided by the sum of the weights of all + // localities at the same priority level to produce the effective percentage + // of traffic for the locality. + // + // Locality weights are only considered when :ref:`locality weighted load + // balancing ` is + // configured. These weights are ignored otherwise. If no weights are + // specificed when locality weighted load balancing is enabled, the cluster is + // assumed to have a weight of 1. + // + // .. attention:: + // + // The limit of 128 is somewhat arbitrary, but is applied due to performance + // concerns with the current implementation and can be removed when + // `this issue `_ is fixed. + google.protobuf.UInt32Value load_balancing_weight = 3 + [(validate.rules).uint32 = {gte: 1, lte: 128}]; + + // Optional: the priority for this LocalityLbEndpoints. 
If unspecified this will + // default to the highest priority (0). + // + // Under usual circumstances, Envoy will only select endpoints for the highest + // priority (0). In the event all endpoints for a particular priority are + // unavailable/unhealthy, Envoy will fail over to selecting endpoints for the + // next highest priority group. + // + // Priorities should range from 0 (highest) to N (lowest) without skipping. + uint32 priority = 5; +} diff --git a/api/envoy/api/v2/endpoint/load_report.proto b/api/envoy/api/v2/endpoint/load_report.proto new file mode 100644 index 000000000000..b61a0025a7a0 --- /dev/null +++ b/api/envoy/api/v2/endpoint/load_report.proto @@ -0,0 +1,96 @@ +syntax = "proto3"; + +package envoy.api.v2.endpoint; + +import "envoy/api/v2/core/base.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +// These are stats Envoy reports to GLB every so often. Report frequency is +// defined by +// :ref:`LoadStatsResponse.load_reporting_interval`. +// Stats per upstream region/zone and optionally per subzone. +// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. +message UpstreamLocalityStats { + // Name of zone, region and optionally endpoint group these metrics were + // collected from. Zone and region names could be empty if unknown. + core.Locality locality = 1; + + // The total number of requests sent by this Envoy since the last report. A + // single HTTP or gRPC request or stream is counted as one request. A TCP + // connection is also treated as one request. There is no explicit + // total_requests field below for a locality, but it may be inferred from: + // + // .. code-block:: none + // + // total_requests = total_successful_requests + total_requests_in_progress + + // total_error_requests + // + // The total number of requests successfully completed by the endpoints in the + // locality. 
These include non-5xx responses for HTTP, where errors + // originate at the client and the endpoint responded successfully. For gRPC, + // the grpc-status values are those not covered by total_error_requests below. + uint64 total_successful_requests = 2; + + // The total number of unfinished requests + uint64 total_requests_in_progress = 3; + + // The total number of requests that failed due to errors at the endpoint. + // For HTTP these are responses with 5xx status codes and for gRPC the + // grpc-status values: + // + // - DeadlineExceeded + // - Unimplemented + // - Internal + // - Unavailable + // - Unknown + // - DataLoss + uint64 total_error_requests = 4; + + // Stats for multi-dimensional load balancing. + repeated EndpointLoadMetricStats load_metric_stats = 5; + + // [#not-implemented-hide:] The priority of the endpoint group these metrics + // were collected from. + uint32 priority = 6; +} + +// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. +message EndpointLoadMetricStats { + // Name of the metric; may be empty. + string metric_name = 1; + + // Number of calls that finished and included this metric. + uint64 num_requests_finished_with_metric = 2; + + // Sum of metric values across all calls that finished with this metric for + // load_reporting_interval. + double total_metric_value = 3; +} + +// Per cluster load stats. Envoy reports these stats a management server in a +// :ref:`LoadStatsRequest` +// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. +message ClusterStats { + // The name of the cluster. + string cluster_name = 1 [(validate.rules).string.min_bytes = 1]; + + // Need at least one. + repeated UpstreamLocalityStats upstream_locality_stats = 2 + [(validate.rules).repeated .min_items = 1]; + + // Cluster-level stats such as total_successful_requests may be computed by + // summing upstream_locality_stats. In addition, below there are additional + // cluster-wide stats. 
The following total_requests equality holds at the + // cluster-level: + // + // .. code-block:: none + // + // sum_locality(total_successful_requests) + sum_locality(total_requests_in_progress) + + // sum_locality(total_error_requests) + total_dropped_requests` + // + // The total number of dropped requests. This covers requests + // deliberately dropped by the drop_overload policy and circuit breaking. + uint64 total_dropped_requests = 3; +} diff --git a/api/envoy/api/v2/lds.proto b/api/envoy/api/v2/lds.proto new file mode 100644 index 000000000000..7780fea23ed7 --- /dev/null +++ b/api/envoy/api/v2/lds.proto @@ -0,0 +1,169 @@ +syntax = "proto3"; + +package envoy.api.v2; + +option java_generic_services = true; + +import "envoy/api/v2/core/address.proto"; +import "envoy/api/v2/core/base.proto"; +import "envoy/api/v2/discovery.proto"; +import "envoy/api/v2/listener/listener.proto"; + +import "google/api/annotations.proto"; +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: Listener] +// Listener :ref:`configuration overview ` + +// The Envoy instance initiates an RPC at startup to discover a list of +// listeners. Updates are delivered via streaming from the LDS server and +// consist of a complete update of all listeners. Existing connections will be +// allowed to drain from listeners that are no longer present. +service ListenerDiscoveryService { + rpc StreamListeners(stream DiscoveryRequest) returns (stream DiscoveryResponse) { + } + + rpc FetchListeners(DiscoveryRequest) returns (DiscoveryResponse) { + option (google.api.http) = { + post: "/v2/discovery:listeners" + body: "*" + }; + } +} + +message Listener { + // The unique name by which this listener is known. If no name is provided, + // Envoy will allocate an internal UUID for the listener. 
If the listener is to be dynamically + // updated or removed via :ref:`LDS ` a unique name must be provided. + // By default, the maximum length of a listener's name is limited to 60 characters. This limit can + // be increased by setting the :option:`--max-obj-name-len` command line argument to the desired + // value. + string name = 1; + + // The address that the listener should listen on. In general, the address must be unique, though + // that is governed by the bind rules of the OS. E.g., multiple listeners can listen on port 0 on + // Linux as the actual port will be allocated by the OS. + core.Address address = 2 [(validate.rules).message.required = true, (gogoproto.nullable) = false]; + + // A list of filter chains to consider for this listener. The + // :ref:`FilterChain ` with the most specific + // :ref:`FilterChainMatch ` criteria is used on a + // connection. + // + // .. attention:: + // + // In the current version, multiple filter chains are supported **only** so that SNI can be + // configured. See the :ref:`FAQ entry ` on how to configure SNI for more + // information. When multiple filter chains are configured, each filter chain must have an + // **identical** set of :ref:`filters `. If the + // filters differ, the configuration will fail to load. In the future, this limitation will be + // relaxed such that different filters can be used depending on which filter chain matches + // (based on SNI or some other parameter). + repeated listener.FilterChain filter_chains = 3 + [(validate.rules).repeated .min_items = 1, (gogoproto.nullable) = false]; + + // If a connection is redirected using *iptables*, the port on which the proxy + // receives it might be different from the original destination address. When this flag is set to + // true, the listener hands off redirected connections to the listener associated with the + // original destination address. 
If there is no listener associated with the original destination + // address, the connection is handled by the listener that receives it. Defaults to false. + // + // .. attention:: + // + // This field is deprecated. Use :ref:`an original_dst ` + // :ref:`listener filter ` instead. + // + // Note that hand off to another listener is *NOT* performed without this flag. Once + // :ref:`FilterChainMatch ` is implemented this flag + // will be removed, as filter chain matching can be used to select a filter chain based on the + // restored destination address. + google.protobuf.BoolValue use_original_dst = 4 [deprecated = true]; + + // Soft limit on size of the listener’s new connection read and write buffers. + // If unspecified, an implementation defined default is applied (1MiB). + google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; + + // Listener metadata. + core.Metadata metadata = 6; + + // [#not-implemented-hide:] + message DeprecatedV1 { + // Whether the listener should bind to the port. A listener that doesn’t + // bind can only receive connections redirected from other listeners that + // set use_original_dst parameter to true. Default is true. + // + // [V2-API-DIFF] This is deprecated in v2, all Listeners will bind to their + // port. An additional filter chain must be created for every original + // destination port this listener may redirect to in v2, with the original + // port specified in the FilterChainMatch destination_port field. + google.protobuf.BoolValue bind_to_port = 1; + } + + // [#not-implemented-hide:] + DeprecatedV1 deprecated_v1 = 7; + + enum DrainType { + // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check + // filter), listener removal/modification, and hot restart. + DEFAULT = 0; + // Drain in response to listener removal/modification and hot restart. This setting does not + // include /healthcheck/fail. 
This setting may be desirable if Envoy is hosting both ingress + // and egress listeners. + MODIFY_ONLY = 1; + } + + // The type of draining to perform at a listener-wide level. + DrainType drain_type = 8; + + // Listener filters have the opportunity to manipulate and augment the connection metadata that + // is used in connection filter chain matching, for example. These filters are run before any in + // :ref:`filter_chains `. Order matters as the + // filters are processed sequentially right after a socket has been accepted by the listener, and + // before a connection is created. + repeated listener.ListenerFilter listener_filters = 9 [(gogoproto.nullable) = false]; + + // Whether the listener should be set as a transparent socket. + // When this flag is set to true, connections can be redirected to the listener using an + // *iptables* *TPROXY* target, in which case the original source and destination addresses and + // ports are preserved on accepted connections. This flag should be used in combination with + // :ref:`an original_dst ` :ref:`listener filter + // ` to mark the connections' local addresses as + // "restored." This can be used to hand off each redirected connection to another listener + // associated with the connection's destination address. Direct connections to the socket without + // using *TPROXY* cannot be distinguished from connections redirected using *TPROXY* and are + // therefore treated as if they were redirected. + // When this flag is set to false, the listener's socket is explicitly reset as non-transparent. + // Setting this flag requires Envoy to run with the *CAP_NET_ADMIN* capability. + // When this flag is not set (default), the socket is not modified, i.e. the transparent option + // is neither set nor reset. + google.protobuf.BoolValue transparent = 10; + + // Whether the listener should set the *IP_FREEBIND* socket option. 
When this + // flag is set to true, listeners can be bound to an IP address that is not + // configured on the system running Envoy. When this flag is set to false, the + // option *IP_FREEBIND* is disabled on the socket. When this flag is not set + // (default), the socket is not modified, i.e. the option is neither enabled + // nor disabled. + google.protobuf.BoolValue freebind = 11; + + // Whether the listener should accept TCP Fast Open (TFO) connections. + // When this flag is set to a value greater than 0, the option TCP_FASTOPEN is enabled on + // the socket, with a queue length of the specified size + // (see `details in RFC7413 `_). + // When this flag is set to 0, the option TCP_FASTOPEN is disabled on the socket. + // When this flag is not set (default), the socket is not modified, + // i.e. the option is neither enabled nor disabled. + // + // On Linux, the net.ipv4.tcp_fastopen kernel parameter must include flag 0x2 to enable + // TCP_FASTOPEN. + // See `ip-sysctl.txt `_. + // + // On macOS, only values of 0, 1, and unset are valid; other values may result in an error. + // To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter. 
+ google.protobuf.UInt32Value tcp_fast_open_queue_length = 12; +} diff --git a/api/envoy/api/v2/listener/BUILD b/api/envoy/api/v2/listener/BUILD new file mode 100644 index 000000000000..063cb64174ee --- /dev/null +++ b/api/envoy/api/v2/listener/BUILD @@ -0,0 +1,24 @@ +load("//bazel:api_build_system.bzl", "api_proto_library", "api_go_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "listener", + srcs = ["listener.proto"], + visibility = ["//envoy/api/v2:friends"], + deps = [ + "//envoy/api/v2/auth:cert", + "//envoy/api/v2/core:address", + "//envoy/api/v2/core:base", + ], +) + +api_go_proto_library( + name = "listener", + proto = ":listener", + deps = [ + "//envoy/api/v2/auth:cert_go_proto", + "//envoy/api/v2/core:address_go_proto", + "//envoy/api/v2/core:base_go_proto", + ], +) diff --git a/api/envoy/api/v2/listener/listener.proto b/api/envoy/api/v2/listener/listener.proto new file mode 100644 index 000000000000..6d947e8bd6e6 --- /dev/null +++ b/api/envoy/api/v2/listener/listener.proto @@ -0,0 +1,133 @@ +syntax = "proto3"; + +package envoy.api.v2.listener; +option go_package = "listener"; + +import "envoy/api/v2/core/address.proto"; +import "envoy/api/v2/auth/cert.proto"; +import "envoy/api/v2/core/base.proto"; + +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: Listener components] +// Listener :ref:`configuration overview ` + +message Filter { + // The name of the filter to instantiate. The name must match a supported + // filter. 
The built-in filters are: + // + // [#comment:TODO(mattklein123): Auto generate the following list] + // * :ref:`envoy.client_ssl_auth` + // * :ref:`envoy.echo ` + // * :ref:`envoy.http_connection_manager ` + // * :ref:`envoy.mongo_proxy ` + // * :ref:`envoy.ratelimit ` + // * :ref:`envoy.redis_proxy ` + // * :ref:`envoy.tcp_proxy ` + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // Filter specific configuration which depends on the filter being + // instantiated. See the supported filters for further documentation. + google.protobuf.Struct config = 2; + + // [#not-implemented-hide:] + message DeprecatedV1 { + string type = 1; + } + + // [#not-implemented-hide:] + DeprecatedV1 deprecated_v1 = 3 [deprecated = true]; +} + +// Specifies the match criteria for selecting a specific filter chain for a +// listener. +message FilterChainMatch { + // If non-empty, the SNI domains to consider. May contain a wildcard prefix, + // e.g. ``*.example.com``. + // + // .. attention:: + // + // See the :ref:`FAQ entry ` on how to configure SNI for more + // information. + repeated string sni_domains = 1; + + // If non-empty, an IP address and prefix length to match addresses when the + // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. + // [#not-implemented-hide:] + repeated core.CidrRange prefix_ranges = 3; + + // If non-empty, an IP address and suffix length to match addresses when the + // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. + // [#not-implemented-hide:] + string address_suffix = 4; + + // [#not-implemented-hide:] + google.protobuf.UInt32Value suffix_len = 5; + + // The criteria is satisfied if the source IP address of the downstream + // connection is contained in at least one of the specified subnets. If the + // parameter is not specified or the list is empty, the source IP address is + // ignored. 
+ // [#not-implemented-hide:] + repeated core.CidrRange source_prefix_ranges = 6; + + // The criteria is satisfied if the source port of the downstream connection + // is contained in at least one of the specified ports. If the parameter is + // not specified, the source port is ignored. + // [#not-implemented-hide:] + repeated google.protobuf.UInt32Value source_ports = 7; + + // Optional destination port to consider when use_original_dst is set on the + // listener in determining a filter chain match. + // [#not-implemented-hide:] + google.protobuf.UInt32Value destination_port = 8; +} + +// A filter chain wraps a set of match criteria, an option TLS context, a set of filters, and +// various other parameters. +message FilterChain { + // The criteria to use when matching a connection to this filter chain. + FilterChainMatch filter_chain_match = 1; + + // The TLS context for this filter chain. + auth.DownstreamTlsContext tls_context = 2; + + // A list of individual network filters that make up the filter chain for + // connections established with the listener. Order matters as the filters are + // processed sequentially as connection events happen. Note: If the filter + // list is empty, the connection will close by default. + repeated Filter filters = 3 [(gogoproto.nullable) = false]; + + // Whether the listener should expect a PROXY protocol V1 header on new + // connections. If this option is enabled, the listener will assume that that + // remote address of the connection is the one specified in the header. Some + // load balancers including the AWS ELB support this option. If the option is + // absent or set to false, Envoy will use the physical peer address of the + // connection as the remote address. + google.protobuf.BoolValue use_proxy_proto = 4; + + // [#not-implemented-hide:] filter chain metadata. + core.Metadata metadata = 5; + + // See :ref:`base.TransportSocket` description. 
+ core.TransportSocket transport_socket = 6; +} + +message ListenerFilter { + // The name of the filter to instantiate. The name must match a supported + // filter. The built-in filters are: + // + // [#comment:TODO(mattklein123): Auto generate the following list] + // * :ref:`envoy.listener.original_dst ` + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // Filter specific configuration which depends on the filter being + // instantiated. See the supported filters for further documentation. + google.protobuf.Struct config = 2; +} diff --git a/api/envoy/api/v2/ratelimit/BUILD b/api/envoy/api/v2/ratelimit/BUILD new file mode 100644 index 000000000000..e51c0e232aed --- /dev/null +++ b/api/envoy/api/v2/ratelimit/BUILD @@ -0,0 +1,14 @@ +load("//bazel:api_build_system.bzl", "api_proto_library", "api_go_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "ratelimit", + srcs = ["ratelimit.proto"], + visibility = ["//envoy/api/v2:friends"], +) + +api_go_proto_library( + name = "ratelimit", + proto = ":ratelimit", +) diff --git a/api/envoy/api/v2/ratelimit/ratelimit.proto b/api/envoy/api/v2/ratelimit/ratelimit.proto new file mode 100644 index 000000000000..47818cdc3ebe --- /dev/null +++ b/api/envoy/api/v2/ratelimit/ratelimit.proto @@ -0,0 +1,62 @@ +syntax = "proto3"; + +package envoy.api.v2.ratelimit; +option go_package = "ratelimit"; + +import "validate/validate.proto"; + +// [#protodoc-title: Common rate limit components] + +// A RateLimitDescriptor is a list of hierarchical entries that are used by the service to +// determine the final rate limit key and overall allowed limit. Here are some examples of how +// they might be used for the domain "envoy". +// +// .. code-block:: cpp +// +// ["authenticated": "false"], ["remote_address": "10.0.0.1"] +// +// What it does: Limits all unauthenticated traffic for the IP address 10.0.0.1. The +// configuration supplies a default limit for the *remote_address* key. 
If there is a desire to +// raise the limit for 10.0.0.1 or block it entirely it can be specified directly in the +// configuration. +// +// .. code-block:: cpp +// +// ["authenticated": "false"], ["path": "/foo/bar"] +// +// What it does: Limits all unauthenticated traffic globally for a specific path (or prefix if +// configured that way in the service). +// +// .. code-block:: cpp +// +// ["authenticated": "false"], ["path": "/foo/bar"], ["remote_address": "10.0.0.1"] +// +// What it does: Limits unauthenticated traffic to a specific path for a specific IP address. +// Like (1) we can raise/block specific IP addresses if we want with an override configuration. +// +// .. code-block:: cpp +// +// ["authenticated": "true"], ["client_id": "foo"] +// +// What it does: Limits all traffic for an authenticated client "foo" +// +// .. code-block:: cpp +// +// ["authenticated": "true"], ["client_id": "foo"], ["path": "/foo/bar"] +// +// What it does: Limits traffic to a specific path for an authenticated client "foo" +// +// The idea behind the API is that (1)/(2)/(3) and (4)/(5) can be sent in 1 request if desired. +// This enables building complex application scenarios with a generic backend. +message RateLimitDescriptor { + message Entry { + // Descriptor key. + string key = 1 [(validate.rules).string.min_bytes = 1]; + + // Descriptor value. + string value = 2 [(validate.rules).string.min_bytes = 1]; + } + + // Descriptor entries. 
+ repeated Entry entries = 1 [(validate.rules).repeated .min_items = 1]; +} diff --git a/api/envoy/api/v2/rds.proto b/api/envoy/api/v2/rds.proto new file mode 100644 index 000000000000..e820852defc4 --- /dev/null +++ b/api/envoy/api/v2/rds.proto @@ -0,0 +1,89 @@ +syntax = "proto3"; + +package envoy.api.v2; + +option java_generic_services = true; + +import "envoy/api/v2/core/base.proto"; +import "envoy/api/v2/discovery.proto"; +import "envoy/api/v2/route/route.proto"; + +import "google/api/annotations.proto"; +import "google/protobuf/wrappers.proto"; + +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: HTTP route configuration] +// * Routing :ref:`architecture overview ` +// * HTTP :ref:`router filter ` + +// The resource_names field in DiscoveryRequest specifies a route configuration. +// This allows an Envoy configuration with multiple HTTP listeners (and +// associated HTTP connection manager filters) to use different route +// configurations. Each listener will bind its HTTP connection manager filter to +// a route table via this identifier. +service RouteDiscoveryService { + rpc StreamRoutes(stream DiscoveryRequest) returns (stream DiscoveryResponse) { + } + + rpc FetchRoutes(DiscoveryRequest) returns (DiscoveryResponse) { + option (google.api.http) = { + post: "/v2/discovery:routes" + body: "*" + }; + } +} + +message RouteConfiguration { + // The name of the route configuration. For example, it might match + // :ref:`route_config_name + // ` in + // :ref:`envoy_api_msg_config.filter.network.http_connection_manager.v2.Rds`. + string name = 1; + + // An array of virtual hosts that make up the route table. + repeated route.VirtualHost virtual_hosts = 2 [(gogoproto.nullable) = false]; + + // Optionally specifies a list of HTTP headers that the connection manager + // will consider to be internal only. If they are found on external requests they will be cleaned + // prior to filter invocation. 
See :ref:`config_http_conn_man_headers_x-envoy-internal` for more + // information. + repeated string internal_only_headers = 3; + + // Specifies a list of HTTP headers that should be added to each response that + // the connection manager encodes. Headers specified at this level are applied + // after headers from any enclosed :ref:`envoy_api_msg_route.VirtualHost` or + // :ref:`envoy_api_msg_route.RouteAction`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.HeaderValueOption response_headers_to_add = 4; + + // Specifies a list of HTTP headers that should be removed from each response + // that the connection manager encodes. + repeated string response_headers_to_remove = 5; + + // Specifies a list of HTTP headers that should be added to each request + // routed by the HTTP connection manager. Headers specified at this level are + // applied after headers from any enclosed :ref:`envoy_api_msg_route.VirtualHost` or + // :ref:`envoy_api_msg_route.RouteAction`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.HeaderValueOption request_headers_to_add = 6; + + // An optional boolean that specifies whether the clusters that the route + // table refers to will be validated by the cluster manager. If set to true + // and a route refers to a non-existent cluster, the route table will not + // load. If set to false and a route refers to a non-existent cluster, the + // route table will load and the router filter will return a 404 if the route + // is selected at runtime. This setting defaults to true if the route table + // is statically defined via the :ref:`route_config + // ` + // option. This setting defaults to false if the route table is loaded dynamically via the + // :ref:`rds + // ` + // option. 
Users may wish to override the default behavior in certain cases (for example when + // using CDS with a static route table). + google.protobuf.BoolValue validate_clusters = 7; +} diff --git a/api/envoy/api/v2/route/BUILD b/api/envoy/api/v2/route/BUILD new file mode 100644 index 000000000000..4604ccfa5dca --- /dev/null +++ b/api/envoy/api/v2/route/BUILD @@ -0,0 +1,24 @@ +load("//bazel:api_build_system.bzl", "api_proto_library", "api_go_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "route", + srcs = ["route.proto"], + visibility = ["//envoy/api/v2:friends"], + deps = [ + "//envoy/api/v2/auth", + "//envoy/api/v2/core:base", + "//envoy/type:range", + ], +) + +api_go_proto_library( + name = "route", + proto = ":route", + deps = [ + "//envoy/api/v2/auth:auth_go_proto", + "//envoy/api/v2/core:base_go_proto", + "//envoy/type:range_go_proto", + ], +) diff --git a/api/envoy/api/v2/route/route.proto b/api/envoy/api/v2/route/route.proto new file mode 100644 index 000000000000..4d87d414face --- /dev/null +++ b/api/envoy/api/v2/route/route.proto @@ -0,0 +1,896 @@ +syntax = "proto3"; + +package envoy.api.v2.route; +option go_package = "route"; +option java_generic_services = true; + +import "envoy/api/v2/core/base.proto"; +import "envoy/api/v2/auth/auth.proto"; +import "envoy/type/range.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: HTTP route] +// * Routing :ref:`architecture overview ` +// * HTTP :ref:`router filter ` + +// The top level element in the routing configuration is a virtual host. Each virtual host has +// a logical name as well as a set of domains that get routed to it based on the incoming request's +// host header. This allows a single listener to service multiple top level domain path trees. 
Once +// a virtual host is selected based on the domain, the routes are processed in order to see which +// upstream cluster to route to or whether to perform a redirect. +message VirtualHost { + // The logical name of the virtual host. This is used when emitting certain + // statistics but is not relevant for routing. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // A list of domains (host/authority header) that will be matched to this + // virtual host. Wildcard hosts are supported in the form of “*.foo.com” or + // “*-bar.foo.com”. + // + // .. note:: + // + // The wildcard will not match the empty string. + // e.g. “*-bar.foo.com” will match “baz-bar.foo.com” but not “-bar.foo.com”. + // Additionally, a special entry “*” is allowed which will match any + // host/authority header. Only a single virtual host in the entire route + // configuration can match on “*”. A domain must be unique across all virtual + // hosts or the config will fail to load. + repeated string domains = 2 [(validate.rules).repeated .min_items = 1]; + + // The list of routes that will be matched, in order, for incoming requests. + // The first route that matches will be used. + repeated Route routes = 3 [(gogoproto.nullable) = false]; + + enum TlsRequirementType { + // No TLS requirement for the virtual host. + NONE = 0; + + // External requests must use TLS. If a request is external and it is not + // using TLS, a 301 redirect will be sent telling the client to use HTTPS. + EXTERNAL_ONLY = 1; + + // All requests must use TLS. If a request is not using TLS, a 301 redirect + // will be sent telling the client to use HTTPS. + ALL = 2; + } + + // Specifies the type of TLS enforcement the virtual host expects. If this option is not + // specified, there is no TLS requirement for the virtual host. + TlsRequirementType require_tls = 4; + + // A list of virtual clusters defined for this virtual host. Virtual clusters + // are used for additional statistics gathering. 
+ repeated VirtualCluster virtual_clusters = 5; + + // Specifies a set of rate limit configurations that will be applied to the + // virtual host. + repeated RateLimit rate_limits = 6; + + // Specifies a list of HTTP headers that should be added to each request + // handled by this virtual host. Headers specified at this level are applied + // after headers from enclosed :ref:`envoy_api_msg_route.RouteAction` and before headers from the + // enclosing :ref:`envoy_api_msg_RouteConfiguration`. For more information, including + // details on header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.HeaderValueOption request_headers_to_add = 7; + + // Specifies a list of HTTP headers that should be added to each response + // handled by this virtual host. Headers specified at this level are applied + // after headers from enclosed :ref:`envoy_api_msg_route.RouteAction` and before headers from the + // enclosing :ref:`envoy_api_msg_RouteConfiguration`. For more information, including + // details on header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.HeaderValueOption response_headers_to_add = 10; + + // Specifies a list of HTTP headers that should be removed from each response + // handled by this virtual host. + repeated string response_headers_to_remove = 11; + + // Indicates that the virtual host has a CORS policy. + CorsPolicy cors = 8; + + // [#not-implemented-hide:] + // Return a 401/403 when auth checks fail. + auth.AuthAction auth = 9; + + // The per_filter_config field can be used to provide virtual host-specific + // configurations for filters. The key should match the filter name, such as + // *envoy.buffer* for the HTTP buffer filter. Use of this field is filter + // specific; see the :ref:`HTTP filter documentation ` + // for if and how it is utilized. 
+ map per_filter_config = 12; +} + +// A route is both a specification of how to match a request as well as an indication of what to do +// next (e.g., redirect, forward, rewrite, etc.). +// +// .. attention:: +// +// Envoy supports routing on HTTP method via :ref:`header matching +// `. +message Route { + // Route matching parameters. + RouteMatch match = 1 [(validate.rules).message.required = true, (gogoproto.nullable) = false]; + + oneof action { + option (validate.required) = true; + + // Route request to some upstream cluster. + RouteAction route = 2; + + // Return a redirect. + RedirectAction redirect = 3; + + // Return an arbitrary HTTP response directly, without proxying. + DirectResponseAction direct_response = 7; + } + + // The Metadata field can be used to provide additional information + // about the route. It can be used for configuration, stats, and logging. + // The metadata should go under the filter namespace that will need it. + // For instance, if the metadata is intended for the Router filter, + // the filter name should be specified as *envoy.router*. + core.Metadata metadata = 4; + + // Decorator for the matched route. + Decorator decorator = 5; + + // [#not-implemented-hide:] + // Return a 401/403 when auth checks fail. + auth.AuthAction auth = 6; + + // The per_filter_config field can be used to provide route-specific + // configurations for filters. The key should match the filter name, such as + // *envoy.buffer* for the HTTP buffer filter. Use of this field is filter + // specific; see the :ref:`HTTP filter documentation ` for + // if and how it is utilized. + map per_filter_config = 8; +} + +// Compared to the :ref:`cluster ` field that specifies a +// single upstream cluster as the target of a request, the :ref:`weighted_clusters +// ` option allows for specification of +// multiple upstream clusters along with weights that indicate the percentage of +// traffic to be forwarded to each cluster. 
The router selects an upstream cluster based on the +// weights. +message WeightedCluster { + message ClusterWeight { + // Name of the upstream cluster. The cluster must exist in the + // :ref:`cluster manager configuration `. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // An integer between 0 and :ref:`total_weight + // `. When a request matches the route, + // the choice of an upstream cluster is determined by its weight. The sum of weights across all + // entries in the clusters array must add up to the total_weight, which defaults to 100. + google.protobuf.UInt32Value weight = 2; + + // Optional endpoint metadata match criteria. Only endpoints in the upstream + // cluster with metadata matching that set in metadata_match will be + // considered. The filter name should be specified as *envoy.lb*. + core.Metadata metadata_match = 3; + + // Specifies a list of headers to be added to requests when this cluster is selected + // through the enclosing :ref:`envoy_api_msg_route.RouteAction`. + // Headers specified at this level are applied before headers from the enclosing + // :ref:`envoy_api_msg_route.RouteAction`, + // :ref:`envoy_api_msg_route.VirtualHost`, and + // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.HeaderValueOption request_headers_to_add = 4; + + // Specifies a list of headers to be added to responses when this cluster is selected + // through the enclosing :ref:`envoy_api_msg_route.RouteAction`. + // Headers specified at this level are applied before headers from the enclosing + // :ref:`envoy_api_msg_route.RouteAction`, + // :ref:`envoy_api_msg_route.VirtualHost`, and + // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. 
+ repeated core.HeaderValueOption response_headers_to_add = 5; + + // Specifies a list of headers to be removed from responses when this cluster is selected + // through the enclosing :ref:`envoy_api_msg_route.RouteAction`. + repeated string response_headers_to_remove = 6; + + reserved 7; + + // The per_filter_config field can be used to provide weighted cluster-specific + // configurations for filters. The key should match the filter name, such as + // *envoy.buffer* for the HTTP buffer filter. Use of this field is filter + // specific; see the :ref:`HTTP filter documentation ` + // for if and how it is utilized. + map per_filter_config = 8; + } + + // Specifies one or more upstream clusters associated with the route. + repeated ClusterWeight clusters = 1 [(validate.rules).repeated .min_items = 1]; + + // Specifies the total weight across all clusters. The sum of all cluster weights must equal this + // value, which must be greater than 0. Defaults to 100. + google.protobuf.UInt32Value total_weight = 3 [(validate.rules).uint32.gte = 1]; + + // Specifies the runtime key prefix that should be used to construct the + // runtime keys associated with each cluster. When the *runtime_key_prefix* is + // specified, the router will look for weights associated with each upstream + // cluster under the key *runtime_key_prefix* + "." + *cluster[i].name* where + // *cluster[i]* denotes an entry in the clusters array field. If the runtime + // key for the cluster does not exist, the value specified in the + // configuration file will be used as the default weight. See the :ref:`runtime documentation + // ` for how key names map to the underlying implementation. + string runtime_key_prefix = 2; +} + +message RouteMatch { + oneof path_specifier { + option (validate.required) = true; + + // If specified, the route is a prefix rule meaning that the prefix must + // match the beginning of the *:path* header. 
+ string prefix = 1; + + // If specified, the route is an exact path rule meaning that the path must + // exactly match the *:path* header once the query string is removed. + string path = 2; + + // If specified, the route is a regular expression rule meaning that the + // regex must match the *:path* header once the query string is removed. The entire path + // (without the query string) must match the regex. The rule will not match if only a + // subsequence of the *:path* header matches the regex. The regex grammar is defined `here + // `_. + // + // Examples: + // + // * The regex */b[io]t* matches the path */bit* + // * The regex */b[io]t* matches the path */bot* + // * The regex */b[io]t* does not match the path */bite* + // * The regex */b[io]t* does not match the path */bit/bot* + string regex = 3; + } + + // Indicates that prefix/path matching should be case insensitive. The default + // is true. + google.protobuf.BoolValue case_sensitive = 4; + + // Indicates that the route should additionally match on a runtime key. An + // integer between 0-100. Every time the route is considered for a match, a + // random number between 0-99 is selected. If the number is <= the value found + // in the key (checked first) or, if the key is not present, the default + // value, the route is a match (assuming everything also about the route + // matches). A runtime route configuration can be used to roll out route changes in a + // gradual manner without full code/config deploys. Refer to the + // :ref:`traffic shifting ` docs + // for additional documentation. + core.RuntimeUInt32 runtime = 5; + + // Specifies a set of headers that the route should match on. The router will + // check the request’s headers against all the specified headers in the route + // config. A match will happen if all the headers in the route are present in + // the request with the same values (or based on presence if the value field + // is not in the config). 
+ repeated HeaderMatcher headers = 6; + + // Specifies a set of URL query parameters on which the route should + // match. The router will check the query string from the *path* header + // against all the specified query parameters. If the number of specified + // query parameters is nonzero, they all must match the *path* header's + // query string for a match to occur. + repeated QueryParameterMatcher query_parameters = 7; +} + +message CorsPolicy { + // Specifies the origins that will be allowed to do CORS requests. + repeated string allow_origin = 1; + + // Specifies the content for the *access-control-allow-methods* header. + string allow_methods = 2; + + // Specifies the content for the *access-control-allow-headers* header. + string allow_headers = 3; + + // Specifies the content for the *access-control-expose-headers* header. + string expose_headers = 4; + + // Specifies the content for the *access-control-max-age* header. + string max_age = 5; + + // Specifies whether the resource allows credentials. + google.protobuf.BoolValue allow_credentials = 6; + + // Specifies if CORS is enabled. Defaults to true. Only effective on route. + google.protobuf.BoolValue enabled = 7; +} + +message RouteAction { + oneof cluster_specifier { + option (validate.required) = true; + + // Indicates the upstream cluster to which the request should be routed + // to. + string cluster = 1; + + // Envoy will determine the cluster to route to by reading the value of the + // HTTP header named by cluster_header from the request headers. If the + // header is not found or the referenced cluster does not exist, Envoy will + // return a 404 response. + // + // .. attention:: + // + // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 + // *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. + string cluster_header = 2; + + // Multiple upstream clusters can be specified for a given route. 
The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. See + // :ref:`traffic splitting ` + // for additional documentation. + WeightedCluster weighted_clusters = 3; + } + + enum ClusterNotFoundResponseCode { + // HTTP status code - 503 Service Unavailable. + SERVICE_UNAVAILABLE = 0; + + // HTTP status code - 404 Not Found. + NOT_FOUND = 1; + } + + // The HTTP status code to use when configured cluster is not found. + // The default response code is 503 Service Unavailable. + ClusterNotFoundResponseCode cluster_not_found_response_code = 20 + [(validate.rules).enum.defined_only = true]; + + // Optional endpoint metadata match criteria. Only endpoints in the upstream + // cluster with metadata matching that set in metadata_match will be + // considered. The filter name should be specified as *envoy.lb*. + core.Metadata metadata_match = 4; + + // Indicates that during forwarding, the matched prefix (or path) should be + // swapped with this value. This option allows application URLs to be rooted + // at a different path from those exposed at the reverse proxy layer. + string prefix_rewrite = 5; + + oneof host_rewrite_specifier { + // Indicates that during forwarding, the host header will be swapped with + // this value. + string host_rewrite = 6; + + // Indicates that during forwarding, the host header will be swapped with + // the hostname of the upstream host chosen by the cluster manager. This + // option is applicable only when the destination cluster for a route is of + // type *strict_dns* or *logical_dns*. Setting this to true with other cluster + // types has no effect. + google.protobuf.BoolValue auto_host_rewrite = 7; + } + + // Specifies the timeout for the route. If not specified, the default is 15s. + // + // .. note:: + // + // This timeout includes all retries. 
See also + // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, + // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the + // :ref:`retry overview `. + google.protobuf.Duration timeout = 8 [(gogoproto.stdduration) = true]; + + // HTTP retry :ref:`architecture overview `. + message RetryPolicy { + // Specifies the conditions under which retry takes place. These are the same + // conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and + // :ref:`config_http_filters_router_x-envoy-retry-grpc-on`. + string retry_on = 1; + + // Specifies the allowed number of retries. This parameter is optional and + // defaults to 1. These are the same conditions documented for + // :ref:`config_http_filters_router_x-envoy-max-retries`. + google.protobuf.UInt32Value num_retries = 2; + + // Specifies a non-zero timeout per retry attempt. This parameter is optional. + // The same conditions documented for + // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply. + // + // .. note:: + // + // If left unspecified, Envoy will use the global + // :ref:`route timeout ` for the request. + // Consequently, when using a :ref:`5xx ` based + // retry policy, a request that times out will not be retried as the total timeout budget + // would have been exhausted. + google.protobuf.Duration per_try_timeout = 3 [(gogoproto.stdduration) = true]; + } + + // Indicates that the route has a retry policy. + RetryPolicy retry_policy = 9; + + // The router is capable of shadowing traffic from one cluster to another. The current + // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to + // respond before returning the response from the primary cluster. All normal statistics are + // collected for the shadow cluster making this feature useful for testing. + // + // During shadowing, the host/authority header is altered such that *-shadow* is appended. 
This is + // useful for logging. For example, *cluster1* becomes *cluster1-shadow*. + message RequestMirrorPolicy { + // Specifies the cluster that requests will be mirrored to. The cluster must + // exist in the cluster manager configuration. + string cluster = 1 [(validate.rules).string.min_bytes = 1]; + + // If not specified, all requests to the target cluster will be mirrored. If + // specified, Envoy will lookup the runtime key to get the % of requests to + // mirror. Valid values are from 0 to 10000, allowing for increments of + // 0.01% of requests to be mirrored. If the runtime key is specified in the + // configuration but not present in runtime, 0 is the default and thus 0% of + // requests will be mirrored. + string runtime_key = 2; + } + + // Indicates that the route has a request mirroring policy. + RequestMirrorPolicy request_mirror_policy = 10; + + // Optionally specifies the :ref:`routing priority `. + // [#comment:TODO(htuch): add (validate.rules).enum.defined_only = true once + // https://github.com/lyft/protoc-gen-validate/issues/42 is resolved.] + core.RoutingPriority priority = 11; + + // Specifies a set of headers that will be added to requests matching this + // route. Headers specified at this level are applied before headers from the + // enclosing :ref:`envoy_api_msg_route.VirtualHost` and + // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including details on + // header value syntax, see the documentation on :ref:`custom request headers + // `. + repeated core.HeaderValueOption request_headers_to_add = 12; + + // Specifies a set of headers that will be added to responses to requests + // matching this route. Headers specified at this level are applied before + // headers from the enclosing :ref:`envoy_api_msg_route.VirtualHost` and + // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including + // details on header value syntax, see the documentation on + // :ref:`custom request headers `. 
+ repeated core.HeaderValueOption response_headers_to_add = 18; + + // Specifies a list of HTTP headers that should be removed from each response + // to requests matching this route. + repeated string response_headers_to_remove = 19; + + // Specifies a set of rate limit configurations that could be applied to the + // route. + repeated RateLimit rate_limits = 13; + + // Specifies if the rate limit filter should include the virtual host rate + // limits. By default, if the route configured rate limits, the virtual host + // :ref:`rate_limits ` are not applied to the + // request. + google.protobuf.BoolValue include_vh_rate_limits = 14; + + // Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer + // `. + message HashPolicy { + message Header { + // The name of the request header that will be used to obtain the hash + // key. If the request header is not present, no hash will be produced. + string header_name = 1 [(validate.rules).string.min_bytes = 1]; + } + + // Envoy supports two types of cookie affinity: + // + // 1. Passive. Envoy takes a cookie that's present in the cookies header and + // hashes on its value. + // + // 2. Generated. Envoy generates and sets a cookie with an expiration (TTL) + // on the first request from the client in its response to the client, + // based on the endpoint the request gets sent to. The client then + // presents this on the next and all subsequent requests. The hash of + // this is sufficient to ensure these requests get sent to the same + // endpoint. The cookie is generated by hashing the source and + // destination ports and addresses so that multiple independent HTTP2 + // streams on the same connection will independently receive the same + // cookie, even if they arrive at the Envoy simultaneously. + message Cookie { + // The name of the cookie that will be used to obtain the hash key. If the + // cookie is not present and ttl below is not set, no hash will be + // produced. 
+ string name = 1 [(validate.rules).string.min_bytes = 1]; + + // If specified, a cookie with the TTL will be generated if the cookie is + // not present. + google.protobuf.Duration ttl = 2 [(gogoproto.stdduration) = true]; + } + + message ConnectionProperties { + // Hash on source IP address. + bool source_ip = 1; + } + + oneof policy_specifier { + option (validate.required) = true; + + // Header hash policy. + Header header = 1; + + // Cookie hash policy. + Cookie cookie = 2; + + // Connection properties hash policy. + ConnectionProperties connection_properties = 3; + } + } + + // Specifies a list of hash policies to use for ring hash load balancing. Each + // hash policy is evaluated individually and the combined result is used to + // route the request. The method of combination is deterministic such that + // identical lists of hash policies will produce the same hash. Since a hash + // policy examines specific parts of a request, it can fail to produce a hash + // (i.e. if the hashed header is not present). If (and only if) all configured + // hash policies fail to generate a hash, no hash will be produced for + // the route. In this case, the behavior is the same as if no hash policies + // were specified (i.e. the ring hash load balancer will choose a random + // backend). + repeated HashPolicy hash_policy = 15; + + // Indicates that a HTTP/1.1 client connection to this particular route + // should be allowed (and expected) to upgrade to a WebSocket connection. The + // default is false. + // + // .. attention:: + // + // If set to true, Envoy will expect the first request matching this route to + // contain WebSocket upgrade headers. If the headers are not present, the + // connection will be rejected. If set to true, Envoy will setup plain TCP + // proxying between the client and the upstream server. Hence, an upstream + // server that rejects the WebSocket upgrade request is also responsible for + // closing the associated connection. 
Until then, Envoy will continue to + // proxy data from the client to the upstream server. + // + // Redirects, timeouts and retries are not supported on routes where websocket upgrades are + // allowed. + google.protobuf.BoolValue use_websocket = 16; + + // Indicates that the route has a CORS policy. + CorsPolicy cors = 17; + + reserved 21; +} + +message RedirectAction { + // The host portion of the URL will be swapped with this value. + string host_redirect = 1; + + oneof path_rewrite_specifier { + // The path portion of the URL will be swapped with this value. + string path_redirect = 2; + + // Indicates that during redirection, the matched prefix (or path) + // should be swapped with this value. This option allows redirect URLs be dynamically created + // based on the request. + string prefix_rewrite = 5; + } + + enum RedirectResponseCode { + // Moved Permanently HTTP Status Code - 301. + MOVED_PERMANENTLY = 0; + + // Found HTTP Status Code - 302. + FOUND = 1; + + // See Other HTTP Status Code - 303. + SEE_OTHER = 2; + + // Temporary Redirect HTTP Status Code - 307. + TEMPORARY_REDIRECT = 3; + + // Permanent Redirect HTTP Status Code - 308. + PERMANENT_REDIRECT = 4; + } + + // The HTTP status code to use in the redirect response. The default response + // code is MOVED_PERMANENTLY (301). + RedirectResponseCode response_code = 3 [(validate.rules).enum.defined_only = true]; + + // The scheme portion of the URL will be swapped with "https". + bool https_redirect = 4; + + // Indicates that during redirection, the query portion of the URL will + // be removed. Default value is false. + bool strip_query = 6; +} + +message DirectResponseAction { + // Specifies the HTTP response status to be returned. + uint32 status = 1 [(validate.rules).uint32 = {gte: 100, lt: 600}]; + + // Specifies the content of the response body. If this setting is omitted, + // no body is included in the generated response. + // + // .. 
note:: + // + // Headers can be specified using *response_headers_to_add* in + // :ref:`envoy_api_msg_RouteConfiguration`. + core.DataSource body = 2; +} + +message Decorator { + // The operation name associated with the request matched to this route. If tracing is + // enabled, this information will be used as the span name reported for this request. + // + // .. note:: + // + // For ingress (inbound) requests, or egress (outbound) responses, this value may be overridden + // by the :ref:`x-envoy-decorator-operation + // ` header. + string operation = 1 [(validate.rules).string.min_bytes = 1]; +} + +// A virtual cluster is a way of specifying a regex matching rule against +// certain important endpoints such that statistics are generated explicitly for +// the matched requests. The reason this is useful is that when doing +// prefix/path matching Envoy does not always know what the application +// considers to be an endpoint. Thus, it’s impossible for Envoy to generically +// emit per endpoint statistics. However, often systems have highly critical +// endpoints that they wish to get “perfect” statistics on. Virtual cluster +// statistics are perfect in the sense that they are emitted on the downstream +// side such that they include network level failures. +// +// Documentation for :ref:`virtual cluster statistics `. +// +// .. note:: +// +// Virtual clusters are a useful tool, but we do not recommend setting up a virtual cluster for +// every application endpoint. This is both not easily maintainable and as well the matching and +// statistics output are not free. +message VirtualCluster { + // Specifies a regex pattern to use for matching requests. The entire path of the request + // must match the regex. The regex grammar used is defined `here + // `_. 
+ // + // Examples: + // + // * The regex */rides/\d+* matches the path */rides/0* + // * The regex */rides/\d+* matches the path */rides/123* + // * The regex */rides/\d+* does not match the path */rides/123/456* + string pattern = 1 [(validate.rules).string.min_bytes = 1]; + + // Specifies the name of the virtual cluster. The virtual cluster name as well + // as the virtual host name are used when emitting statistics. The statistics are emitted by the + // router filter and are documented :ref:`here `. + string name = 2 [(validate.rules).string.min_bytes = 1]; + + // Optionally specifies the HTTP method to match on. For example GET, PUT, + // etc. + // [#comment:TODO(htuch): add (validate.rules).enum.defined_only = true once + // https://github.com/lyft/protoc-gen-validate/issues/42 is resolved.] + core.RequestMethod method = 3; +} + +// Global rate limiting :ref:`architecture overview `. +message RateLimit { + // Refers to the stage set in the filter. The rate limit configuration only + // applies to filters with the same stage number. The default stage number is + // 0. + // + // .. note:: + // + // The filter supports a range of 0 - 10 inclusively for stage numbers. + google.protobuf.UInt32Value stage = 1 [(validate.rules).uint32.lte = 10]; + + // The key to be set in runtime to disable this rate limit configuration. + string disable_key = 2; + + message Action { + // The following descriptor entry is appended to the descriptor: + // + // .. code-block:: cpp + // + // ("source_cluster", "") + // + // is derived from the :option:`--service-cluster` option. + message SourceCluster { + } + + // The following descriptor entry is appended to the descriptor: + // + // .. 
code-block:: cpp + // + // ("destination_cluster", "") + // + // Once a request matches against a route table rule, a routed cluster is determined by one of + // the following :ref:`route table configuration ` + // settings: + // + // * :ref:`cluster ` indicates the upstream cluster + // to route to. + // * :ref:`weighted_clusters ` + // chooses a cluster randomly from a set of clusters with attributed weight. + // * :ref:`cluster_header ` indicates which + // header in the request contains the target cluster. + message DestinationCluster { + } + + // The following descriptor entry is appended when a header contains a key that matches the + // *header_name*: + // + // .. code-block:: cpp + // + // ("", "") + message RequestHeaders { + // The header name to be queried from the request headers. The header’s + // value is used to populate the value of the descriptor entry for the + // descriptor_key. + string header_name = 1 [(validate.rules).string.min_bytes = 1]; + + // The key to use in the descriptor entry. + string descriptor_key = 2 [(validate.rules).string.min_bytes = 1]; + } + + // The following descriptor entry is appended to the descriptor and is populated using the + // trusted address from :ref:`x-forwarded-for `: + // + // .. code-block:: cpp + // + // ("remote_address", "") + message RemoteAddress { + } + + // The following descriptor entry is appended to the descriptor: + // + // .. code-block:: cpp + // + // ("generic_key", "") + message GenericKey { + // The value to use in the descriptor entry. + string descriptor_value = 1 [(validate.rules).string.min_bytes = 1]; + } + + // The following descriptor entry is appended to the descriptor: + // + // .. code-block:: cpp + // + // ("header_match", "") + message HeaderValueMatch { + // The value to use in the descriptor entry. + string descriptor_value = 1 [(validate.rules).string.min_bytes = 1]; + + // If set to true, the action will append a descriptor entry when the + // request matches the headers. 
If set to false, the action will append a + // descriptor entry when the request does not match the headers. The + // default value is true. + google.protobuf.BoolValue expect_match = 2; + + // Specifies a set of headers that the rate limit action should match + // on. The action will check the request’s headers against all the + // specified headers in the config. A match will happen if all the + // headers in the config are present in the request with the same values + // (or based on presence if the value field is not in the config). + repeated HeaderMatcher headers = 3 [(validate.rules).repeated .min_items = 1]; + } + + oneof action_specifier { + option (validate.required) = true; + + // Rate limit on source cluster. + SourceCluster source_cluster = 1; + + // Rate limit on destination cluster. + DestinationCluster destination_cluster = 2; + + // Rate limit on request headers. + RequestHeaders request_headers = 3; + + // Rate limit on remote address. + RemoteAddress remote_address = 4; + + // Rate limit on a generic key. + GenericKey generic_key = 5; + + // Rate limit on the existence of request headers. + HeaderValueMatch header_value_match = 6; + } + } + + // A list of actions that are to be applied for this rate limit configuration. + // Order matters as the actions are processed sequentially and the descriptor + // is composed by appending descriptor entries in that sequence. If an action + // cannot append a descriptor entry, no descriptor is generated for the + // configuration. See :ref:`composing actions + // ` for additional documentation. + repeated Action actions = 3 [(validate.rules).repeated .min_items = 1]; +} + +// .. attention:: +// +// Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 *Host* +// header. Thus, if attempting to match on *Host*, match on *:authority* instead. +// +// .. attention:: +// +// To route on HTTP method, use the special HTTP/2 *:method* header. 
This works for both +// HTTP/1 and HTTP/2 as Envoy normalizes headers. E.g., +// +// .. code-block:: json +// +// { +// "name": ":method", +// "value": "POST" +// } +message HeaderMatcher { + // Specifies the name of the header in the request. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // Specifies the value of the header. If the value is absent a request that + // has the name header will match, regardless of the header’s value. + string value = 2; + + // Specifies whether the header value is a regular + // expression or not. Defaults to false. The entire request header value must match the regex. The + // rule will not match if only a subsequence of the request header value matches the regex. The + // regex grammar used in the value field is defined + // `here `_. + // + // Examples: + // + // * The regex *\d{3}* matches the value *123* + // * The regex *\d{3}* does not match the value *1234* + // * The regex *\d{3}* does not match the value *123.456* + google.protobuf.BoolValue regex = 3; + + // Specifies how the header match will be performed to route the request. + // If header_match_specifier is absent, a request that has the + // :ref:`envoy_api_msg_route.HeaderMatcher.name` header will match, regardless of the header's + // value. + oneof header_match_specifier { + // If specified, header match will be performed based on the value of the header. + string exact_match = 4; + + // If specified, this regex string is a regular expression rule which implies the entire request + // header value must match the regex. The rule will not match if only a subsequence of the + // request header value matches the regex. The regex grammar used in the value field is defined + // `here `_. 
+ // + // Examples: + // + // * The regex *\d{3}* matches the value *123* + // * The regex *\d{3}* does not match the value *1234* + // * The regex *\d{3}* does not match the value *123.456* + string regex_match = 5; + + // If specified, header match will be performed based on range. + // The rule will match if the request header value is within this range. + // The entire request header value must represent an integer in base 10 notation: consisting of + // an optional plus or minus sign followed by a sequence of digits. The rule will not match if + // the header value does not represent an integer. Match will fail for empty values, floating + // point numbers or if only a subsequence of the header value is an integer. + // + // Examples: + // + // * For range [-10,0), route will match for header value -1, but not for 0, "somestring", 10.9, + // "-1somestring" + envoy.type.Int64Range range_match = 6; + } +} + +// Query parameter matching treats the query string of a request's :path header +// as an ampersand-separated list of keys and/or key=value elements. +message QueryParameterMatcher { + // Specifies the name of a key that must be present in the requested + // *path*'s query string. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // Specifies the value of the key. If the value is absent, a request + // that contains the key in its query string will match, whether the + // key appears with a value (e.g., "?debug=true") or not (e.g., "?debug") + string value = 3; + + // Specifies whether the query parameter value is a regular expression. + // Defaults to false. The entire query parameter value (i.e., the part to + // the right of the equals sign in "key=value") must match the regex. + // E.g., the regex "\d+$" will match "123" but not "a123" or "123a". 
+ google.protobuf.BoolValue regex = 4; +} diff --git a/api/envoy/config/README.md b/api/envoy/config/README.md new file mode 100644 index 000000000000..279bd7c2e852 --- /dev/null +++ b/api/envoy/config/README.md @@ -0,0 +1,3 @@ +Protocol buffer definitions for Envoy's bootstrap, filter, and service configuration. + +Visibility should be constrained to none or `//envoy/config/bootstrap/v2` by default. diff --git a/api/envoy/config/accesslog/v2/BUILD b/api/envoy/config/accesslog/v2/BUILD new file mode 100644 index 000000000000..355fe3e9810b --- /dev/null +++ b/api/envoy/config/accesslog/v2/BUILD @@ -0,0 +1,11 @@ +load("//bazel:api_build_system.bzl", "api_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "als", + srcs = ["als.proto"], + deps = [ + "//envoy/api/v2/core:grpc_service", + ], +) diff --git a/api/envoy/config/accesslog/v2/als.proto b/api/envoy/config/accesslog/v2/als.proto new file mode 100644 index 000000000000..305e7860e501 --- /dev/null +++ b/api/envoy/config/accesslog/v2/als.proto @@ -0,0 +1,42 @@ +syntax = "proto3"; + +package envoy.config.accesslog.v2; +option go_package = "v2"; + +import "envoy/api/v2/core/grpc_service.proto"; + +import "validate/validate.proto"; + +// Configuration for the built-in *envoy.tcp_grpc_access_log* type. This configuration will +// populate *StreamAccessLogsMessage.tcp_logs*. +// [#not-implemented-hide:] +// [#comment:TODO(mattklein123): Block type in non-tcp proxy cases?] +message TcpGrpcAccessLogConfig { + CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message.required = true]; +} + +// Configuration for the built-in *envoy.http_grpc_access_log* type. This configuration will +// populate *StreamAccessLogsMessage.http_logs*. +// [#not-implemented-hide:] +// [#comment:TODO(mattklein123): Block type in non-http/router proxy cases?] 
+message HttpGrpcAccessLogConfig { + CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message.required = true]; + + // Additional request headers to log in *HTTPRequestProperties.request_headers*. + repeated string additional_request_headers_to_log = 2; + + // Additional response headers to log in *HTTPResponseProperties.response_headers*. + repeated string additional_response_headers_to_log = 3; +} + +// Common configuration for gRPC access logs. +// [#not-implemented-hide:] +message CommonGrpcAccessLogConfig { + // The friendly name of the access log to be returned in StreamAccessLogsMessage.Identifier. This + // allows the access log server to differentiate between different access logs coming from the + // same Envoy. + string log_name = 1 [(validate.rules).string.min_bytes = 1]; + + // The gRPC service for the access log service. + envoy.api.v2.core.GrpcService grpc_service = 2 [(validate.rules).message.required = true]; +} diff --git a/api/envoy/config/bootstrap/v2/BUILD b/api/envoy/config/bootstrap/v2/BUILD new file mode 100644 index 000000000000..80736d98196f --- /dev/null +++ b/api/envoy/config/bootstrap/v2/BUILD @@ -0,0 +1,37 @@ +load("//bazel:api_build_system.bzl", "api_proto_library", "api_go_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "bootstrap", + srcs = ["bootstrap.proto"], + deps = [ + "//envoy/api/v2:cds", + "//envoy/api/v2:lds", + "//envoy/api/v2/auth:cert", + "//envoy/api/v2/core:address", + "//envoy/api/v2/core:base", + "//envoy/api/v2/core:config_source", + "//envoy/config/metrics/v2:metrics_service", + "//envoy/config/metrics/v2:stats", + "//envoy/config/ratelimit/v2:rls", + "//envoy/config/trace/v2:trace", + ], +) + +api_go_proto_library( + name = "bootstrap", + proto = ":bootstrap", + deps = [ + "//envoy/api/v2:cds_go_grpc", + "//envoy/api/v2:lds_go_grpc", + "//envoy/api/v2/auth:cert_go_proto", + "//envoy/api/v2/core:address_go_proto", + "//envoy/api/v2/core:base_go_proto", + 
"//envoy/api/v2/core:config_source_go_proto", + "//envoy/config/metrics/v2:metrics_service_go_proto", + "//envoy/config/metrics/v2:stats_go_proto", + "//envoy/config/ratelimit/v2:rls_go_grpc", + "//envoy/config/trace/v2:trace_go_proto", + ], +) diff --git a/api/envoy/config/bootstrap/v2/bootstrap.proto b/api/envoy/config/bootstrap/v2/bootstrap.proto new file mode 100644 index 000000000000..5f7b4b79db47 --- /dev/null +++ b/api/envoy/config/bootstrap/v2/bootstrap.proto @@ -0,0 +1,215 @@ +// [#protodoc-title: Bootstrap] +// This proto is supplied via the :option:`-c` CLI flag and acts as the root +// of the Envoy v2 configuration. See the :ref:`v2 configuration overview +// ` for more detail. + +syntax = "proto3"; + +package envoy.config.bootstrap.v2; +option go_package = "v2"; + +import "envoy/api/v2/core/address.proto"; +import "envoy/api/v2/core/base.proto"; +import "envoy/api/v2/auth/cert.proto"; +import "envoy/api/v2/core/config_source.proto"; +import "envoy/api/v2/cds.proto"; +import "envoy/api/v2/lds.proto"; +import "envoy/config/trace/v2/trace.proto"; +import "envoy/config/metrics/v2/stats.proto"; +import "envoy/config/ratelimit/v2/rls.proto"; + +import "google/protobuf/duration.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +// Bootstrap :ref:`configuration overview `. +message Bootstrap { + // Node identity to present to the management server and for instance + // identification purposes (e.g. in generated headers). + envoy.api.v2.core.Node node = 1; + + message StaticResources { + // Static :ref:`Listeners `. These listeners are + // available regardless of LDS configuration. + repeated envoy.api.v2.Listener listeners = 1 [(gogoproto.nullable) = false]; + + // If a network based configuration source is specified for :ref:`cds_config + // `, it's necessary + // to have some initial cluster definitions available to allow Envoy to know + // how to speak to the management server. 
These cluster definitions may not + // use :ref:`EDS ` (i.e. they should be static + // IP or DNS-based). + repeated envoy.api.v2.Cluster clusters = 2 [(gogoproto.nullable) = false]; + + // [#not-implemented-hide:] + repeated envoy.api.v2.auth.Secret secrets = 3 [(gogoproto.nullable) = false]; + } + // Statically specified resources. + StaticResources static_resources = 2; + + message DynamicResources { + // All :ref:`Listeners ` are provided by a single + // :ref:`LDS ` configuration source. + envoy.api.v2.core.ConfigSource lds_config = 1; + + // All post-bootstrap :ref:`Cluster ` definitions are + // provided by a single :ref:`CDS ` + // configuration source. + envoy.api.v2.core.ConfigSource cds_config = 2; + + // A single :ref:`ADS ` source may be optionally + // specified. This must have :ref:`api_type + // ` :ref:`GRPC + // `. Only + // :ref:`ConfigSources ` that have + // the :ref:`ads ` field set will be + // streamed on the ADS channel. + envoy.api.v2.core.ApiConfigSource ads_config = 3; + + // [#not-implemented-hide:] Hide from docs. + message DeprecatedV1 { + // This is the global :ref:`SDS ` config + // when using v1 REST for :ref:`CDS + // `/:ref:`EDS + // `. + envoy.api.v2.core.ConfigSource sds_config = 1; + } + + // [#not-implemented-hide:] Hide from docs. + DeprecatedV1 deprecated_v1 = 4 [deprecated = true]; + } + // xDS configuration sources. + DynamicResources dynamic_resources = 3; + + // Configuration for the cluster manager which owns all upstream clusters + // within the server. + ClusterManager cluster_manager = 4; + + // Optional file system path to search for startup flag files. + string flags_path = 5; + + // Optional set of stats sinks. + repeated envoy.config.metrics.v2.StatsSink stats_sinks = 6; + + // Configuration for internal processing of stats. + envoy.config.metrics.v2.StatsConfig stats_config = 13; + + // Optional duration between flushes to configured stats sinks. 
For + // performance reasons Envoy latches counters and only flushes counters and + // gauges at a periodic interval. If not specified the default is 5000ms (5 + // seconds). + google.protobuf.Duration stats_flush_interval = 7 [(gogoproto.stdduration) = true]; + + // Optional watchdog configuration. + Watchdog watchdog = 8; + + // Configuration for an external tracing provider. If not specified, no + // tracing will be performed. + envoy.config.trace.v2.Tracing tracing = 9; + + // Configuration for an external rate limit service provider. If not + // specified, any calls to the rate limit service will immediately return + // success. + envoy.config.ratelimit.v2.RateLimitServiceConfig rate_limit_service = 10; + + // Configuration for the runtime configuration provider. If not specified, a + // “null” provider will be used which will result in all defaults being used. + Runtime runtime = 11; + + // Configuration for the local administration HTTP server. + Admin admin = 12 [(validate.rules).message.required = true, (gogoproto.nullable) = false]; +} + +// Administration interface :ref:`operations documentation +// `. +message Admin { + // The path to write the access log for the administration server. If no + // access log is desired specify ‘/dev/null’. + string access_log_path = 1 [(validate.rules).string.min_bytes = 1]; + + // The cpu profiler output path for the administration server. If no profile + // path is specified, the default is ‘/var/log/envoy/envoy.prof’. + string profile_path = 2; + + // The TCP address that the administration server will listen on. + envoy.api.v2.core.Address address = 3 + [(validate.rules).message.required = true, (gogoproto.nullable) = false]; +} + +// Cluster manager :ref:`architecture overview `. +message ClusterManager { + // Name of the local cluster (i.e., the cluster that owns the Envoy running + // this configuration). In order to enable :ref:`zone aware routing + // ` this option must be set. 
+ // If *local_cluster_name* is defined then :ref:`clusters + // ` must be defined in the :ref:`Bootstrap + // static cluster resources + // `. This is unrelated to + // the :option:`--service-cluster` option which does not `affect zone aware + // routing `_. + string local_cluster_name = 1; + + message OutlierDetection { + // Specifies the path to the outlier event log. + string event_log_path = 1; + } + // Optional global configuration for outlier detection. + OutlierDetection outlier_detection = 2; + + // Optional configuration used to bind newly established upstream connections. + // This may be overridden on a per-cluster basis by upstream_bind_config in the cds_config. + envoy.api.v2.core.BindConfig upstream_bind_config = 3; + + // A management server endpoint to stream load stats to via + // *StreamLoadStats*. This must have :ref:`api_type + // ` :ref:`GRPC + // `. + envoy.api.v2.core.ApiConfigSource load_stats_config = 4; +} + +// Envoy process watchdog configuration. When configured, this monitors for +// nonresponsive threads and kills the process after the configured thresholds. +message Watchdog { + // The duration after which Envoy counts a nonresponsive thread in the + // *server.watchdog_miss* statistic. If not specified the default is 200ms. + google.protobuf.Duration miss_timeout = 1; + + // The duration after which Envoy counts a nonresponsive thread in the + // *server.watchdog_mega_miss* statistic. If not specified the default is + // 1000ms. + google.protobuf.Duration megamiss_timeout = 2; + + // If a watched thread has been nonresponsive for this duration, assume a + // programming error and kill the entire Envoy process. Set to 0 to disable + // kill behavior. If not specified the default is 0 (disabled). + google.protobuf.Duration kill_timeout = 3; + + // If at least two watched threads have been nonresponsive for at least this + // duration assume a true deadlock and kill the entire Envoy process. Set to 0 + // to disable this behavior. 
If not specified the default is 0 (disabled). + google.protobuf.Duration multikill_timeout = 4; +} + +// Runtime :ref:`configuration overview `. +message Runtime { + // The implementation assumes that the file system tree is accessed via a + // symbolic link. An atomic link swap is used when a new tree should be + // switched to. This parameter specifies the path to the symbolic link. Envoy + // will watch the location for changes and reload the file system tree when + // they happen. + string symlink_root = 1 [(validate.rules).string.min_bytes = 1]; + + // Specifies the subdirectory to load within the root directory. This is + // useful if multiple systems share the same delivery mechanism. Envoy + // configuration elements can be contained in a dedicated subdirectory. + string subdirectory = 2; + + // Specifies an optional subdirectory to load within the root directory. If + // specified and the directory exists, configuration values within this + // directory will override those found in the primary subdirectory. This is + // useful when Envoy is deployed across many different types of servers. + // Sometimes it is useful to have a per service cluster directory for runtime + // configuration. See below for exactly how the override directory is used. + string override_subdirectory = 3; +} diff --git a/api/envoy/config/filter/README.md b/api/envoy/config/filter/README.md new file mode 100644 index 000000000000..11f26a08f6e8 --- /dev/null +++ b/api/envoy/config/filter/README.md @@ -0,0 +1,24 @@ +Protocol buffer definitions for filters. + +Visibility of the definitions should be constrained to none except for +shared definitions between explicitly enumerated filters (e.g. accesslog and fault definitions). + +## NOTE + +If a filter configuration is not captured in the proto specification, you +can still supply plain JSON configuration objects for such filters by +setting the `"deprecated_v1"` field to true in the filter's +configuration. 
For example, + +```json +{ + "name": "envoy.rate_limit", + "config": { + "deprecated_v1": true, + "value": { + "domain": "some_domain", + "timeout_ms": 500 + } + } + } +``` diff --git a/api/envoy/config/filter/accesslog/v2/BUILD b/api/envoy/config/filter/accesslog/v2/BUILD new file mode 100644 index 000000000000..af0f16071647 --- /dev/null +++ b/api/envoy/config/filter/accesslog/v2/BUILD @@ -0,0 +1,33 @@ +load("//bazel:api_build_system.bzl", "api_proto_library", "api_go_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "accesslog", + srcs = ["accesslog.proto"], + visibility = [ + "//envoy/config/filter/http/router/v2:__pkg__", + "//envoy/config/filter/network/http_connection_manager/v2:__pkg__", + "//envoy/config/filter/network/tcp_proxy/v2:__pkg__", + "//envoy/service/accesslog/v2:__pkg__", + ], + deps = [ + "//envoy/api/v2/core:address", + "//envoy/api/v2/core:base", + "//envoy/api/v2/core:grpc_service", + "//envoy/api/v2/route", + "//envoy/type:percent", + ], +) + +api_go_proto_library( + name = "accesslog", + proto = ":accesslog", + deps = [ + "//envoy/api/v2/core:address_go_proto", + "//envoy/api/v2/core:base_go_proto", + "//envoy/api/v2/core:grpc_service_go_proto", + "//envoy/api/v2/route:route_go_proto", + "//envoy/type:percent_go_proto", + ], +) diff --git a/api/envoy/config/filter/accesslog/v2/accesslog.proto b/api/envoy/config/filter/accesslog/v2/accesslog.proto new file mode 100644 index 000000000000..b4d125e9b957 --- /dev/null +++ b/api/envoy/config/filter/accesslog/v2/accesslog.proto @@ -0,0 +1,438 @@ +syntax = "proto3"; + +package envoy.config.filter.accesslog.v2; +option go_package = "v2"; + +import "envoy/api/v2/core/address.proto"; +import "envoy/api/v2/core/base.proto"; +import "envoy/api/v2/route/route.proto"; +import "envoy/type/percent.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; + 
+import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +// [#protodoc-title: Common access log types] +// Envoy access logs describe incoming interaction with Envoy over a fixed +// period of time, and typically cover a single request/response exchange, +// (e.g. HTTP), stream (e.g. over HTTP/gRPC), or proxied connection (e.g. TCP). +// Access logs contain fields defined in protocol-specific protobuf messages. +// +// Except where explicitly declared otherwise, all fields describe +// *downstream* interaction between Envoy and a connected client. +// Fields describing *upstream* interaction will explicitly include ``upstream`` +// in their name. + +// Defines fields that are shared by all Envoy access logs. +// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. +message AccessLogCommon { + // This field indicates the rate at which this log entry was sampled. + // Valid range is (0.0, 1.0]. + double sample_rate = 1 [(validate.rules).double.gt = 0.0, (validate.rules).double.lte = 1.0]; + + // This field is the remote/origin address on which the request from the user was received. + // Note: This may not be the physical peer. E.g, if the remote address is inferred from for + // example the x-forwarder-for header, proxy protocol, etc. + envoy.api.v2.core.Address downstream_remote_address = 2; + + // This field is the local/destination address on which the request from the user was received. + envoy.api.v2.core.Address downstream_local_address = 3; + + // If the connection is secure, this field will contain TLS properties. + TLSProperties tls_properties = 4; + + // The time that Envoy started servicing this request. This is effectively the time that the first + // downstream byte is received. + google.protobuf.Timestamp start_time = 5 [(gogoproto.stdtime) = true]; + + // Interval between the first downstream byte received and the last + // downstream byte received (i.e. time it takes to receive a request). 
+ google.protobuf.Duration time_to_last_rx_byte = 6 [(gogoproto.stdduration) = true]; + + // Interval between the first downstream byte received and the first upstream byte sent. There may + // by considerable delta between *time_to_last_rx_byte* and this value due to filters. + // Additionally, the same caveats apply as documented in *time_to_last_downstream_tx_byte* about + // not accounting for kernel socket buffer time, etc. + google.protobuf.Duration time_to_first_upstream_tx_byte = 7 [(gogoproto.stdduration) = true]; + + // Interval between the first downstream byte received and the last upstream byte sent. There may + // by considerable delta between *time_to_last_rx_byte* and this value due to filters. + // Additionally, the same caveats apply as documented in *time_to_last_downstream_tx_byte* about + // not accounting for kernel socket buffer time, etc. + google.protobuf.Duration time_to_last_upstream_tx_byte = 8 [(gogoproto.stdduration) = true]; + + // Interval between the first downstream byte received and the first upstream + // byte received (i.e. time it takes to start receiving a response). + google.protobuf.Duration time_to_first_upstream_rx_byte = 9 [(gogoproto.stdduration) = true]; + + // Interval between the first downstream byte received and the last upstream + // byte received (i.e. time it takes to receive a complete response). + google.protobuf.Duration time_to_last_upstream_rx_byte = 10 [(gogoproto.stdduration) = true]; + + // Interval between the first downstream byte received and the first downstream byte sent. + // There may be a considerable delta between the *time_to_first_upstream_rx_byte* and this field + // due to filters. Additionally, the same caveats apply as documented in + // *time_to_last_downstream_tx_byte* about not accounting for kernel socket buffer time, etc. 
+ google.protobuf.Duration time_to_first_downstream_tx_byte = 11 [(gogoproto.stdduration) = true]; + + // Interval between the first downstream byte received and the last downstream byte sent. + // Depending on protocol, buffering, windowing, filters, etc. there may be a considerable delta + // between *time_to_last_upstream_rx_byte* and this field. Note also that this is an approximate + // time. In the current implementation it does not include kernel socket buffer time. In the + // current implementation it also does not include send window buffering inside the HTTP/2 codec. + // In the future it is likely that work will be done to make this duration more accurate. + google.protobuf.Duration time_to_last_downstream_tx_byte = 12 [(gogoproto.stdduration) = true]; + + // The upstream remote/destination address that handles this exchange. This does not include + // retries. + envoy.api.v2.core.Address upstream_remote_address = 13; + + // The upstream local/origin address that handles this exchange. This does not include retries. + envoy.api.v2.core.Address upstream_local_address = 14; + + // The upstream cluster that *upstream_remote_address* belongs to. + string upstream_cluster = 15; + + // Flags indicating occurrences during request/response processing. + ResponseFlags response_flags = 16; + + // All metadata encountered during request processing, including endpoint + // selection. + // + // This can be used to associate IDs attached to the various configurations + // used to process this request with the access log entry. For example, a + // route created from a higher level forwarding rule with some ID can place + // that ID in this field and cross reference later. It can also be used to + // determine if a canary endpoint was used or not. + envoy.api.v2.core.Metadata metadata = 17; +} + +// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. +// Flags indicating occurrences during request/response processing. 
+message ResponseFlags { + // Indicates local server healthcheck failed. + bool failed_local_healthcheck = 1; + + // Indicates there was no healthy upstream. + bool no_healthy_upstream = 2; + + // Indicates an there was an upstream request timeout. + bool upstream_request_timeout = 3; + + // Indicates local codec level reset was sent on the stream. + bool local_reset = 4; + + // Indicates remote codec level reset was received on the stream. + bool upstream_remote_reset = 5; + + // Indicates there was a local reset by a connection pool due to an initial connection failure. + bool upstream_connection_failure = 6; + + // Indicates the stream was reset locally due to connection termination. + bool upstream_connection_termination = 7; + + // Indicates the stream was reset because of a resource overflow. + bool upstream_overflow = 8; + + // Indicates no route was found for the request. + bool no_route_found = 9; + + // Indicates that the request was delayed before proxying. + bool delay_injected = 10; + + // Indicates that the request was aborted with an injected error code. + bool fault_injected = 11; + + // Indicates that the request was rate-limited locally. + bool rate_limited = 12; + + message Unauthorized { + // Reasons why the request was unauthorized + enum Reason { + REASON_UNSPECIFIED = 0; + // The request was denied by the external authorization service. + EXTERNAL_SERVICE = 1; + } + + Reason reason = 1; + } + + // Indicates if the request was deemed unauthorized and the reason for it. + Unauthorized unauthorized_details = 13; +} + +// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. +// Properties of a negotiated TLS connection. +message TLSProperties { + enum TLSVersion { + VERSION_UNSPECIFIED = 0; + TLSv1 = 1; + TLSv1_1 = 2; + TLSv1_2 = 3; + TLSv1_3 = 4; + } + // Version of TLS that was negotiated. + TLSVersion tls_version = 1; + + // TLS cipher suite negotiated during handshake. 
The value is a + // four-digit hex code defined by the IANA TLS Cipher Suite Registry + // (e.g. ``009C`` for ``TLS_RSA_WITH_AES_128_GCM_SHA256``). + // + // Here it is expressed as an integer. + google.protobuf.UInt32Value tls_cipher_suite = 2; + + // SNI hostname from handshake. + string tls_sni_hostname = 3; +} + +// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. +message TCPAccessLogEntry { + // Common properties shared by all Envoy access logs. + AccessLogCommon common_properties = 1; +} + +// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. +message HTTPRequestProperties { + // The request method (RFC 7231/2616). + // [#comment:TODO(htuch): add (validate.rules).enum.defined_only = true once + // https://github.com/lyft/protoc-gen-validate/issues/42 is resolved.] + envoy.api.v2.core.RequestMethod request_method = 1; + + // The scheme portion of the incoming request URI. + string scheme = 2; + + // HTTP/2 ``:authority`` or HTTP/1.1 ``Host`` header value. + string authority = 3; + + // The port of the incoming request URI + // (unused currently, as port is composed onto authority). + google.protobuf.UInt32Value port = 4; + + // The path portion from the incoming request URI. + string path = 5; + + // Value of the ``User-Agent`` request header. + string user_agent = 6; + + // Value of the ``Referer`` request header. + string referer = 7; + + // Value of the ``X-Forwarded-For`` request header. + string forwarded_for = 8; + + // Value of the ``X-Request-Id`` request header + // + // This header is used by Envoy to uniquely identify a request. + // It will be generated for all external requests and internal requests that + // do not already have a request ID. + string request_id = 9; + + // Value of the ``X-Envoy-Original-Path`` request header. + string original_path = 10; + + // Size of the HTTP request headers in bytes. + // + // This value is captured from the OSI layer 7 perspective, i.e. 
it does not + // include overhead from framing or encoding at other networking layers. + uint64 request_headers_bytes = 11; + + // Size of the HTTP request body in bytes. + // + // This value is captured from the OSI layer 7 perspective, i.e. it does not + // include overhead from framing or encoding at other networking layers. + uint64 request_body_bytes = 12; + + // Map of additional headers that have been configured to be logged. + map request_headers = 13; +} + +// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. +message HTTPResponseProperties { + // The HTTP response code returned by Envoy. + google.protobuf.UInt32Value response_code = 1; + + // Size of the HTTP response headers in bytes. + // + // This value is captured from the OSI layer 7 perspective, i.e. it does not + // include overhead from framing or encoding at other networking layers. + uint64 response_headers_bytes = 2; + + // Size of the HTTP response body in bytes. + // + // This value is captured from the OSI layer 7 perspective, i.e. it does not + // include overhead from framing or encoding at other networking layers. + uint64 response_body_bytes = 3; + + // Map of additional headers configured to be logged. + map response_headers = 4; +} + +// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. +message HTTPAccessLogEntry { + // Common properties shared by all Envoy access logs. + AccessLogCommon common_properties = 1; + + // HTTP version + enum HTTPVersion { + PROTOCOL_UNSPECIFIED = 0; + HTTP10 = 1; + HTTP11 = 2; + HTTP2 = 3; + } + HTTPVersion protocol_version = 2; + + // Description of the incoming HTTP request. + HTTPRequestProperties request = 3; + + // Description of the outgoing HTTP response. + HTTPResponseProperties response = 4; +} + +message AccessLog { + // The name of the access log implementation to instantiate. The name must + // match a statically registered access log. 
Current built-in loggers include: + // 1) "envoy.file_access_log" + string name = 1; + + // Filter which is used to determine if the access log needs to be written. + AccessLogFilter filter = 2; + + // Custom configuration that depends on the access log being instantiated. built-in configurations + // include: + // 1) "envoy.file_access_log": :ref:`FileAccessLog + // ` + google.protobuf.Struct config = 3; +} + +message AccessLogFilter { + oneof filter_specifier { + option (validate.required) = true; + + // Status code filter. + StatusCodeFilter status_code_filter = 1; + + // Duration filter. + DurationFilter duration_filter = 2; + + // Not health check filter. + NotHealthCheckFilter not_health_check_filter = 3; + + // Traceable filter. + TraceableFilter traceable_filter = 4; + + // Runtime filter. + RuntimeFilter runtime_filter = 5; + + // And filter. + AndFilter and_filter = 6; + + // Or filter. + OrFilter or_filter = 7; + + // [#not-implemented-hide:] Header filter. + HeaderFilter header_filter = 8; + } +} + +// Filter on an integer comparison. +message ComparisonFilter { + enum Op { + // = + EQ = 0; + + // >= + GE = 1; + + // <= + LE = 2; + } + + // Comparison operator. + Op op = 1 [(validate.rules).enum.defined_only = true]; + + // Value to compare against. + envoy.api.v2.core.RuntimeUInt32 value = 2; +} + +// Filters on HTTP response/status code. +message StatusCodeFilter { + // Comparison. + ComparisonFilter comparison = 1 [(validate.rules).message.required = true]; +} + +// Filters on total request duration in milliseconds. +message DurationFilter { + // Comparison. + ComparisonFilter comparison = 1 [(validate.rules).message.required = true]; +} + +// Filters for requests that are not health check requests. A health check +// request is marked by the health check filter. +message NotHealthCheckFilter { +} + +// Filters for requests that are traceable. See the tracing overview for more +// information on how a request becomes traceable. 
+message TraceableFilter { +} + +// Filters for random sampling of requests. +message RuntimeFilter { + // Runtime key to get an optional overridden numerator for use in the *percent_sampled* field. + // If found in runtime, this value will replace the default numerator. + string runtime_key = 1 [(validate.rules).string.min_bytes = 1]; + + // The default sampling percentage. If not specified, defaults to 0% with denominator of 100. + envoy.type.FractionalPercent percent_sampled = 2; + + // By default, sampling pivots on the header + // :ref:`x-request-id` being present. If + // :ref:`x-request-id` is present, the filter will + // consistently sample across multiple hosts based on the runtime key value and the value + // extracted from :ref:`x-request-id`. If it is + // missing, or *use_independent_randomness* is set to true, the filter will randomly sample based + // on the runtime key value alone. *use_independent_randomness* can be used for logging kill + // switches within complex nested :ref:`AndFilter + // ` and :ref:`OrFilter + // ` blocks that are easier to reason about + // from a probability perspective (i.e., setting to true will cause the filter to behave like + // an independent random variable when composed within logical operator filters). + bool use_independent_randomness = 3; +} + +// Performs a logical “and” operation on the result of each filter in filters. +// Filters are evaluated sequentially and if one of them returns false, the +// filter returns false immediately. +message AndFilter { + repeated AccessLogFilter filters = 1 [(validate.rules).repeated .min_items = 2]; +} + +// Performs a logical “or” operation on the result of each individual filter. +// Filters are evaluated sequentially and if one of them returns true, the +// filter returns true immediately. 
+message OrFilter { + repeated AccessLogFilter filters = 2 [(validate.rules).repeated .min_items = 2]; +} + +// [#not-implemented-hide:] Filters requests based on the presence or value of a request header. +message HeaderFilter { + // Only requests with a header which matches the specified HeaderMatcher will pass the filter + // check. + envoy.api.v2.route.HeaderMatcher header = 1 [(validate.rules).message.required = true]; +} + +// Custom configuration for an AccessLog that writes log entries directly to a file. +// Configures the built-in *envoy.file_access_log* AccessLog. +message FileAccessLog { + // A path to a local file to which to write the access log entries. + string path = 1 [(validate.rules).string.min_bytes = 1]; + + // Access log format. Envoy supports :ref:`custom access log formats + // ` as well as a :ref:`default format + // `. + string format = 2; +} diff --git a/api/envoy/config/filter/fault/v2/BUILD b/api/envoy/config/filter/fault/v2/BUILD new file mode 100644 index 000000000000..0b4310f48e36 --- /dev/null +++ b/api/envoy/config/filter/fault/v2/BUILD @@ -0,0 +1,12 @@ +load("//bazel:api_build_system.bzl", "api_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "fault", + srcs = ["fault.proto"], + visibility = [ + "//envoy/config/filter/http/fault/v2:__pkg__", + "//envoy/config/filter/network/mongo_proxy/v2:__pkg__", + ], +) diff --git a/api/envoy/config/filter/fault/v2/fault.proto b/api/envoy/config/filter/fault/v2/fault.proto new file mode 100644 index 000000000000..d8f06ceef9e7 --- /dev/null +++ b/api/envoy/config/filter/fault/v2/fault.proto @@ -0,0 +1,40 @@ +syntax = "proto3"; + +package envoy.config.filter.fault.v2; +option go_package = "v2"; + +import "google/protobuf/duration.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +// [#protodoc-title: Common fault injection types] + +// Delay specification is used to inject latency into the +// HTTP/gRPC/Mongo/Redis operation or delay 
proxying of TCP connections. +message FaultDelay { + enum FaultDelayType { + // Fixed delay (step function). + FIXED = 0; + } + + // Delay type to use (fixed|exponential|..). Currently, only fixed delay (step function) is + // supported. + FaultDelayType type = 1 [(validate.rules).enum.defined_only = true]; + + // An integer between 0-100 indicating the percentage of operations/connection requests + // on which the delay will be injected. + uint32 percent = 2 [(validate.rules).uint32.lte = 100]; + + oneof fault_delay_secifier { + option (validate.required) = true; + // Add a fixed delay before forwarding the operation upstream. See + // https://developers.google.com/protocol-buffers/docs/proto3#json for + // the JSON/YAML Duration mapping. For HTTP/Mongo/Redis, the specified + // delay will be injected before a new request/operation. For TCP + // connections, the proxying of the connection upstream will be delayed + // for the specified period. This is required if type is FIXED. + google.protobuf.Duration fixed_delay = 3 + [(validate.rules).duration.gt = {}, (gogoproto.stdduration) = true]; + } +} diff --git a/api/envoy/config/filter/http/buffer/v2/BUILD b/api/envoy/config/filter/http/buffer/v2/BUILD new file mode 100644 index 000000000000..d2be36c572c4 --- /dev/null +++ b/api/envoy/config/filter/http/buffer/v2/BUILD @@ -0,0 +1,8 @@ +load("//bazel:api_build_system.bzl", "api_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "buffer", + srcs = ["buffer.proto"], +) diff --git a/api/envoy/config/filter/http/buffer/v2/buffer.proto b/api/envoy/config/filter/http/buffer/v2/buffer.proto new file mode 100644 index 000000000000..38a1b4206b59 --- /dev/null +++ b/api/envoy/config/filter/http/buffer/v2/buffer.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +package envoy.config.filter.http.buffer.v2; +option go_package = "v2"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; 
+import "gogoproto/gogo.proto"; + +// [#protodoc-title: Buffer] +// Buffer :ref:`configuration overview `. + +message Buffer { + // The maximum request size that the filter will buffer before the connection + // manager will stop buffering and return a 413 response. + google.protobuf.UInt32Value max_request_bytes = 1 [(validate.rules).uint32.gt = 0]; + + // The maximum number of seconds that the filter will wait for a complete + // request before returning a 408 response. + google.protobuf.Duration max_request_time = 2 + [(validate.rules).duration = {required: true, gt: {}}, (gogoproto.stdduration) = true]; +} diff --git a/api/envoy/config/filter/http/ext_authz/v2alpha/BUILD b/api/envoy/config/filter/http/ext_authz/v2alpha/BUILD new file mode 100644 index 000000000000..62e7fc3d6464 --- /dev/null +++ b/api/envoy/config/filter/http/ext_authz/v2alpha/BUILD @@ -0,0 +1,12 @@ +load("//bazel:api_build_system.bzl", "api_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "ext_authz", + srcs = ["ext_authz.proto"], + deps = [ + "//envoy/api/v2/core:grpc_service", + "//envoy/api/v2/core:http_uri", + ], +) diff --git a/api/envoy/config/filter/http/ext_authz/v2alpha/ext_authz.proto b/api/envoy/config/filter/http/ext_authz/v2alpha/ext_authz.proto new file mode 100644 index 000000000000..a9392b855164 --- /dev/null +++ b/api/envoy/config/filter/http/ext_authz/v2alpha/ext_authz.proto @@ -0,0 +1,34 @@ +syntax = "proto3"; + +package envoy.config.filter.http.ext_authz.v2alpha; +option go_package = "v2alpha"; + +import "envoy/api/v2/core/grpc_service.proto"; +import "envoy/api/v2/core/http_uri.proto"; + +// The external authorization HTTP service configuration. +message HttpService { + // Sets the HTTP server URI which the authorization requests must be sent to. + envoy.api.v2.core.HttpUri server_uri = 1; + + // Sets an optional prefix to the value of authorization request header `path`. 
+ string path_prefix = 2; +} + +message ExtAuthz { + + oneof services { + // The external authorization gRPC service configuration. + envoy.api.v2.core.GrpcService grpc_service = 1; + + // The external authorization HTTP service configuration. + HttpService http_service = 3; + } + + // The filter's behaviour in case the external authorization service does + // not respond back. If set to true then in case of failure to get a + // response back from the authorization service or getting a response that + // is NOT denied then traffic will be permitted. + // Defaults to false. + bool failure_mode_allow = 2; +} diff --git a/api/envoy/config/filter/http/fault/v2/BUILD b/api/envoy/config/filter/http/fault/v2/BUILD new file mode 100644 index 000000000000..0c517c3e666d --- /dev/null +++ b/api/envoy/config/filter/http/fault/v2/BUILD @@ -0,0 +1,12 @@ +load("//bazel:api_build_system.bzl", "api_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "fault", + srcs = ["fault.proto"], + deps = [ + "//envoy/api/v2/route", + "//envoy/config/filter/fault/v2:fault", + ], +) diff --git a/api/envoy/config/filter/http/fault/v2/fault.proto b/api/envoy/config/filter/http/fault/v2/fault.proto new file mode 100644 index 000000000000..894d2b7846e2 --- /dev/null +++ b/api/envoy/config/filter/http/fault/v2/fault.proto @@ -0,0 +1,59 @@ +syntax = "proto3"; + +package envoy.config.filter.http.fault.v2; +option go_package = "v2"; + +import "envoy/config/filter/fault/v2/fault.proto"; +import "envoy/api/v2/route/route.proto"; + +import "validate/validate.proto"; + +// [#protodoc-title: Fault Injection] +// Fault Injection :ref:`configuration overview `. + +message FaultAbort { + // An integer between 0-100 indicating the percentage of requests/operations/connections + // that will be aborted with the error code provided. 
+ uint32 percent = 1 [(validate.rules).uint32.lte = 100]; + + oneof error_type { + option (validate.required) = true; + + // HTTP status code to use to abort the HTTP request. + uint32 http_status = 2 [(validate.rules).uint32 = {gte: 200, lt: 600}]; + } +} + +message HTTPFault { + // If specified, the filter will inject delays based on the values in the + // object. At least *abort* or *delay* must be specified. + envoy.config.filter.fault.v2.FaultDelay delay = 1; + + // If specified, the filter will abort requests based on the values in + // the object. At least *abort* or *delay* must be specified. + FaultAbort abort = 2; + + // Specifies the name of the (destination) upstream cluster that the + // filter should match on. Fault injection will be restricted to requests + // bound to the specific upstream cluster. + string upstream_cluster = 3; + + // Specifies a set of headers that the filter should match on. The fault + // injection filter can be applied selectively to requests that match a set of + // headers specified in the fault filter config. The chances of actual fault + // injection further depend on the value of the :ref:`percent + // ` field. The filter will + // check the request's headers against all the specified headers in the filter + // config. A match will happen if all the headers in the config are present in + // the request with the same values (or based on presence if the *value* field + // is not in the config). + repeated envoy.api.v2.route.HeaderMatcher headers = 4; + + // Faults are injected for the specified list of downstream hosts. If this + // setting is not set, faults are injected for all downstream nodes. + // Downstream node name is taken from :ref:`the HTTP + // x-envoy-downstream-service-node + // ` header and compared + // against downstream_nodes list. 
+ repeated string downstream_nodes = 5; +} diff --git a/api/envoy/config/filter/http/gzip/v2/BUILD b/api/envoy/config/filter/http/gzip/v2/BUILD new file mode 100644 index 000000000000..e1b592f4aee7 --- /dev/null +++ b/api/envoy/config/filter/http/gzip/v2/BUILD @@ -0,0 +1,8 @@ +load("//bazel:api_build_system.bzl", "api_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "gzip", + srcs = ["gzip.proto"], +) diff --git a/api/envoy/config/filter/http/gzip/v2/gzip.proto b/api/envoy/config/filter/http/gzip/v2/gzip.proto new file mode 100644 index 000000000000..d98972e936aa --- /dev/null +++ b/api/envoy/config/filter/http/gzip/v2/gzip.proto @@ -0,0 +1,71 @@ +syntax = "proto3"; + +package envoy.config.filter.http.gzip.v2; +option go_package = "v2"; + +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +// [#protodoc-title: Gzip] +// Gzip :ref:`configuration overview `. + +message Gzip { + // Value from 1 to 9 that controls the amount of internal memory used by zlib. Higher values + // use more memory, but are faster and produce better compression results. The default value is 5. + google.protobuf.UInt32Value memory_level = 1 [(validate.rules).uint32 = {gte: 1, lte: 9}]; + + // Minimum response length, in bytes, which will trigger compression. The default value is 30. + google.protobuf.UInt32Value content_length = 2 [(validate.rules).uint32.gte = 30]; + + message CompressionLevel { + enum Enum { + DEFAULT = 0; + BEST = 1; + SPEED = 2; + } + } + + // A value used for selecting the zlib compression level. This setting will affect speed and + // amount of compression applied to the content. "BEST" provides higher compression at the cost of + // higher latency, "SPEED" provides lower compression with minimum impact on response time. + // "DEFAULT" provides an optimal result between speed and compression. This field will be set to + // "DEFAULT" if not specified. 
+ CompressionLevel.Enum compression_level = 3 [(validate.rules).enum.defined_only = true]; + + enum CompressionStrategy { + DEFAULT = 0; + FILTERED = 1; + HUFFMAN = 2; + RLE = 3; + } + + // A value used for selecting the zlib compression strategy which is directly related to the + // characteristics of the content. Most of the time "DEFAULT" will be the best choice, though + // there are situations which changing this parameter might produce better results. For example, + // run-length encoding (RLE) is typically used when the content is known for having sequences + // which same data occurs many consecutive times. For more information about each strategy, please + // refer to zlib manual. + CompressionStrategy compression_strategy = 4 [(validate.rules).enum.defined_only = true]; + + // Set of strings that allows specifying which mime-types yield compression; e.g., + // application/json, text/html, etc. When this field is not defined, compression will be applied + // to the following mime-types: "application/javascript", "application/json", + // "application/xhtml+xml", "image/svg+xml", "text/css", "text/html", "text/plain", "text/xml". + repeated string content_type = 6 [(validate.rules).repeated = {max_items: 50}]; + + // If true, disables compression when the response contains an etag header. When it is false, the + // filter will preserve weak etags and remove the ones that require strong validation. + bool disable_on_etag_header = 7; + + // If true, removes accept-encoding from the request headers before dispatching it to the upstream + // so that responses do not get compressed before reaching the filter. + bool remove_accept_encoding_header = 8; + + // Value from 9 to 15 that represents the base two logarithmic of the compressor's window size. + // Larger window results in better compression at the expense of memory usage. The default is 12 + // which will produce a 4096 bytes window. 
For more details about this parameter, please refer to + // zlib manual > deflateInit2. + google.protobuf.UInt32Value window_bits = 9 [(validate.rules).uint32 = {gte: 9, lte: 15}]; +} diff --git a/api/envoy/config/filter/http/health_check/v2/BUILD b/api/envoy/config/filter/http/health_check/v2/BUILD new file mode 100644 index 000000000000..78eaa4b55e30 --- /dev/null +++ b/api/envoy/config/filter/http/health_check/v2/BUILD @@ -0,0 +1,19 @@ +load("//bazel:api_build_system.bzl", "api_proto_library", "api_go_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "health_check", + srcs = ["health_check.proto"], + deps = [ + "//envoy/type:percent", + ], +) + +api_go_proto_library( + name = "health_check", + proto = ":health_check", + deps = [ + "//envoy/type:percent_go_proto", + ], +) diff --git a/api/envoy/config/filter/http/health_check/v2/health_check.proto b/api/envoy/config/filter/http/health_check/v2/health_check.proto new file mode 100644 index 000000000000..185a9117f240 --- /dev/null +++ b/api/envoy/config/filter/http/health_check/v2/health_check.proto @@ -0,0 +1,33 @@ +syntax = "proto3"; + +package envoy.config.filter.http.health_check.v2; +option go_package = "v2"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "envoy/type/percent.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +// [#protodoc-title: Health check] +// Health check :ref:`configuration overview `. + +message HealthCheck { + // Specifies whether the filter operates in pass through mode or not. + google.protobuf.BoolValue pass_through_mode = 1 [(validate.rules).message.required = true]; + + // Specifies the incoming HTTP endpoint that should be considered the + // health check endpoint. For example */healthcheck*. 
+ string endpoint = 2 [(validate.rules).string.min_bytes = 1]; + + // If operating in pass through mode, the amount of time in milliseconds + // that the filter should cache the upstream response. + google.protobuf.Duration cache_time = 3 [(gogoproto.stdduration) = true]; + + // If operating in non-pass-through mode, specifies a set of upstream cluster + // names and the minimum percentage of servers in each of those clusters that + // must be healthy in order for the filter to return a 200. + map cluster_min_healthy_percentages = 4; +} diff --git a/api/envoy/config/filter/http/ip_tagging/v2/BUILD b/api/envoy/config/filter/http/ip_tagging/v2/BUILD new file mode 100644 index 000000000000..147693b86c08 --- /dev/null +++ b/api/envoy/config/filter/http/ip_tagging/v2/BUILD @@ -0,0 +1,9 @@ +load("//bazel:api_build_system.bzl", "api_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "ip_tagging", + srcs = ["ip_tagging.proto"], + deps = ["//envoy/api/v2/core:address"], +) diff --git a/api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto b/api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto new file mode 100644 index 000000000000..be516f77bc6d --- /dev/null +++ b/api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto @@ -0,0 +1,48 @@ +syntax = "proto3"; + +package envoy.config.filter.http.ip_tagging.v2; +option go_package = "v2"; + +import "envoy/api/v2/core/address.proto"; + +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; + +message IPTagging { + + // The type of requests the filter should apply to. The supported types + // are internal, external or both. The + // :ref:`x-forwarded-for` header is + // used to determine if a request is internal and will result in + // :ref:`x-envoy-internal` + // being set. The filter defaults to both, and it will apply to all request types. + enum RequestType { + // Both external and internal requests will be tagged. This is the default value. 
+ BOTH = 0; + + // Only internal requests will be tagged. + INTERNAL = 1; + + // Only external requests will be tagged. + EXTERNAL = 2; + } + + // The type of request the filter should apply to. + RequestType request_type = 1 [(validate.rules).enum.defined_only = true]; + + // Supplies the IP tag name and the IP address subnets. + message IPTag { + // Specifies the IP tag name to apply. + string ip_tag_name = 1; + + // A list of IP address subnets that will be tagged with + // ip_tag_name. Both IPv4 and IPv6 are supported. + repeated envoy.api.v2.core.CidrRange ip_list = 2; + } + + // [#comment:TODO(ccaraman): Extend functionality to load IP tags from file system. + // Tracked by issue https://github.com/envoyproxy/envoy/issues/2695] + // The set of IP tags for the filter. + repeated IPTag ip_tags = 4 [(validate.rules).repeated .min_items = 1]; +} diff --git a/api/envoy/config/filter/http/jwt_authn/v2alpha/BUILD b/api/envoy/config/filter/http/jwt_authn/v2alpha/BUILD new file mode 100644 index 000000000000..cc07bd29bdda --- /dev/null +++ b/api/envoy/config/filter/http/jwt_authn/v2alpha/BUILD @@ -0,0 +1,13 @@ +licenses(["notice"]) # Apache 2 + +load("//bazel:api_build_system.bzl", "api_proto_library") + +api_proto_library( + name = "jwt_authn", + srcs = ["config.proto"], + deps = [ + "//envoy/api/v2/core:base", + "//envoy/api/v2/core:http_uri", + "//envoy/api/v2/route", + ], +) diff --git a/api/envoy/config/filter/http/jwt_authn/v2alpha/README.md b/api/envoy/config/filter/http/jwt_authn/v2alpha/README.md new file mode 100644 index 000000000000..d7aac1ad98ef --- /dev/null +++ b/api/envoy/config/filter/http/jwt_authn/v2alpha/README.md @@ -0,0 +1,31 @@ +# JWT Authentication HTTP filter config + +## Overview + +1. The proto file in this folder defines an HTTP filter config for "jwt_authn" filter. + +2. 
This filter will verify the JWT in the HTTP request as: + - The signature should be valid + - JWT should not be expired + - Issuer and audiences are valid and specified in the filter config. + +3. [JWK](https://tools.ietf.org/html/rfc7517#appendix-A) is needed to verify JWT signature. It can be fetched from a remote server or read from a local file. If the JWKS is fetched remotely, it will be cached by the filter. + +4. If a JWT is valid, the user is authenticated and the request will be forwarded to the backend server. If a JWT is not valid, the request will be rejected with an error message. + +## The locations to extract JWT + +JWT will be extracted from the HTTP headers or query parameters. The default location is the HTTP header: +``` +Authorization: Bearer +``` +The next default location is in the query parameter as: +``` +?access_token= +``` + +If a custom location is desired, `from_headers` or `from_params` can be used to specify custom locations to extract JWT. + +## HTTP header to pass successfully verified JWT + +If a JWT is valid, its payload will be passed to the backend in a new HTTP header specified in `forward_payload_header` field. Its value is base64 encoded JWT payload in JSON. diff --git a/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto b/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto new file mode 100644 index 000000000000..adab17168ddb --- /dev/null +++ b/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto @@ -0,0 +1,226 @@ + +syntax = "proto3"; + +package envoy.config.filter.http.jwt_authn.v2alpha; + +import "envoy/api/v2/core/base.proto"; + +import "envoy/api/v2/core/http_uri.proto"; +import "envoy/api/v2/route/route.proto"; +import "google/protobuf/duration.proto"; +import "validate/validate.proto"; + +// This message specifies how a JSON Web Token (JWT) can be verified. JWT format is defined +// `here `_. Please see `OAuth2.0 +// `_ and `OIDC1.0 `_ for +// the authentication flow. +// +// Example: +// +// ..
code-block:: yaml +// +// issuer: https://example.com +// audiences: +// - bookstore_android.apps.googleusercontent.com +// bookstore_web.apps.googleusercontent.com +// remote_jwks: +// - http_uri: +// - uri: https://example.com/.well-known/jwks.json +// cluster: example_jwks_cluster +// cache_duration: +// - seconds: 300 +// +// [#not-implemented-hide:] +message JwtRule { + // Identifies the principal that issued the JWT. See `here + // `_. Usually a URL or an email address. + // + // Example: https://securetoken.google.com + // Example: 1234567-compute@developer.gserviceaccount.com + // + string issuer = 1 [(validate.rules).string.min_bytes = 1]; + + // The list of JWT `audiences `_. that are + // allowed to access. A JWT containing any of these audiences will be accepted. If not specified, + // will not check audiences in the token. + // + // Example: + // + // .. code-block:: yaml + // + // audiences: + // - bookstore_android.apps.googleusercontent.com + // bookstore_web.apps.googleusercontent.com + // + repeated string audiences = 2; + + // `JSON Web Key Set `_ is needed. to validate + // signature of the JWT. This field specifies where to fetch JWKS. + oneof jwks_source_specifier { + option (validate.required) = true; + + // JWKS can be fetched from remote server via HTTP/HTTPS. This field specifies the remote HTTP + // URI and how the fetched JWKS should be cached. + // + // Example: + // + // .. code-block:: yaml + // + // remote_jwks: + // - http_uri: + // - uri: https://www.googleapis.com/oauth2/v1/certs + // cluster: jwt.www.googleapis.com|443 + // cache_duration: + // - seconds: 300 + // + RemoteJwks remote_jwks = 3; + + // JWKS is in local data source. It could be either in a local file or embedded in the + // inline_string. + // + // Example: local file + // + // .. code-block:: yaml + // + // local_jwks: + // - filename: /etc/envoy/jwks/jwks1.txt + // + // Example: inline_string + // + // .. 
code-block:: yaml + // + // local_jwks: + // - inline_string: "ACADADADADA" + // + envoy.api.v2.core.DataSource local_jwks = 4; + } + + // If false, the JWT is removed in the request after a success verification. If true, the JWT is + // not removed in the request. Default value is false. + bool forward = 5; + + // Two fields below define where to extract the JWT from an HTTP request. + // + // If no explicit location is specified, the following default locations are tried in order: + // + // 1. The Authorization header using the Bearer schema. See `here + // `_. Example: + // + // Authorization: Bearer . + // + // 2. `access_token` query parameter. See `this + // `_ + // + + // Multiple JWTs can be verified for a request. Each JWT has to be extracted from the locations + // its issuer specified or from the default locations. + + // Specify the HTTP headers to extract JWT token. For examples, following config: + // + // .. code-block:: yaml + // + // from_headers: + // - name: x-goog-iap-jwt-assertion + // + // can be used to extract token from header:: + // + // x-goog-iap-jwt-assertion: . + // + repeated JwtHeader from_headers = 6; + + // JWT is sent in a query parameter. `jwt_params` represents the query parameter names. + // + // For example, if config is: + // + // .. code-block:: yaml + // + // from_params: + // - jwt_token + // + // The JWT format in query parameter is:: + // + // /path?jwt_token= + // + repeated string from_params = 7; + + // This field specifies the header name to forward a successfully verified JWT payload to the + // backend. The forwarded data is:: + // + // base64_encoded(jwt_payload_in_JSON) + // + // If it is not specified, the payload will not be forwarded. + // Multiple JWTs in a request from different issuers will be supported. Multiple JWTs from the + // same issuer will not be supported. Each issuer can config this `forward_payload_header`. 
If + // multiple JWTs from different issuers want to forward their payloads, their + // `forward_payload_header` should be different. + string forward_payload_header = 8; +} + +// This message specifies how to fetch JWKS from remote and how to cache it. +message RemoteJwks { + // The HTTP URI to fetch the JWKS. For example: + // + // .. code-block:: yaml + // + // http_uri: + // - uri: https://www.googleapis.com/oauth2/v1/certs + // cluster: jwt.www.googleapis.com|443 + // + envoy.api.v2.core.HttpUri http_uri = 1; + + // Duration after which the cached JWKS should be expired. If not specified, default cache + // duration is 5 minutes. + google.protobuf.Duration cache_duration = 2; +} + +// This message specifies a header location to extract JWT token. +message JwtHeader { + // The HTTP header name. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // The value prefix. The value format is "value_prefix" + // For example, for "Authorization: Bearer ", value_prefix="Bearer " with a space at the + // end. + string value_prefix = 2; +} + +// This is the Envoy HTTP filter config for JWT authentication. +// [#not-implemented-hide:] +message JwtAuthentication { + // List of JWT rules to validate. + repeated JwtRule rules = 1; + + // If true, the request is allowed if JWT is missing or JWT verification fails. + // Default is false, a request without JWT or failed JWT verification is not allowed. + bool allow_missing_or_failed = 2; + + // This field lists the patterns allowed to bypass JWT verification. This only applies when + // `allow_missing_or_failed` is false. Under this config, if a request doesn't have JWT, it + // will be rejected. But some requests still need to be forwarded without JWT, such as OPTIONS + // for CORS and some health checking paths. + // + // Examples: bypass all CORS options requests + // + // ..
code-block:: yaml + // + // bypass: + // - headers: + // - name: :method + // value: OPTIONS + // - headers: + // - name: :path + // regex_match: /.* + // + // Examples: bypass /healthz check + // + // .. code-block:: yaml + // + // bypass: + // - headers: + // - name: :method + // value: GET + // - headers: + // - name: :path + // exact_match: /healthz + // + repeated envoy.api.v2.route.RouteMatch bypass = 3; +} diff --git a/api/envoy/config/filter/http/lua/v2/BUILD b/api/envoy/config/filter/http/lua/v2/BUILD new file mode 100644 index 000000000000..ce571d9720db --- /dev/null +++ b/api/envoy/config/filter/http/lua/v2/BUILD @@ -0,0 +1,8 @@ +load("//bazel:api_build_system.bzl", "api_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "lua", + srcs = ["lua.proto"], +) diff --git a/api/envoy/config/filter/http/lua/v2/lua.proto b/api/envoy/config/filter/http/lua/v2/lua.proto new file mode 100644 index 000000000000..f42f1b6ef704 --- /dev/null +++ b/api/envoy/config/filter/http/lua/v2/lua.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package envoy.config.filter.http.lua.v2; +option go_package = "v2"; + +import "validate/validate.proto"; + +// [#protodoc-title: Lua] +// Lua :ref:`configuration overview `. + +message Lua { + // The Lua code that Envoy will execute. This can be a very small script that + // further loads code from disk if desired. Note that if JSON configuration is used, the code must + // be properly escaped. YAML configuration may be easier to read since YAML supports multi-line + // strings so complex scripts can be easily expressed inline in the configuration. 
+ string inline_code = 1 [(validate.rules).string.min_bytes = 1]; +} diff --git a/api/envoy/config/filter/http/rate_limit/v2/BUILD b/api/envoy/config/filter/http/rate_limit/v2/BUILD new file mode 100644 index 000000000000..484e19c40d32 --- /dev/null +++ b/api/envoy/config/filter/http/rate_limit/v2/BUILD @@ -0,0 +1,8 @@ +load("//bazel:api_build_system.bzl", "api_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "rate_limit", + srcs = ["rate_limit.proto"], +) diff --git a/api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto b/api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto new file mode 100644 index 000000000000..9d3f50e7542b --- /dev/null +++ b/api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto @@ -0,0 +1,37 @@ +syntax = "proto3"; + +package envoy.config.filter.http.rate_limit.v2; +option go_package = "v2"; + +import "google/protobuf/duration.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +// [#protodoc-title: Rate limit] +// Rate limit :ref:`configuration overview `. + +message RateLimit { + // The rate limit domain to use when calling the rate limit service. + string domain = 1 [(validate.rules).string.min_bytes = 1]; + + // Specifies the rate limit configurations to be applied with the same + // stage number. If not set, the default stage number is 0. + // + // .. note:: + // + // The filter supports a range of 0 - 10 inclusively for stage numbers. + uint32 stage = 2 [(validate.rules).uint32.lte = 10]; + + // The type of requests the filter should apply to. The supported + // types are *internal*, *external* or *both*. A request is considered internal if + // :ref:`x-envoy-internal` is set to true. If + // :ref:`x-envoy-internal` is not set or false, a + // request is considered external. The filter defaults to *both*, and it will apply to all request + // types. + string request_type = 3; + + // The timeout in milliseconds for the rate limit service RPC. 
If not + // set, this defaults to 20ms. + google.protobuf.Duration timeout = 4 [(gogoproto.stdduration) = true]; +} diff --git a/api/envoy/config/filter/http/router/v2/BUILD b/api/envoy/config/filter/http/router/v2/BUILD new file mode 100644 index 000000000000..00392ac7f98a --- /dev/null +++ b/api/envoy/config/filter/http/router/v2/BUILD @@ -0,0 +1,9 @@ +load("//bazel:api_build_system.bzl", "api_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "router", + srcs = ["router.proto"], + deps = ["//envoy/config/filter/accesslog/v2:accesslog"], +) diff --git a/api/envoy/config/filter/http/router/v2/router.proto b/api/envoy/config/filter/http/router/v2/router.proto new file mode 100644 index 000000000000..a334c62c48e6 --- /dev/null +++ b/api/envoy/config/filter/http/router/v2/router.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; + +package envoy.config.filter.http.router.v2; +option go_package = "v2"; + +import "envoy/config/filter/accesslog/v2/accesslog.proto"; + +import "google/protobuf/wrappers.proto"; + +// [#protodoc-title: Router] +// Router :ref:`configuration overview `. + +message Router { + // Whether the router generates dynamic cluster statistics. Defaults to + // true. Can be disabled in high performance scenarios. + google.protobuf.BoolValue dynamic_stats = 1; + + // Whether to start a child span for egress routed calls. This can be + // useful in scenarios where other filters (auth, ratelimit, etc.) make + // outbound calls and have child spans rooted at the same ingress + // parent. Defaults to false. + bool start_child_span = 2; + + // Configuration for HTTP upstream logs emitted by the router. Upstream logs + // are configured in the same way as access logs, but each log entry represents + // an upstream request. Presuming retries are configured, multiple upstream + // requests may be made for each downstream (inbound) request. 
+ repeated envoy.config.filter.accesslog.v2.AccessLog upstream_log = 3; +} diff --git a/api/envoy/config/filter/http/squash/v2/BUILD b/api/envoy/config/filter/http/squash/v2/BUILD new file mode 100644 index 000000000000..ea5e9c6c4c15 --- /dev/null +++ b/api/envoy/config/filter/http/squash/v2/BUILD @@ -0,0 +1,8 @@ +load("//bazel:api_build_system.bzl", "api_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "squash", + srcs = ["squash.proto"], +) diff --git a/api/envoy/config/filter/http/squash/v2/squash.proto b/api/envoy/config/filter/http/squash/v2/squash.proto new file mode 100644 index 000000000000..a1455417b75a --- /dev/null +++ b/api/envoy/config/filter/http/squash/v2/squash.proto @@ -0,0 +1,51 @@ +syntax = "proto3"; + +package envoy.config.filter.http.squash.v2; +option go_package = "v2"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +// [#protodoc-title: Squash] +// Squash :ref:`configuration overview `. + +// [#proto-status: experimental] +message Squash { + // The name of the cluster that hosts the Squash server. + string cluster = 1 [(validate.rules).string.min_bytes = 1]; + + // When the filter requests the Squash server to create a DebugAttachment, it will use this + // structure as template for the body of the request. It can contain reference to environment + // variables in the form of '{{ ENV_VAR_NAME }}'. These can be used to provide the Squash server + // with more information to find the process to attach the debugger to. For example, in a + // Istio/k8s environment, this will contain information on the pod: + // + // .. 
code-block:: json + // + // { + // "spec": { + // "attachment": { + // "pod": "{{ POD_NAME }}", + // "namespace": "{{ POD_NAMESPACE }}" + // }, + // "match_request": true + // } + // } + // + // (where POD_NAME, POD_NAMESPACE are configured in the pod via the Downward API) + google.protobuf.Struct attachment_template = 2; + + // The timeout for individual requests sent to the Squash cluster. Defaults to 1 second. + google.protobuf.Duration request_timeout = 3 [(gogoproto.stdduration) = true]; + + // The total timeout Squash will delay a request and wait for it to be attached. Defaults to 60 + // seconds. + google.protobuf.Duration attachment_timeout = 4 [(gogoproto.stdduration) = true]; + + // Amount of time to poll for the status of the attachment object in the Squash server + // (to check if has been attached). Defaults to 1 second. + google.protobuf.Duration attachment_poll_period = 5 [(gogoproto.stdduration) = true]; +} diff --git a/api/envoy/config/filter/http/transcoder/v2/BUILD b/api/envoy/config/filter/http/transcoder/v2/BUILD new file mode 100644 index 000000000000..087f8ce8cefb --- /dev/null +++ b/api/envoy/config/filter/http/transcoder/v2/BUILD @@ -0,0 +1,8 @@ +load("//bazel:api_build_system.bzl", "api_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "transcoder", + srcs = ["transcoder.proto"], +) diff --git a/api/envoy/config/filter/http/transcoder/v2/transcoder.proto b/api/envoy/config/filter/http/transcoder/v2/transcoder.proto new file mode 100644 index 000000000000..7a52c9a48e27 --- /dev/null +++ b/api/envoy/config/filter/http/transcoder/v2/transcoder.proto @@ -0,0 +1,63 @@ +syntax = "proto3"; + +package envoy.config.filter.http.transcoder.v2; +option go_package = "v2"; + +import "validate/validate.proto"; + +// [#protodoc-title: gRPC-JSON transcoder] +// gRPC-JSON transcoder :ref:`configuration overview `. 
+ +message GrpcJsonTranscoder { + oneof descriptor_set { + option (validate.required) = true; + + // Supplies the filename of + // :ref:`the proto descriptor set ` for the gRPC + // services. + string proto_descriptor = 1; + + // Supplies the binary content of + // :ref:`the proto descriptor set ` for the gRPC + // services. + bytes proto_descriptor_bin = 4; + } + + // A list of strings that supplies the service names that the + // transcoder will translate. If the service name doesn't exist in ``proto_descriptor``, Envoy + // will fail at startup. The ``proto_descriptor`` may contain more services than the service names + // specified here, but they won't be translated. + repeated string services = 2 [(validate.rules).repeated .min_items = 1]; + + message PrintOptions { + // Whether to add spaces, line breaks and indentation to make the JSON + // output easy to read. Defaults to false. + bool add_whitespace = 1; + + // Whether to always print primitive fields. By default primitive + // fields with default values will be omitted in JSON output. For + // example, an int32 field set to 0 will be omitted. Setting this flag to + // true will override the default behavior and print primitive fields + // regardless of their values. Defaults to false. + bool always_print_primitive_fields = 2; + + // Whether to always print enums as ints. By default they are rendered + // as strings. Defaults to false. + bool always_print_enums_as_ints = 3; + + // Whether to preserve proto field names. By default protobuf will + // generate JSON field names using the ``json_name`` option, or lower camel case, + // in that order. Setting this flag will preserve the original field names. Defaults to false. + bool preserve_proto_field_names = 4; + }; + + // Control options for response JSON. These options are passed directly to + // `JsonPrintOptions `_. 
+ PrintOptions print_options = 3; + + // Whether to keep the incoming request route after the outgoing headers have been transformed to + // the match the upstream gRPC service. Note: This means that routes for gRPC services that are + // not transcoded cannot be used in combination with *match_incoming_request_route*. + bool match_incoming_request_route = 5; +} diff --git a/api/envoy/config/filter/network/client_ssl_auth/v2/BUILD b/api/envoy/config/filter/network/client_ssl_auth/v2/BUILD new file mode 100644 index 000000000000..d382848c9239 --- /dev/null +++ b/api/envoy/config/filter/network/client_ssl_auth/v2/BUILD @@ -0,0 +1,9 @@ +load("//bazel:api_build_system.bzl", "api_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "client_ssl_auth", + srcs = ["client_ssl_auth.proto"], + deps = ["//envoy/api/v2/core:address"], +) diff --git a/api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto b/api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto new file mode 100644 index 000000000000..7d0321cf8faf --- /dev/null +++ b/api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto @@ -0,0 +1,37 @@ +syntax = "proto3"; + +package envoy.config.filter.network.client_ssl_auth.v2; +option go_package = "v2"; + +import "envoy/api/v2/core/address.proto"; +import "google/protobuf/duration.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +// [#protodoc-title: Client TLS authentication] +// Client TLS authentication +// :ref:`configuration overview `. + +message ClientSSLAuth { + // The :ref:`cluster manager ` cluster that runs + // the authentication service. The filter will connect to the service every 60s to fetch the list + // of principals. The service must support the expected :ref:`REST API + // `. + string auth_api_cluster = 1 [(validate.rules).string.min_bytes = 1]; + + // The prefix to use when emitting :ref:`statistics + // `. 
+ string stat_prefix = 2 [(validate.rules).string.min_bytes = 1]; + + // Time in milliseconds between principal refreshes from the + // authentication service. Default is 60000 (60s). The actual fetch time + // will be this value plus a random jittered value between + // 0-refresh_delay_ms milliseconds. + google.protobuf.Duration refresh_delay = 3 [(gogoproto.stdduration) = true]; + + // An optional list of IP address and subnet masks that should be white + // listed for access by the filter. If no list is provided, there is no + // IP white list. + repeated envoy.api.v2.core.CidrRange ip_white_list = 4; +} diff --git a/api/envoy/config/filter/network/ext_authz/v2/BUILD b/api/envoy/config/filter/network/ext_authz/v2/BUILD new file mode 100644 index 000000000000..22dc891526f9 --- /dev/null +++ b/api/envoy/config/filter/network/ext_authz/v2/BUILD @@ -0,0 +1,9 @@ +load("//bazel:api_build_system.bzl", "api_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "ext_authz", + srcs = ["ext_authz.proto"], + deps = ["//envoy/api/v2/core:grpc_service"], +) diff --git a/api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto b/api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto new file mode 100644 index 000000000000..f6716386fe75 --- /dev/null +++ b/api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package envoy.config.filter.network.ext_authz.v2; +option go_package = "v2"; + +import "envoy/api/v2/core/grpc_service.proto"; + +import "validate/validate.proto"; + +// [#not-implemented-hide:] +// External Authorization filter calls out to an external service over the +// gRPC Authorization API defined by +// :ref:`external_auth `. +// A failed check will cause this filter to close the TCP connection. +message ExtAuthz { + // The prefix to use when emitting statistics. 
+ string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; + + // The external authorization gRPC service configuration. + envoy.api.v2.core.GrpcService grpc_service = 2; + + // The filter's behaviour in case the external authorization service does + // not respond back. If set to true then in case of failure to get a + // response back from the authorization service or getting a response that + // is NOT denied then traffic will be permitted. + // Defaults to false. + bool failure_mode_allow = 3; +} diff --git a/api/envoy/config/filter/network/http_connection_manager/v2/BUILD b/api/envoy/config/filter/network/http_connection_manager/v2/BUILD new file mode 100644 index 000000000000..0e7188500c35 --- /dev/null +++ b/api/envoy/config/filter/network/http_connection_manager/v2/BUILD @@ -0,0 +1,29 @@ +load("//bazel:api_build_system.bzl", "api_proto_library", "api_go_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "http_connection_manager", + srcs = ["http_connection_manager.proto"], + deps = [ + "//envoy/api/v2:rds", + "//envoy/api/v2/core:base", + "//envoy/api/v2/core:config_source", + "//envoy/api/v2/core:protocol", + "//envoy/config/filter/accesslog/v2:accesslog", + "//envoy/type:percent", + ], +) + +api_go_proto_library( + name = "http_connection_manager", + proto = ":http_connection_manager", + deps = [ + "//envoy/api/v2:rds_go_grpc", + "//envoy/api/v2/core:base_go_proto", + "//envoy/api/v2/core:config_source_go_proto", + "//envoy/api/v2/core:protocol_go_proto", + "//envoy/config/filter/accesslog/v2:accesslog_go_proto", + "//envoy/type:percent_go_proto", + ], +) diff --git a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto new file mode 100644 index 000000000000..1c573cc053d0 --- /dev/null +++ b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto @@ -0,0 
+1,306 @@ +syntax = "proto3"; + +package envoy.config.filter.network.http_connection_manager.v2; +option go_package = "v2"; + +import "envoy/api/v2/core/config_source.proto"; +import "envoy/api/v2/core/protocol.proto"; +import "envoy/api/v2/rds.proto"; +import "envoy/config/filter/accesslog/v2/accesslog.proto"; +import "envoy/type/percent.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +// [#protodoc-title: HTTP connection manager] +// HTTP connection manager :ref:`configuration overview `. + +// [#comment:next free field: 21] +message HttpConnectionManager { + enum CodecType { + option (gogoproto.goproto_enum_prefix) = false; + + // For every new connection, the connection manager will determine which + // codec to use. This mode supports both ALPN for TLS listeners as well as + // protocol inference for plaintext listeners. If ALPN data is available, it + // is preferred, otherwise protocol inference is used. In almost all cases, + // this is the right option to choose for this setting. + AUTO = 0; + + // The connection manager will assume that the client is speaking HTTP/1.1. + HTTP1 = 1; + + // The connection manager will assume that the client is speaking HTTP/2 + // (Envoy does not require HTTP/2 to take place over TLS or to use ALPN. + // Prior knowledge is allowed). + HTTP2 = 2; + } + + // Supplies the type of codec that the connection manager should use. + CodecType codec_type = 1 [(validate.rules).enum.defined_only = true]; + + // The human readable prefix to use when emitting statistics for the + // connection manager. See the :ref:`statistics documentation ` for + // more information. + string stat_prefix = 2 [(validate.rules).string.min_bytes = 1]; + + oneof route_specifier { + option (validate.required) = true; + + // The connection manager’s route table will be dynamically loaded via the RDS API. 
+ Rds rds = 3; + + // The route table for the connection manager is static and is specified in this property. + envoy.api.v2.RouteConfiguration route_config = 4; + } + + // A list of individual HTTP filters that make up the filter chain for + // requests made to the connection manager. Order matters as the filters are + // processed sequentially as request events happen. + repeated HttpFilter http_filters = 5; + + // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` + // and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See the linked + // documentation for more information. Defaults to false. + google.protobuf.BoolValue add_user_agent = 6; + + message Tracing { + enum OperationName { + option (gogoproto.goproto_enum_prefix) = false; + + // The HTTP listener is used for ingress/incoming requests. + INGRESS = 0; + + // The HTTP listener is used for egress/outgoing requests. + EGRESS = 1; + } + + // The span name will be derived from this field. + OperationName operation_name = 1 [(validate.rules).enum.defined_only = true]; + + // A list of header names used to create tags for the active span. The header name is used to + // populate the tag name, and the header value is used to populate the tag value. The tag is + // created if the specified header name is present in the request's headers. + repeated string request_headers_for_tags = 2; + + // Target percentage of requests managed by this HTTP connection manager that will be force + // traced if the :ref:`x-client-trace-id ` + // header is set. This field is a direct analog for the runtime variable + // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager + // `. + // Default: 100% + envoy.type.Percent client_sampling = 3; + + // Target percentage of requests managed by this HTTP connection manager that will be randomly + // selected for trace generation, if not requested by the client or not forced. 
This field is + // a direct analog for the runtime variable 'tracing.random_sampling' in the + // :ref:`HTTP Connection Manager `. + // Default: 100% + envoy.type.Percent random_sampling = 4; + + // Target percentage of requests managed by this HTTP connection manager that will be traced + // after all other sampling checks have been applied (client-directed, force tracing, random + // sampling). This field functions as an upper limit on the total configured sampling rate. For + // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1% + // of client requests with the appropriate headers to be force traced. This field is a direct + // analog for the runtime variable 'tracing.global_enabled' in the + // :ref:`HTTP Connection Manager `. + // Default: 100% + envoy.type.Percent overall_sampling = 5; + } + + // Presence of the object defines whether the connection manager + // emits :ref:`tracing ` data to the :ref:`configured tracing provider + // `. + Tracing tracing = 7; + + // Additional HTTP/1 settings that are passed to the HTTP/1 codec. + envoy.api.v2.core.Http1ProtocolOptions http_protocol_options = 8; + + // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. + envoy.api.v2.core.Http2ProtocolOptions http2_protocol_options = 9; + + // An optional override that the connection manager will write to the server + // header in responses. If not set, the default is *envoy*. + string server_name = 10; + + // The idle timeout for connections managed by the connection manager. The + // idle timeout is defined as the period in which there are no active + // requests. If not set, there is no idle timeout. When the idle timeout is + // reached the connection will be closed. If the connection is an HTTP/2 + // connection a drain sequence will occur prior to closing the connection. See + // :ref:`drain_timeout + // `. 
+ google.protobuf.Duration idle_timeout = 11 [(gogoproto.stdduration) = true]; + + // The time that Envoy will wait between sending an HTTP/2 “shutdown + // notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame. + // This is used so that Envoy provides a grace period for new streams that + // race with the final GOAWAY frame. During this grace period, Envoy will + // continue to accept new streams. After the grace period, a final GOAWAY + // frame is sent and Envoy will start refusing new streams. Draining occurs + // both when a connection hits the idle timeout or during general server + // draining. The default grace period is 5000 milliseconds (5 seconds) if this + // option is not specified. + google.protobuf.Duration drain_timeout = 12 [(gogoproto.stdduration) = true]; + + // Configuration for :ref:`HTTP access logs ` + // emitted by the connection manager. + repeated envoy.config.filter.accesslog.v2.AccessLog access_log = 13; + + // If set to true, the connection manager will use the real remote address + // of the client connection when determining internal versus external origin and manipulating + // various headers. If set to false or absent, the connection manager will use the + // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. See the documentation for + // :ref:`config_http_conn_man_headers_x-forwarded-for`, + // :ref:`config_http_conn_man_headers_x-envoy-internal`, and + // :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information. + google.protobuf.BoolValue use_remote_address = 14; + + // The number of additional ingress proxy hops from the right side of the + // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when + // determining the origin client's IP address. The default is zero if this option + // is not specified. See the documentation for + // :ref:`config_http_conn_man_headers_x-forwarded-for` for more information. 
+ uint32 xff_num_trusted_hops = 19; + + // of the client connection as the origin client address if *use_remote_address* is true + // Whether the connection manager will generate the :ref:`x-request-id + // ` header if it does not exist. This defaults to + // true. Generating a random UUID4 is expensive so in high throughput scenarios where this feature + // is not desired it can be disabled. + google.protobuf.BoolValue generate_request_id = 15; + + // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP + // header. + enum ForwardClientCertDetails { + option (gogoproto.goproto_enum_prefix) = false; + + // Do not send the XFCC header to the next hop. This is the default value. + SANITIZE = 0; + + // When the client connection is mTLS (Mutual TLS), forward the XFCC header + // in the request. + FORWARD_ONLY = 1; + + // When the client connection is mTLS, append the client certificate + // information to the request’s XFCC header and forward it. + APPEND_FORWARD = 2; + + // When the client connection is mTLS, reset the XFCC header with the client + // certificate information and send it to the next hop. + SANITIZE_SET = 3; + + // Always forward the XFCC header in the request, regardless of whether the + // client connection is mTLS. + ALWAYS_FORWARD_ONLY = 4; + }; + + // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP + // header. + ForwardClientCertDetails forward_client_cert_details = 16 + [(validate.rules).enum.defined_only = true]; + + message SetCurrentClientCertDetails { + // Whether to forward the subject of the client cert. Defaults to false. + google.protobuf.BoolValue subject = 1; + + // Whether to forward the URI type Subject Alternative Name of the client cert. Defaults to + // false. This field is deprecated, use URI field instead. + google.protobuf.BoolValue san = 2 [deprecated = true]; + + // Whether to forward the entire client cert in URL encoded PEM format. 
This will appear in the + // XFCC header comma separated from other values with the value Cert="PEM". + // Defaults to false. + bool cert = 3; + + // Whether to forward the DNS type Subject Alternative Names of the client cert. + // Defaults to false. + bool dns = 4; + + // Whether to forward the URI type Subject Alternative Name of the client cert. Defaults to + // false. + bool uri = 5; + }; + + // This field is valid only when :ref:`forward_client_cert_details + // ` + // is APPEND_FORWARD or SANITIZE_SET and the client connection is mTLS. It specifies the fields in + // the client certificate to be forwarded. Note that in the + // :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, *Hash* is always set, and + // *By* is always set when the client certificate presents the URI type Subject Alternative Name + // value. + SetCurrentClientCertDetails set_current_client_cert_details = 17; + + // If proxy_100_continue is true, Envoy will proxy incoming "Expect: + // 100-continue" headers upstream, and forward "100 Continue" responses + // downstream. If this is false or not set, Envoy will instead strip the + // "Expect: 100-continue" header, and send a "100 Continue" response itself. + bool proxy_100_continue = 18; + + // If + // :ref:`use_remote_address + // ` + // is true and represent_ipv4_remote_address_as_ipv4_mapped_ipv6 is true and the remote address is + // an IPv4 address, the address will be mapped to IPv6 before it is appended to *x-forwarded-for*. + // This is useful for testing compatibility of upstream services that parse the header value. For + // example, 50.0.0.1 is represented as ::FFFF:50.0.0.1. See `IPv4-Mapped IPv6 Addresses + // `_ for details. This will also affect the + // :ref:`config_http_conn_man_headers_x-envoy-external-address` header. See + // :ref:`http_connection_manager.represent_ipv4_remote_address_as_ipv4_mapped_ipv6 + // ` for runtime + // control. 
+ bool represent_ipv4_remote_address_as_ipv4_mapped_ipv6 = 20; +} + +message Rds { + // Configuration source specifier for RDS. + envoy.api.v2.core.ConfigSource config_source = 1 + [(validate.rules).message.required = true, (gogoproto.nullable) = false]; + + // The name of the route configuration. This name will be passed to the RDS + // API. This allows an Envoy configuration with multiple HTTP listeners (and + // associated HTTP connection manager filters) to use different route + // configurations. + string route_config_name = 2 [(validate.rules).string.min_bytes = 1]; +} + +message HttpFilter { + // The name of the filter to instantiate. The name must match a supported + // filter. The built-in filters are: + // + // [#comment:TODO(mattklein123): Auto generate the following list] + // * :ref:`envoy.buffer ` + // * :ref:`envoy.cors ` + // * :ref:`envoy.fault ` + // * :ref:`envoy.gzip ` + // * :ref:`envoy.http_dynamo_filter ` + // * :ref:`envoy.grpc_http1_bridge ` + // * :ref:`envoy.grpc_json_transcoder ` + // * :ref:`envoy.grpc_web ` + // * :ref:`envoy.health_check ` + // * :ref:`envoy.ip_tagging ` + // * :ref:`envoy.lua ` + // * :ref:`envoy.rate_limit ` + // * :ref:`envoy.router ` + // * :ref:`envoy.squash ` + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // Filter specific configuration which depends on the filter being + // instantiated. See the supported filters for further documentation. + google.protobuf.Struct config = 2; + + // [#not-implemented-hide:] + // This is hidden as type has been deprecated and is no longer required. + message DeprecatedV1 { + string type = 1; + } + + // [#not-implemented-hide:] + // This is hidden as type has been deprecated and is no longer required. 
+ DeprecatedV1 deprecated_v1 = 3 [deprecated = true]; +} diff --git a/api/envoy/config/filter/network/mongo_proxy/v2/BUILD b/api/envoy/config/filter/network/mongo_proxy/v2/BUILD new file mode 100644 index 000000000000..03bc30347675 --- /dev/null +++ b/api/envoy/config/filter/network/mongo_proxy/v2/BUILD @@ -0,0 +1,9 @@ +load("//bazel:api_build_system.bzl", "api_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "mongo_proxy", + srcs = ["mongo_proxy.proto"], + deps = ["//envoy/config/filter/fault/v2:fault"], +) diff --git a/api/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto b/api/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto new file mode 100644 index 000000000000..0d7297333083 --- /dev/null +++ b/api/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package envoy.config.filter.network.mongo_proxy.v2; +option go_package = "v2"; + +import "envoy/config/filter/fault/v2/fault.proto"; + +import "validate/validate.proto"; + +// [#protodoc-title: Mongo proxy] +// MongoDB :ref:`configuration overview `. + +message MongoProxy { + // The human readable prefix to use when emitting :ref:`statistics + // `. + string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; + + // The optional path to use for writing Mongo access logs. If not access log + // path is specified no access logs will be written. Note that access log is + // also gated :ref:`runtime `. + string access_log = 2; + + // Inject a fixed delay before proxying a Mongo operation. Delays are + // applied to the following MongoDB operations: Query, Insert, GetMore, + // and KillCursors. Once an active delay is in progress, all incoming + // data up until the timer event fires will be a part of the delay. 
+ envoy.config.filter.fault.v2.FaultDelay delay = 3; +} diff --git a/api/envoy/config/filter/network/rate_limit/v2/BUILD b/api/envoy/config/filter/network/rate_limit/v2/BUILD new file mode 100644 index 000000000000..b1936e3bb2c2 --- /dev/null +++ b/api/envoy/config/filter/network/rate_limit/v2/BUILD @@ -0,0 +1,9 @@ +load("//bazel:api_build_system.bzl", "api_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "rate_limit", + srcs = ["rate_limit.proto"], + deps = ["//envoy/api/v2/ratelimit"], +) diff --git a/api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto b/api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto new file mode 100644 index 000000000000..240129499ace --- /dev/null +++ b/api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; + +package envoy.config.filter.network.rate_limit.v2; +option go_package = "v2"; + +import "envoy/api/v2/ratelimit/ratelimit.proto"; +import "google/protobuf/duration.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +// [#protodoc-title: Rate limit] +// Rate limit :ref:`configuration overview `. + +message RateLimit { + // The prefix to use when emitting :ref:`statistics `. + string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; + + // The rate limit domain to use in the rate limit service request. + string domain = 2 [(validate.rules).string.min_bytes = 1]; + + // The rate limit descriptor list to use in the rate limit service request. + repeated envoy.api.v2.ratelimit.RateLimitDescriptor descriptors = 3 + [(validate.rules).repeated .min_items = 1]; + + // The timeout in milliseconds for the rate limit service RPC. If not + // set, this defaults to 20ms. 
+ google.protobuf.Duration timeout = 4 [(gogoproto.stdduration) = true]; +} diff --git a/api/envoy/config/filter/network/redis_proxy/v2/BUILD b/api/envoy/config/filter/network/redis_proxy/v2/BUILD new file mode 100644 index 000000000000..78f269301fe0 --- /dev/null +++ b/api/envoy/config/filter/network/redis_proxy/v2/BUILD @@ -0,0 +1,8 @@ +load("//bazel:api_build_system.bzl", "api_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "redis_proxy", + srcs = ["redis_proxy.proto"], +) diff --git a/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto b/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto new file mode 100644 index 000000000000..4d5220136458 --- /dev/null +++ b/api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto @@ -0,0 +1,37 @@ +syntax = "proto3"; + +package envoy.config.filter.network.redis_proxy.v2; +option go_package = "v2"; + +import "google/protobuf/duration.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +// [#protodoc-title: Redis Proxy] +// Redis Proxy :ref:`configuration overview `. + +message RedisProxy { + // The prefix to use when emitting :ref:`statistics `. + string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; + + // Name of cluster from cluster manager. See the :ref:`configuration section + // ` of the architecture overview for recommendations on + // configuring the backing cluster. + string cluster = 2 [(validate.rules).string.min_bytes = 1]; + + // Redis connection pool settings. + message ConnPoolSettings { + // Per-operation timeout in milliseconds. The timer starts when the first + // command of a pipeline is written to the backend connection. Each response received from Redis + // resets the timer since it signifies that the next command is being processed by the backend. + // The only exception to this behavior is when a connection to a backend is not yet established. 
+ // In that case, the connect timeout on the cluster will govern the timeout until the connection + // is ready. + google.protobuf.Duration op_timeout = 1 + [(validate.rules).duration.required = true, (gogoproto.stdduration) = true]; + } + + // Network settings for the connection pool to the upstream cluster. + ConnPoolSettings settings = 3 [(validate.rules).message.required = true]; +} diff --git a/api/envoy/config/filter/network/tcp_proxy/v2/BUILD b/api/envoy/config/filter/network/tcp_proxy/v2/BUILD new file mode 100644 index 000000000000..2e7296fa3f96 --- /dev/null +++ b/api/envoy/config/filter/network/tcp_proxy/v2/BUILD @@ -0,0 +1,13 @@ +load("//bazel:api_build_system.bzl", "api_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "tcp_proxy", + srcs = ["tcp_proxy.proto"], + deps = [ + "//envoy/api/v2/core:address", + "//envoy/api/v2/core:base", + "//envoy/config/filter/accesslog/v2:accesslog", + ], +) diff --git a/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto b/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto new file mode 100644 index 000000000000..d09952daf31f --- /dev/null +++ b/api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto @@ -0,0 +1,134 @@ +syntax = "proto3"; + +package envoy.config.filter.network.tcp_proxy.v2; +option go_package = "v2"; + +import "envoy/config/filter/accesslog/v2/accesslog.proto"; +import "envoy/api/v2/core/address.proto"; +import "envoy/api/v2/core/base.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +// [#protodoc-title: TCP Proxy] +// TCP Proxy :ref:`configuration overview `. + +message TcpProxy { + // The prefix to use when emitting :ref:`statistics + // `. + string stat_prefix = 1 [(validate.rules).string.min_bytes = 1]; + + // The upstream cluster to connect to. + // + // .. 
note:: + // + // Once full filter chain matching is implemented in listeners, this field will become the only + // way to configure the target cluster. All other matching will be done via :ref:`filter chain + // matching rules `. For very simple configurations, + // this field can still be used to select the cluster when no other matching rules are required. + // Otherwise, a :ref:`deprecated_v1 + // ` configuration is + // required to use more complex routing in the interim. + // + string cluster = 2; + + // Optional endpoint metadata match criteria. Only endpoints in the upstream + // cluster with metadata matching that set in metadata_match will be + // considered. The filter name should be specified as *envoy.lb*. + envoy.api.v2.core.Metadata metadata_match = 9; + + // The idle timeout for connections managed by the TCP proxy filter. The idle timeout + // is defined as the period in which there are no bytes sent or received on either + // the upstream or downstream connection. If not set, connections will never be closed + // by the TCP proxy due to being idle. + google.protobuf.Duration idle_timeout = 8 + [(validate.rules).duration.gt = {}, (gogoproto.stdduration) = true]; + + // [#not-implemented-hide:] The idle timeout for connections managed by the TCP proxy + // filter. The idle timeout is defined as the period in which there is no + // active traffic. If not set, there is no idle timeout. When the idle timeout + // is reached the connection will be closed. The distinction between + // downstream_idle_timeout/upstream_idle_timeout provides a means to set + // timeout based on the last byte sent on the downstream/upstream connection. + google.protobuf.Duration downstream_idle_timeout = 3; + + // [#not-implemented-hide:] + google.protobuf.Duration upstream_idle_timeout = 4; + + // Configuration for :ref:`access logs ` + // emitted by this tcp_proxy. 
+ repeated envoy.config.filter.accesslog.v2.AccessLog access_log = 5; + + // TCP Proxy filter configuration using V1 format, until Envoy gets the + // ability to match source/destination at the listener level (called + // :ref:`filter chain match `). + message DeprecatedV1 { + // A TCP proxy route consists of a set of optional L4 criteria and the + // name of a cluster. If a downstream connection matches all the + // specified criteria, the cluster in the route is used for the + // corresponding upstream connection. Routes are tried in the order + // specified until a match is found. If no match is found, the connection + // is closed. A route with no criteria is valid and always produces a + // match. + message TCPRoute { + // The cluster to connect to when the downstream network connection + // matches the specified criteria. + string cluster = 1 [(validate.rules).string.min_bytes = 1]; + + // An optional list of IP address subnets in the form + // “ip_address/xx”. The criteria is satisfied if the destination IP + // address of the downstream connection is contained in at least one of + // the specified subnets. If the parameter is not specified or the list + // is empty, the destination IP address is ignored. The destination IP + // address of the downstream connection might be different from the + // addresses on which the proxy is listening if the connection has been + // redirected. + repeated envoy.api.v2.core.CidrRange destination_ip_list = 2; + + // An optional string containing a comma-separated list of port numbers + // or ranges. The criteria is satisfied if the destination port of the + // downstream connection is contained in at least one of the specified + // ranges. If the parameter is not specified, the destination port is + // ignored. The destination port address of the downstream connection + // might be different from the port on which the proxy is listening if + // the connection has been redirected. 
+ string destination_ports = 3; + + // An optional list of IP address subnets in the form + // “ip_address/xx”. The criteria is satisfied if the source IP address + // of the downstream connection is contained in at least one of the + // specified subnets. If the parameter is not specified or the list is + // empty, the source IP address is ignored. + repeated envoy.api.v2.core.CidrRange source_ip_list = 4; + + // An optional string containing a comma-separated list of port numbers + // or ranges. The criteria is satisfied if the source port of the + // downstream connection is contained in at least one of the specified + // ranges. If the parameter is not specified, the source port is + // ignored. + string source_ports = 5; + } + + // The route table for the filter. All filter instances must have a route + // table, even if it is empty. + repeated TCPRoute routes = 1 [(validate.rules).repeated .min_items = 1]; + } + + // TCP Proxy filter configuration using deprecated V1 format. This is required for complex + // routing until filter chain matching in the listener is implemented. + // + // .. attention:: + // + // Using this field will lead to `problems loading the configuration + // `_. If you want to configure the filter + // using v1 config structure, please make this field a boolean with value ``true`` and configure + // via the opaque ``value`` field like is suggested in :api:`envoy/config/filter/README.md`. + DeprecatedV1 deprecated_v1 = 6 [deprecated = true]; + + // The maximum number of unsuccessful connection attempts that will be made before + // giving up. If the parameter is not specified, 1 connection attempt will be made. 
+ google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32.gte = 1]; +} diff --git a/api/envoy/config/health_checker/redis/v2/BUILD b/api/envoy/config/health_checker/redis/v2/BUILD new file mode 100644 index 000000000000..7d217c54dda8 --- /dev/null +++ b/api/envoy/config/health_checker/redis/v2/BUILD @@ -0,0 +1,8 @@ +load("//bazel:api_build_system.bzl", "api_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "redis", + srcs = ["redis.proto"], +) diff --git a/api/envoy/config/health_checker/redis/v2/redis.proto b/api/envoy/config/health_checker/redis/v2/redis.proto new file mode 100644 index 000000000000..7c82c9ac6a81 --- /dev/null +++ b/api/envoy/config/health_checker/redis/v2/redis.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; + +package envoy.config.health_checker.redis.v2; +option go_package = "v2"; + +// [#not-implemented-hide:] +// [#protodoc-title: Redis] +// Redis :ref:`configuration overview `. +// Configuration for the Redis custom health checker. +message Redis { + // If set, optionally perform ``EXISTS `` instead of ``PING``. A return value + // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other + // than 0 is considered a failure. This allows the user to mark a Redis instance for maintenance + // by setting the specified key to any value and waiting for traffic to drain. 
+ string key = 1; +} diff --git a/api/envoy/config/metrics/v2/BUILD b/api/envoy/config/metrics/v2/BUILD new file mode 100644 index 000000000000..d9df02a56c70 --- /dev/null +++ b/api/envoy/config/metrics/v2/BUILD @@ -0,0 +1,41 @@ +load("//bazel:api_build_system.bzl", "api_proto_library", "api_go_proto_library", "api_go_grpc_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "metrics_service", + srcs = ["metrics_service.proto"], + visibility = [ + "//envoy/config/bootstrap/v2:__pkg__", + ], + deps = [ + "//envoy/api/v2/core:grpc_service", + ], +) + +api_go_proto_library( + name = "metrics_service", + proto = ":metrics_service", + deps = [ + "//envoy/api/v2/core:grpc_service_go_proto", + ], +) + +api_proto_library( + name = "stats", + srcs = ["stats.proto"], + visibility = [ + "//envoy/config/bootstrap/v2:__pkg__", + ], + deps = [ + "//envoy/api/v2/core:address", + ], +) + +api_go_proto_library( + name = "stats", + proto = ":stats", + deps = [ + "//envoy/api/v2/core:address_go_proto", + ], +) diff --git a/api/envoy/config/metrics/v2/metrics_service.proto b/api/envoy/config/metrics/v2/metrics_service.proto new file mode 100644 index 000000000000..39e0f97241bb --- /dev/null +++ b/api/envoy/config/metrics/v2/metrics_service.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +// [#protodoc-title: Metrics service] + +package envoy.config.metrics.v2; + +import "envoy/api/v2/core/grpc_service.proto"; + +import "validate/validate.proto"; + +// Metrics Service is configured as a built-in *envoy.metrics_service* :ref:`StatsSink +// `. This opaque configuration will be used to create +// Metrics Service. +message MetricsServiceConfig { + // The upstream gRPC cluster that hosts the metrics service. 
+ envoy.api.v2.core.GrpcService grpc_service = 1 [(validate.rules).message.required = true]; +} diff --git a/api/envoy/config/metrics/v2/stats.proto b/api/envoy/config/metrics/v2/stats.proto new file mode 100644 index 000000000000..7aab95a43961 --- /dev/null +++ b/api/envoy/config/metrics/v2/stats.proto @@ -0,0 +1,202 @@ +// [#protodoc-title: Stats] +// Statistics :ref:`architecture overview `. + +syntax = "proto3"; + +package envoy.config.metrics.v2; +option go_package = "v2"; + +import "envoy/api/v2/core/address.proto"; + +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; + +// Configuration for pluggable stats sinks. +message StatsSink { + // The name of the stats sink to instantiate. The name must match a supported + // stats sink. The built-in stats sinks are: + // + // * :ref:`envoy.statsd ` + // * :ref:`envoy.dog_statsd ` + // * :ref:`envoy.metrics_service ` + // + // Sinks optionally support tagged/multiple dimensional metrics. + string name = 1; + + // Stats sink specific configuration which depends on the sink being + // instantiated. See :ref:`StatsdSink ` for an + // example. + google.protobuf.Struct config = 2; +} + +// Statistics configuration such as tagging. +message StatsConfig { + // Each stat name is iteratively processed through these tag specifiers. + // When a tag is matched, the first capture group is removed from the name so + // later :ref:`TagSpecifiers ` cannot match that + // same portion of the match. + repeated TagSpecifier stats_tags = 1; + + // Use all default tag regexes specified in Envoy. These can be combined with + // custom tags specified in :ref:`stats_tags + // `. They will be processed before + // the custom tags. + // + // .. note:: + // + // If any default tags are specified twice, the config will be considered + // invalid. + // + // See `well_known_names.h + // `_ + // for a list of the default tags in Envoy. 
+ // + // If not provided, the value is assumed to be true. + google.protobuf.BoolValue use_all_default_tags = 2; +} + +// Designates a tag name and value pair. The value may be either a fixed value +// or a regex providing the value via capture groups. The specified tag will be +// unconditionally set if a fixed value, otherwise it will only be set if one +// or more capture groups in the regex match. +message TagSpecifier { + // Attaches an identifier to the tag values to identify the tag being in the + // sink. Envoy has a set of default names and regexes to extract dynamic + // portions of existing stats, which can be found in `well_known_names.h + // `_ + // in the Envoy repository. If a :ref:`tag_name + // ` is provided in the config and + // neither :ref:`regex ` or + // :ref:`fixed_value ` were specified, + // Envoy will attempt to find that name in its set of defaults and use the accompanying regex. + // + // .. note:: + // + // It is invalid to specify the same tag name twice in a config. + string tag_name = 1; + + oneof tag_value { + // Designates a tag to strip from the tag extracted name and provide as a named + // tag value for all statistics. This will only occur if any part of the name + // matches the regex provided with one or more capture groups. + // + // The first capture group identifies the portion of the name to remove. The + // second capture group (which will normally be nested inside the first) will + // designate the value of the tag for the statistic. If no second capture + // group is provided, the first will also be used to set the value of the tag. + // All other capture groups will be ignored. + // + // Example 1. a stat name ``cluster.foo_cluster.upstream_rq_timeout`` and + // one tag specifier: + // + // .. 
code-block:: json + // + // { + // "tag_name": "envoy.cluster_name", + // "regex": "^cluster\.((.+?)\.)" + // } + // + // Note that the regex will remove ``foo_cluster.`` making the tag extracted + // name ``cluster.upstream_rq_timeout`` and the tag value for + // ``envoy.cluster_name`` will be ``foo_cluster`` (note: there will be no + // ``.`` character because of the second capture group). + // + // Example 2. a stat name + // ``http.connection_manager_1.user_agent.ios.downstream_cx_total`` and two + // tag specifiers: + // + // .. code-block:: json + // + // [ + // { + // "tag_name": "envoy.http_user_agent", + // "regex": "^http(?=\.).*?\.user_agent\.((.+?)\.)\w+?$" + // }, + // { + // "tag_name": "envoy.http_conn_manager_prefix", + // "regex": "^http\.((.*?)\.)" + // } + // ] + // + // The two regexes of the specifiers will be processed in the definition order. + // + // The first regex will remove ``ios.``, leaving the tag extracted name + // ``http.connection_manager_1.user_agent.downstream_cx_total``. The tag + // ``envoy.http_user_agent`` will be added with tag value ``ios``. + // + // The second regex will remove ``connection_manager_1.`` from the tag + // extracted name produced by the first regex + // ``http.connection_manager_1.user_agent.downstream_cx_total``, leaving + // ``http.user_agent.downstream_cx_total`` as the tag extracted name. The tag + // ``envoy.http_conn_manager_prefix`` will be added with the tag value + // ``connection_manager_1``. + string regex = 2; + + // Specifies a fixed tag value for the ``tag_name``. + string fixed_value = 3; + } +} + +// Stats configuration proto schema for built-in *envoy.statsd* sink. This sink does not support +// tagged metrics. +message StatsdSink { + oneof statsd_specifier { + option (validate.required) = true; + + // The UDP address of a running `statsd `_ + // compliant listener. If specified, statistics will be flushed to this + // address. 
+ envoy.api.v2.core.Address address = 1; + + // The name of a cluster that is running a TCP `statsd + // `_ compliant listener. If specified, + // Envoy will connect to this cluster to flush statistics. + string tcp_cluster_name = 2; + } + // [#not-implemented-hide:] Optional custom prefix for StatsdSink. If + // specified, this will override the default prefix. + // For example: + // + // .. code-block:: json + // + // { + // "prefix" : "envoy-prod" + // } + // + // will change emitted stats to + // + // .. code-block:: cpp + // + // envoy-prod.test_counter:1|c + // envoy-prod.test_timer:5|ms + // + // Note that the default prefix, "envoy", will be used if a prefix is not + // specified. + // + // Stats with default prefix: + // + // .. code-block:: cpp + // + // envoy.test_counter:1|c + // envoy.test_timer:5|ms + string prefix = 3; +} + +// Stats configuration proto schema for built-in *envoy.dog_statsd* sink. +// The sink emits stats with `DogStatsD `_ +// compatible tags. Tags are configurable via :ref:`StatsConfig +// `. +// [#comment:next free field: 3] +message DogStatsdSink { + oneof dog_statsd_specifier { + option (validate.required) = true; + + // The UDP address of a running DogStatsD compliant listener. If specified, + // statistics will be flushed to this address. 
+ envoy.api.v2.core.Address address = 1; + } + + reserved 2; +} diff --git a/api/envoy/config/ratelimit/v2/BUILD b/api/envoy/config/ratelimit/v2/BUILD new file mode 100644 index 000000000000..f71f28e0b610 --- /dev/null +++ b/api/envoy/config/ratelimit/v2/BUILD @@ -0,0 +1,22 @@ +load("//bazel:api_build_system.bzl", "api_proto_library", "api_go_grpc_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "rls", + srcs = ["rls.proto"], + visibility = [ + "//envoy/config/bootstrap/v2:__pkg__", + ], + deps = [ + "//envoy/api/v2/core:grpc_service", + ], +) + +api_go_grpc_library( + name = "rls", + proto = ":rls", + deps = [ + "//envoy/api/v2/core:grpc_service_go_proto", + ], +) diff --git a/api/envoy/config/ratelimit/v2/rls.proto b/api/envoy/config/ratelimit/v2/rls.proto new file mode 100644 index 000000000000..b4cd370874fa --- /dev/null +++ b/api/envoy/config/ratelimit/v2/rls.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; + +package envoy.config.ratelimit.v2; +option go_package = "v2"; + +import "envoy/api/v2/core/grpc_service.proto"; + +import "validate/validate.proto"; + +// [#protodoc-title: Rate limit service] + +// Rate limit :ref:`configuration overview `. +message RateLimitServiceConfig { + oneof service_specifier { + option (validate.required) = true; + + // Specifies the cluster manager cluster name that hosts the rate limit + // service. The client will connect to this cluster when it needs to make + // rate limit service requests. This field is deprecated and `grpc_service` + // should be used instead. The :ref:`Envoy gRPC client + // ` will be used when this field is + // specified. + string cluster_name = 1 [(validate.rules).string.min_bytes = 1, deprecated = true]; + + // Specifies the gRPC service that hosts the rate limit service. The client + // will connect to this cluster when it needs to make rate limit service + // requests. 
+ envoy.api.v2.core.GrpcService grpc_service = 2; + } +} diff --git a/api/envoy/config/trace/v2/BUILD b/api/envoy/config/trace/v2/BUILD new file mode 100644 index 000000000000..19fa06a45228 --- /dev/null +++ b/api/envoy/config/trace/v2/BUILD @@ -0,0 +1,22 @@ +load("//bazel:api_build_system.bzl", "api_proto_library", "api_go_proto_library", "api_go_grpc_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "trace", + srcs = ["trace.proto"], + visibility = [ + "//envoy/config/bootstrap/v2:__pkg__", + ], + deps = [ + "//envoy/api/v2/core:grpc_service", + ], +) + +api_go_proto_library( + name = "trace", + proto = ":trace", + deps = [ + "//envoy/api/v2/core:grpc_service_go_proto", + ], +) diff --git a/api/envoy/config/trace/v2/trace.proto b/api/envoy/config/trace/v2/trace.proto new file mode 100644 index 000000000000..c1154cf06c20 --- /dev/null +++ b/api/envoy/config/trace/v2/trace.proto @@ -0,0 +1,77 @@ +// [#protodoc-title: Tracing] +// Tracing :ref:`architecture overview `. + +syntax = "proto3"; + +package envoy.config.trace.v2; +option go_package = "v2"; + +import "envoy/api/v2/core/grpc_service.proto"; + +import "google/protobuf/struct.proto"; + +import "validate/validate.proto"; + +// The tracing configuration specifies global +// settings for the HTTP tracer used by Envoy. The configuration is defined by +// the :ref:`Bootstrap ` :ref:`tracing +// ` field. Envoy may support other tracers +// in the future, but right now the HTTP tracer is the only one supported. +message Tracing { + message Http { + // The name of the HTTP trace driver to instantiate. The name must match a + // supported HTTP trace driver. *envoy.lightstep*, *envoy.zipkin*, and + // *envoy.dynamic.ot* are built-in trace drivers. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // Trace driver specific configuration which depends on the driver being + // instantiated. 
See the :ref:`LightstepConfig + // `, :ref:`ZipkinConfig + // `, and :ref:`DynamicOtConfig + // ` trace drivers for examples. + google.protobuf.Struct config = 2; + } + // Provides configuration for the HTTP tracer. + Http http = 1; +} + +// Configuration for the LightStep tracer. +message LightstepConfig { + // The cluster manager cluster that hosts the LightStep collectors. + string collector_cluster = 1 [(validate.rules).string.min_bytes = 1]; + + // File containing the access token to the `LightStep + // `_ API. + string access_token_file = 2 [(validate.rules).string.min_bytes = 1]; +} + +message ZipkinConfig { + // The cluster manager cluster that hosts the Zipkin collectors. Note that the + // Zipkin cluster must be defined in the :ref:`Bootstrap static cluster + // resources `. + string collector_cluster = 1 [(validate.rules).string.min_bytes = 1]; + + // The API endpoint of the Zipkin service where the spans will be sent. When + // using a standard Zipkin installation, the API endpoint is typically + // /api/v1/spans, which is the default value. + string collector_endpoint = 2 [(validate.rules).string.min_bytes = 1]; +} + +// DynamicOtConfig is used to dynamically load a tracer from a shared library +// that implements the `OpenTracing dynamic loading API +// `_. +message DynamicOtConfig { + // Dynamic library implementing the `OpenTracing API + // `_. + string library = 1 [(validate.rules).string.min_bytes = 1]; + + // The configuration to use when creating a tracer from the given dynamic + // library. + google.protobuf.Struct config = 2; +} + +// Configuration structure. +message TraceServiceConfig { + // The upstream gRPC cluster that hosts the trace service. 
+ envoy.api.v2.core.GrpcService grpc_service = 1 [(validate.rules).message.required = true]; +} diff --git a/api/envoy/service/README.md b/api/envoy/service/README.md new file mode 100644 index 000000000000..831b740a0ea8 --- /dev/null +++ b/api/envoy/service/README.md @@ -0,0 +1,3 @@ +Protocol buffer definitions for gRPC and REST services. + +Visibility should be constrained to none (default). diff --git a/api/envoy/service/accesslog/v2/BUILD b/api/envoy/service/accesslog/v2/BUILD new file mode 100644 index 000000000000..cdf5b36a8b7d --- /dev/null +++ b/api/envoy/service/accesslog/v2/BUILD @@ -0,0 +1,14 @@ +load("//bazel:api_build_system.bzl", "api_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "als", + srcs = ["als.proto"], + has_services = 1, + deps = [ + "//envoy/api/v2/core:base", + "//envoy/api/v2/core:grpc_service", + "//envoy/config/filter/accesslog/v2:accesslog", + ], +) diff --git a/api/envoy/service/accesslog/v2/als.proto b/api/envoy/service/accesslog/v2/als.proto new file mode 100644 index 000000000000..ec60b6d1b961 --- /dev/null +++ b/api/envoy/service/accesslog/v2/als.proto @@ -0,0 +1,66 @@ +syntax = "proto3"; + +package envoy.service.accesslog.v2; +option go_package = "v2"; +option java_generic_services = true; + +import "envoy/api/v2/core/base.proto"; +import "envoy/config/filter/accesslog/v2/accesslog.proto"; + +import "validate/validate.proto"; + +// Service for streaming access logs from Envoy to an access log server. +service AccessLogService { + // Envoy will connect and send StreamAccessLogsMessage messages forever. It does not expect any + // response to be sent as nothing would be done in the case of failure. The server should + // disconnect if it expects Envoy to reconnect. In the future we may decide to add a different + // API for "critical" access logs in which Envoy will buffer access logs for some period of time + // until it gets an ACK so it could then retry. 
This API is designed for high throughput with the + // expectation that it might be lossy. + rpc StreamAccessLogs(stream StreamAccessLogsMessage) returns (StreamAccessLogsResponse) { + } +} + +// Empty response for the StreamAccessLogs API. Will never be sent. See below. +// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. +message StreamAccessLogsResponse { +} + +// [#proto-status: experimental] +// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. +// Stream message for the StreamAccessLogs API. Envoy will open a stream to the server and stream +// access logs without ever expecting a response. +message StreamAccessLogsMessage { + message Identifier { + // The node sending the access log messages over the stream. + envoy.api.v2.core.Node node = 1 [(validate.rules).message.required = true]; + + // The friendly name of the log configured in AccessLogServiceConfig. + string log_name = 2 [(validate.rules).string.min_bytes = 1]; + } + + // Identifier data that will only be sent in the first message on the stream. This is effectively + // structured metadata and is a performance optimization. + Identifier identifier = 1; + + // Wrapper for batches of HTTP access log entries. + message HTTPAccessLogEntries { + repeated envoy.config.filter.accesslog.v2.HTTPAccessLogEntry log_entry = 1 + [(validate.rules).repeated .min_items = 1]; + } + + // Wrapper for batches of TCP access log entries. + message TCPAccessLogEntries { + repeated envoy.config.filter.accesslog.v2.TCPAccessLogEntry log_entry = 1 + [(validate.rules).repeated .min_items = 1]; + } + + // Batches of log entries of a single type. Generally speaking, a given stream should only + // ever include one type of log entry. 
+ oneof log_entries { + option (validate.required) = true; + + HTTPAccessLogEntries http_logs = 2; + TCPAccessLogEntries tcp_logs = 3; + } +} diff --git a/api/envoy/service/auth/v2alpha/BUILD b/api/envoy/service/auth/v2alpha/BUILD new file mode 100644 index 000000000000..8a7f5ee509d7 --- /dev/null +++ b/api/envoy/service/auth/v2alpha/BUILD @@ -0,0 +1,24 @@ +load("//bazel:api_build_system.bzl", "api_proto_library", "api_go_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "attribute_context", + srcs = [ + "attribute_context.proto", + ], + deps = [ + "//envoy/api/v2/core:address", + ], +) + +api_proto_library( + name = "external_auth", + srcs = [ + "external_auth.proto", + ], + has_services = 1, + deps = [ + ":attribute_context", + ], +) diff --git a/api/envoy/service/auth/v2alpha/attribute_context.proto b/api/envoy/service/auth/v2alpha/attribute_context.proto new file mode 100644 index 000000000000..a8c4bd0b4023 --- /dev/null +++ b/api/envoy/service/auth/v2alpha/attribute_context.proto @@ -0,0 +1,133 @@ +syntax = "proto3"; + +// [#proto-status: draft] + +package envoy.service.auth.v2alpha; + +import "envoy/api/v2/core/address.proto"; + +import "google/protobuf/timestamp.proto"; + +// An attribute is a piece of metadata that describes an activity on a network. +// For example, the size of an HTTP request, or the status code of an HTTP response. +// +// Each attribute has a type and a name, which is logically defined as a proto message field +// of the `AttributeContext`. The `AttributeContext` is a collection of individual attributes +// supported by Envoy authorization system. +message AttributeContext { + // This message defines attributes for a node that handles a network request. + // The node can be either a service or an application that sends, forwards, + // or receives the request. Service peers should fill in the `service`, + // `principal`, and `labels` as appropriate. 
+ message Peer { + // The address of the peer, this is typically the IP address. + // It can also be UDS path, or others. + envoy.api.v2.core.Address address = 1; + + // The canonical service name of the peer. + // It should be set to :ref:`the HTTP x-envoy-downstream-service-cluster + // ` + // If a more trusted source of the service name is available through mTLS/secure naming, it + // should be used. + string service = 2; + + // The labels associated with the peer. + // These could be pod labels for Kubernetes or tags for VMs. + // The source of the labels could be an X.509 certificate or other configuration. + map labels = 3; + + // The authenticated identity of this peer. + // For example, the identity associated with the workload such as a service account. + // If an X.509 certificate is used to assert the identity this field should be sourced from + // `Subject` or `Subject Alternative Names`. The primary identity should be the principal. + // The principal format is issuer specific. + // + // Example: + // * SPIFFE format is `spiffe://trust-domain/path` + // * Google account format is `https://accounts.google.com/{userid}` + string principal = 4; + } + + // Represents a network request, such as an HTTP request. + message Request { + // The timestamp when the proxy receives the first byte of the request. + google.protobuf.Timestamp time = 1; + + // Represents an HTTP request or an HTTP-like request. + HttpRequest http = 2; + + // More request types are added here as necessary. + } + + // This message defines attributes for an HTTP request. + // HTTP/1.x, HTTP/2, gRPC are all considered as HTTP requests. + message HttpRequest { + // The unique ID for a request, which can be propagated to downstream + // systems. The ID should have low probability of collision + // within a single day for a specific service. + // For HTTP requests, it should be X-Request-ID or equivalent. + string id = 1; + + // The HTTP request method, such as `GET`, `POST`. 
+ string method = 2; + + // The HTTP request headers. If multiple headers share the same key, they + // must be merged according to the HTTP spec. All header keys must be + // lowercased, because HTTP header keys are case-insensitive. + map headers = 3; + + // The HTTP URL path. + string path = 4; + + // The HTTP request `Host` or 'Authority` header value. + string host = 5; + + // The HTTP URL scheme, such as `http` and `https`. + string scheme = 6; + + // The HTTP URL query in the format of `name1=value`&name2=value2`, as it + // appears in the first line of the HTTP request. No decoding is performed. + string query = 7; + + // The HTTP URL fragment, excluding leading `#`. No URL decoding is performed. + string fragment = 8; + + // The HTTP request size in bytes. If unknown, it must be -1. + int64 size = 9; + + // The network protocol used with the request, such as + // "http/1.1", "spdy/3", "h2", "h2c" + string protocol = 10; + } + + // The source of a network activity, such as starting a TCP connection. + // In a multi hop network activity, the source represents the sender of the + // last hop. + Peer source = 1; + + // The destination of a network activity, such as accepting a TCP connection. + // In a multi hop network activity, the destination represents the receiver of + // the last hop. + Peer destination = 2; + + // Represents a network request, such as an HTTP request. + Request request = 4; + + // This is analogous to http_request.headers, however these contents will not be sent to the + // upstream server. Context_extensions provide an extension mechanism for sending additional + // information to the auth server without modifying the proto definition. It maps to the internal + // opaque context in the filter chain. 
+ map context_extensions = 10; +} + +// The following items are left out of this proto +// Request.Auth field for jwt tokens +// Request.Api for api management +// Origin peer that originated the request +// Caching Protocol +// request_context return values to inject back into the filter chain +// peer.claims -- from X.509 extensions +// Configuration +// - field mask to send +// - which return values from request_context are copied back +// - which return values are copied into request_headers diff --git a/api/envoy/service/auth/v2alpha/external_auth.proto b/api/envoy/service/auth/v2alpha/external_auth.proto new file mode 100644 index 000000000000..29acfa29cc75 --- /dev/null +++ b/api/envoy/service/auth/v2alpha/external_auth.proto @@ -0,0 +1,44 @@ +syntax = "proto3"; + +// [#proto-status: draft] + +package envoy.service.auth.v2alpha; +option go_package = "v2alpha"; +option java_generic_services = true; + +import "envoy/service/auth/v2alpha/attribute_context.proto"; + +import "google/rpc/status.proto"; +import "validate/validate.proto"; + +// A generic interface for performing authorization check on incoming +// requests to a networked service. +service Authorization { + // Performs authorization check based on the attributes associated with the + // incoming request, and returns status `OK` or not `OK`. + rpc Check(CheckRequest) returns (CheckResponse); +} + +message CheckRequest { + // The request attributes. + AttributeContext attributes = 1; +} + +message CheckResponse { + // Status `OK` allows the request. Any other status indicates the request should be denied. + google.rpc.Status status = 1; + + // An optional message that contains HTTP response attributes. This message is + // used when the authorization service needs to send custom responses to the + // downstream client or, to modify/add request headers being dispatched to the upstream. + message HttpResponse { + // Http status code. 
+ uint32 status_code = 1 [(validate.rules).uint32 = {gte: 100, lt: 600}]; + + // Http entity headers. + map headers = 2; + + // Http entity body. + string body = 3; + } +} diff --git a/api/envoy/service/discovery/v2/BUILD b/api/envoy/service/discovery/v2/BUILD new file mode 100644 index 000000000000..12f376a27903 --- /dev/null +++ b/api/envoy/service/discovery/v2/BUILD @@ -0,0 +1,58 @@ +load("//bazel:api_build_system.bzl", "api_proto_library", "api_go_proto_library", "api_go_grpc_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "ads", + srcs = ["ads.proto"], + has_services = 1, + deps = [ + "//envoy/api/v2:discovery", + ], +) + +api_go_grpc_library( + name = "ads", + proto = ":ads", + deps = [ + "//envoy/api/v2:discovery_go_proto", + ], +) + +api_proto_library( + name = "hds", + srcs = ["hds.proto"], + has_services = 1, + deps = [ + "//envoy/api/v2/core:base", + "//envoy/api/v2/core:health_check", + "//envoy/api/v2/endpoint", + ], +) + +api_go_grpc_library( + name = "hds", + proto = ":hds", + deps = [ + "//envoy/api/v2/core:base_go_proto", + "//envoy/api/v2/core:health_check_go_proto", + "//envoy/api/v2/endpoint:endpoint_go_proto", + ], +) + +api_proto_library( + name = "sds", + srcs = ["sds.proto"], + has_services = 1, + deps = [ + "//envoy/api/v2:discovery", + ], +) + +api_go_grpc_library( + name = "sds", + proto = ":sds", + deps = [ + "//envoy/api/v2:discovery_go_proto", + ], +) diff --git a/api/envoy/service/discovery/v2/ads.proto b/api/envoy/service/discovery/v2/ads.proto new file mode 100644 index 000000000000..821ccb341db5 --- /dev/null +++ b/api/envoy/service/discovery/v2/ads.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; + +package envoy.service.discovery.v2; +option go_package = "v2"; +option java_generic_services = true; + +import "envoy/api/v2/discovery.proto"; + +// [#not-implemented-hide:] Not configuration. 
Workaround c++ protobuf issue with importing +// services: https://github.com/google/protobuf/issues/4221 +message AdsDummy { +} + +// [#not-implemented-hide:] Discovery services for endpoints, clusters, routes, +// and listeners are retained in the package `envoy.api.v2` for backwards +// compatibility with existing management servers. New development in discovery +// services should proceed in the package `envoy.service.discovery.v2`. + +// See https://github.com/lyft/envoy-api#apis for a description of the role of +// ADS and how it is intended to be used by a management server. ADS requests +// have the same structure as their singleton xDS counterparts, but can +// multiplex many resource types on a single stream. The type_url in the +// DiscoveryRequest/DiscoveryResponse provides sufficient information to recover +// the multiplexed singleton APIs at the Envoy instance and management server. +service AggregatedDiscoveryService { + // This is a gRPC-only API. + rpc StreamAggregatedResources(stream envoy.api.v2.DiscoveryRequest) + returns (stream envoy.api.v2.DiscoveryResponse) { + } +} diff --git a/api/envoy/service/discovery/v2/hds.proto b/api/envoy/service/discovery/v2/hds.proto new file mode 100644 index 000000000000..cdf6011973c2 --- /dev/null +++ b/api/envoy/service/discovery/v2/hds.proto @@ -0,0 +1,123 @@ +syntax = "proto3"; + +package envoy.service.discovery.v2; + +option java_generic_services = true; + +import "envoy/api/v2/core/base.proto"; +import "envoy/api/v2/core/health_check.proto"; +import "envoy/api/v2/endpoint/endpoint.proto"; + +import "google/api/annotations.proto"; +import "google/protobuf/duration.proto"; + +// [#proto-status: experimental] +// HDS is Health Discovery Service. It compliments Envoy’s health checking +// service by designating this Envoy to be a healthchecker for a subset of hosts +// in the cluster. 
The status of these health checks will be reported to the +// management server, where it can be aggregated etc and redistributed back to +// Envoy through EDS. +service HealthDiscoveryService { + // 1. Envoy starts up and if its can_healthcheck option in the static + // bootstrap config is enabled, sends HealthCheckRequest to the management + // server. It supplies its capabilities (which protocol it can health check + // with, what zone it resides in, etc.). + // 2. In response to (1), the management server designates this Envoy as a + // healthchecker to health check a subset of all upstream hosts for a given + // cluster (for example upstream Host 1 and Host 2). It streams + // HealthCheckSpecifier messages with cluster related configuration for all + // clusters this Envoy is designated to health check. Subsequent + // HealthCheckSpecifier message will be sent on changes to: + // a. Endpoints to health checks + // b. Per cluster configuration change + // 3. Envoy creates a health probe based on the HealthCheck config and sends + // it to endpoint(ip:port) of Host 1 and 2. Based on the HealthCheck + // configuration Envoy waits upon the arrival of the probe response and + // looks at the content of the response to decide whether the endpoint is + // healthy or not. If a response hasn’t been received within the timeout + // interval, the endpoint health status is considered TIMEOUT. + // 4. Envoy reports results back in an EndpointHealthResponse message. + // Envoy streams responses as often as the interval configured by the + // management server in HealthCheckSpecifier. + // 5. The management Server collects health statuses for all endpoints in the + // cluster (for all clusters) and uses this information to construct + // EndpointDiscoveryResponse messages. + // 6. Once Envoy has a list of upstream endpoints to send traffic to, it load + // balances traffic to them without additional health checking. It may + // use inline healthcheck (i.e. 
consider endpoint UNHEALTHY if connection + // failed to a particular endpoint to account for health status propagation + // delay between HDS and EDS). + // By default, can_healthcheck is true. If can_healthcheck is false, Cluster + // configuration may not contain HealthCheck message. + // TODO(htuch): How is can_healthcheck communicated to CDS to ensure the above + // invariant? + // TODO(htuch): Add @amb67's diagram. + rpc StreamHealthCheck(stream HealthCheckRequestOrEndpointHealthResponse) + returns (stream HealthCheckSpecifier) { + } + + // TODO(htuch): Unlike the gRPC version, there is no stream-based binding of + // request/response. Should we add an identifier to the HealthCheckSpecifier + // to bind with the response? + rpc FetchHealthCheck(HealthCheckRequestOrEndpointHealthResponse) returns (HealthCheckSpecifier) { + option (google.api.http) = { + post: "/v2/discovery:health_check" + body: "*" + }; + } +} + +// Defines supported protocols etc, so the management server can assign proper +// endpoints to healthcheck. +message Capability { + // Different Envoy instances may have different capabilities (e.g. Redis) + // and/or have ports enabled for different protocols. 
+ enum Protocol { + HTTP = 0; + TCP = 1; + REDIS = 2; + } + repeated Protocol health_check_protocol = 1; +} + +message HealthCheckRequest { + envoy.api.v2.core.Node node = 1; + Capability capability = 2; +} + +message EndpointHealth { + envoy.api.v2.endpoint.Endpoint endpoint = 1; + envoy.api.v2.core.HealthStatus health_status = 2; +} + +message EndpointHealthResponse { + repeated EndpointHealth endpoints_health = 1; +} + +message HealthCheckRequestOrEndpointHealthResponse { + oneof request_type { + HealthCheckRequest health_check_request = 1; + EndpointHealthResponse endpoint_health_response = 2; + } +} + +message LocalityEndpoints { + envoy.api.v2.core.Locality locality = 1; + repeated envoy.api.v2.endpoint.Endpoint endpoints = 2; +} + +// The cluster name and locality is provided to Envoy for the endpoints that it +// health checks to support statistics reporting, logging and debugging by the +// Envoy instance (outside of HDS). For maximum usefulness, it should match the +// same cluster structure as that provided by EDS. +message ClusterHealthCheck { + string cluster_name = 1; + repeated envoy.api.v2.core.HealthCheck health_checks = 2; + repeated LocalityEndpoints endpoints = 3; +} + +message HealthCheckSpecifier { + repeated ClusterHealthCheck health_check = 1; + // The default is 1 second. + google.protobuf.Duration interval = 2; +} diff --git a/api/envoy/service/discovery/v2/sds.proto b/api/envoy/service/discovery/v2/sds.proto new file mode 100644 index 000000000000..4bea525fa453 --- /dev/null +++ b/api/envoy/service/discovery/v2/sds.proto @@ -0,0 +1,25 @@ +syntax = "proto3"; + +package envoy.service.discovery.v2; + +import "envoy/api/v2/discovery.proto"; + +import "google/api/annotations.proto"; + +// [#not-implemented-hide:] Not configuration. 
Workaround c++ protobuf issue with importing +// services: https://github.com/google/protobuf/issues/4221 +message SdsDummy { +} + +service SecretDiscoveryService { + rpc StreamSecrets(stream envoy.api.v2.DiscoveryRequest) + returns (stream envoy.api.v2.DiscoveryResponse) { + } + + rpc FetchSecrets(envoy.api.v2.DiscoveryRequest) returns (envoy.api.v2.DiscoveryResponse) { + option (google.api.http) = { + post: "/v2/discovery:secrets" + body: "*" + }; + } +} diff --git a/api/envoy/service/load_stats/v2/BUILD b/api/envoy/service/load_stats/v2/BUILD new file mode 100644 index 000000000000..4fc650019a75 --- /dev/null +++ b/api/envoy/service/load_stats/v2/BUILD @@ -0,0 +1,22 @@ +load("//bazel:api_build_system.bzl", "api_proto_library", "api_go_proto_library", "api_go_grpc_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "lrs", + srcs = ["lrs.proto"], + has_services = 1, + deps = [ + "//envoy/api/v2/core:base", + "//envoy/api/v2/endpoint:load_report", + ], +) + +api_go_grpc_library( + name = "lrs", + proto = ":lrs", + deps = [ + "//envoy/api/v2/core:base_go_proto", + "//envoy/api/v2/endpoint:load_report_go_proto", + ], +) diff --git a/api/envoy/service/load_stats/v2/lrs.proto b/api/envoy/service/load_stats/v2/lrs.proto new file mode 100644 index 000000000000..2181fa0ff16f --- /dev/null +++ b/api/envoy/service/load_stats/v2/lrs.proto @@ -0,0 +1,68 @@ +syntax = "proto3"; + +package envoy.service.load_stats.v2; +option go_package = "v2"; +option java_generic_services = true; + +import "envoy/api/v2/core/base.proto"; +import "envoy/api/v2/endpoint/load_report.proto"; + +import "google/protobuf/duration.proto"; + +import "validate/validate.proto"; + +// [#protodoc-title: Load reporting service] + +service LoadReportingService { + // Advanced API to allow for multi-dimensional load balancing by remote + // server. 
For receiving LB assignments, the steps are: + // 1, The management server is configured with per cluster/zone/load metric + // capacity configuration. The capacity configuration definition is + // outside of the scope of this document. + // 2. Envoy issues a standard {Stream,Fetch}Endpoints request for the clusters + // to balance. + // + // Independently, Envoy will initiate a StreamLoadStats bidi stream with a + // management server: + // 1. Once a connection establishes, the management server publishes a + // LoadStatsResponse for all clusters it is interested in learning load + // stats about. + // 2. For each cluster, Envoy load balances incoming traffic to upstream hosts + // based on per-zone weights and/or per-instance weights (if specified) + // based on intra-zone LbPolicy. This information comes from the above + // {Stream,Fetch}Endpoints. + // 3. When upstream hosts reply, they optionally add header with ASCII representation of EndpointLoadMetricStats. + // 4. Envoy aggregates load reports over the period of time given to it in + // LoadStatsResponse.load_reporting_interval. This includes aggregation + // stats Envoy maintains by itself (total_requests, rpc_errors etc.) as + // well as load metrics from upstream hosts. + // 5. When the timer of load_reporting_interval expires, Envoy sends new + // LoadStatsRequest filled with load reports for each cluster. + // 6. The management server uses the load reports from all reported Envoys + // from around the world, computes global assignment and prepares traffic + // assignment destined for each zone Envoys are located in. Goto 2. + rpc StreamLoadStats(stream LoadStatsRequest) returns (stream LoadStatsResponse) { + } +} + +// A load report Envoy sends to the management server. +// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. +message LoadStatsRequest { + // Node identifier for Envoy instance. + envoy.api.v2.core.Node node = 1; + + // A list of load stats to report. 
+ repeated envoy.api.v2.endpoint.ClusterStats cluster_stats = 2; +} + +// The management server sends envoy a LoadStatsResponse with all clusters it +// is interested in learning load stats about. +// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. +message LoadStatsResponse { + // Clusters to report stats for. + repeated string clusters = 1 [(validate.rules).repeated .min_items = 1]; + + // The interval of time to collect stats. The default is 10 seconds. + google.protobuf.Duration load_reporting_interval = 2; +} diff --git a/api/envoy/service/metrics/v2/BUILD b/api/envoy/service/metrics/v2/BUILD new file mode 100644 index 000000000000..cdaf519d7a82 --- /dev/null +++ b/api/envoy/service/metrics/v2/BUILD @@ -0,0 +1,15 @@ +load("//bazel:api_build_system.bzl", "api_proto_library", "api_go_proto_library", "api_go_grpc_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "metrics_service", + srcs = ["metrics_service.proto"], + has_services = 1, + require_py = 0, + deps = [ + "//envoy/api/v2/core:base", + "//envoy/api/v2/core:grpc_service", + "@promotheus_metrics_model//:client_model", + ], +) diff --git a/api/envoy/service/metrics/v2/metrics_service.proto b/api/envoy/service/metrics/v2/metrics_service.proto new file mode 100644 index 000000000000..34c219a2bef8 --- /dev/null +++ b/api/envoy/service/metrics/v2/metrics_service.proto @@ -0,0 +1,39 @@ +syntax = "proto3"; + +// [#proto-status: draft] + +package envoy.service.metrics.v2; +option go_package = "v2"; +option java_generic_services = true; + +import "envoy/api/v2/core/base.proto"; + +import "metrics.proto"; + +import "validate/validate.proto"; + +// Service for streaming metrics to server that consumes the metrics data. It uses Prometheus metric +// data model as a standard to represent metrics information. +service MetricsService { + // Envoy will connect and send StreamMetricsMessage messages forever. 
It does not expect any + // response to be sent as nothing would be done in the case of failure. + rpc StreamMetrics(stream StreamMetricsMessage) returns (StreamMetricsResponse) { + } +} + +message StreamMetricsResponse { +} + +message StreamMetricsMessage { + message Identifier { + // The node sending metrics over the stream. + envoy.api.v2.core.Node node = 1 [(validate.rules).message.required = true]; + } + + // Identifier data effectively is a structured metadata. As a performance optimization this will + // only be sent in the first message on the stream. + Identifier identifier = 1; + + // A list of metric entries + repeated io.prometheus.client.MetricFamily envoy_metrics = 2; +} diff --git a/api/envoy/service/ratelimit/v2/BUILD b/api/envoy/service/ratelimit/v2/BUILD new file mode 100644 index 000000000000..d934b8480b8d --- /dev/null +++ b/api/envoy/service/ratelimit/v2/BUILD @@ -0,0 +1,22 @@ +load("//bazel:api_build_system.bzl", "api_proto_library", "api_go_grpc_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "rls", + srcs = ["rls.proto"], + has_services = 1, + deps = [ + "//envoy/api/v2/core:grpc_service", + "//envoy/api/v2/ratelimit", + ], +) + +api_go_grpc_library( + name = "rls", + proto = ":rls", + deps = [ + "//envoy/api/v2/core:grpc_service_go_proto", + "//envoy/api/v2/ratelimit:ratelimit_go_proto", + ], +) diff --git a/api/envoy/service/ratelimit/v2/rls.proto b/api/envoy/service/ratelimit/v2/rls.proto new file mode 100644 index 000000000000..c1a416fcc842 --- /dev/null +++ b/api/envoy/service/ratelimit/v2/rls.proto @@ -0,0 +1,78 @@ +syntax = "proto3"; + +package envoy.service.ratelimit.v2; +option go_package = "v2"; + +import "envoy/api/v2/ratelimit/ratelimit.proto"; + +import "validate/validate.proto"; + +service RateLimitService { + // Determine whether rate limiting should take place. + rpc ShouldRateLimit(RateLimitRequest) returns (RateLimitResponse) { + } +} + +// Main message for a rate limit request. 
The rate limit service is designed to be fully generic +// in the sense that it can operate on arbitrary hierarchical key/value pairs. The loaded +// configuration will parse the request and find the most specific limit to apply. In addition, +// a RateLimitRequest can contain multiple "descriptors" to limit on. When multiple descriptors +// are provided, the server will limit on *ALL* of them and return an OVER_LIMIT response if any +// of them are over limit. This enables more complex application level rate limiting scenarios +// if desired. +// [#not-implemented-hide:] Hiding API for now. +message RateLimitRequest { + // All rate limit requests must specify a domain. This enables the configuration to be per + // application without fear of overlap. E.g., "envoy". + string domain = 1; + + // All rate limit requests must specify at least one RateLimitDescriptor. Each descriptor is + // processed by the service (see below). If any of the descriptors are over limit, the entire + // request is considered to be over limit. + repeated envoy.api.v2.ratelimit.RateLimitDescriptor descriptors = 2; + + // Rate limit requests can optionally specify the number of hits a request adds to the matched + // limit. If the value is not set in the message, a request increases the matched limit by 1. + uint32 hits_addend = 3; +} + +// A response from a ShouldRateLimit call. +// [#not-implemented-hide:] Hiding API for now. +message RateLimitResponse { + enum Code { + UNKNOWN = 0; + OK = 1; + OVER_LIMIT = 2; + } + + // Defines an actual rate limit in terms of requests per unit of time and the unit itself. + message RateLimit { + enum Unit { + UNKNOWN = 0; + SECOND = 1; + MINUTE = 2; + HOUR = 3; + DAY = 4; + } + + uint32 requests_per_unit = 1; + Unit unit = 2; + } + + message DescriptorStatus { + // The response code for an individual descriptor. + Code code = 1; + // The current limit as configured by the server. Useful for debugging, etc. 
+ RateLimit current_limit = 2; + // The limit remaining in the current time unit. + uint32 limit_remaining = 3; + } + + // The overall response code which takes into account all of the descriptors that were passed + // in the RateLimitRequest message. + Code overall_code = 1; + // A list of DescriptorStatus messages which matches the length of the descriptor list passed + // in the RateLimitRequest. This can be used by the caller to determine which individual + // descriptors failed and/or what the currently configured limits are for all of them. + repeated DescriptorStatus statuses = 2; +} diff --git a/api/envoy/service/trace/v2/BUILD b/api/envoy/service/trace/v2/BUILD new file mode 100644 index 000000000000..ecd4d504ac9c --- /dev/null +++ b/api/envoy/service/trace/v2/BUILD @@ -0,0 +1,14 @@ +load("//bazel:api_build_system.bzl", "api_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "trace_service", + srcs = ["trace_service.proto"], + has_services = 1, + require_py = 0, + deps = [ + "//envoy/api/v2/core:base", + "@io_opencensus_trace//:trace_model", + ], +) diff --git a/api/envoy/service/trace/v2/trace_service.proto b/api/envoy/service/trace/v2/trace_service.proto new file mode 100644 index 000000000000..795f61f916cf --- /dev/null +++ b/api/envoy/service/trace/v2/trace_service.proto @@ -0,0 +1,42 @@ +syntax = "proto3"; + +// [#proto-status: draft] + +package envoy.service.trace.v2; +option go_package = "v2"; +option java_generic_services = true; + +import "envoy/api/v2/core/base.proto"; +import "trace.proto"; + +import "google/api/annotations.proto"; + +import "validate/validate.proto"; + +// Service for streaming traces to server that consumes the trace data. It +// uses OpenCensus data model as a standard to represent trace information. +service TraceService { + // Envoy will connect and send StreamTracesMessage messages forever. It does + // not expect any response to be sent as nothing would be done in the case + // of failure. 
+ rpc StreamTraces(stream StreamTracesMessage) returns (StreamTracesResponse) { + } +} + +message StreamTracesResponse { +} + +message StreamTracesMessage { + message Identifier { + // The node sending the access log messages over the stream. + envoy.api.v2.core.Node node = 1 [(validate.rules).message.required = true]; + } + + // Identifier data effectively is a structured metadata. + // As a performance optimization this will only be sent in the first message + // on the stream. + Identifier identifier = 1; + + // A list of Span entries + repeated opencensus.proto.trace.Span spans = 2; +} diff --git a/api/envoy/type/BUILD b/api/envoy/type/BUILD new file mode 100644 index 000000000000..7db404b327e9 --- /dev/null +++ b/api/envoy/type/BUILD @@ -0,0 +1,25 @@ +load("//bazel:api_build_system.bzl", "api_proto_library", "api_go_proto_library") + +licenses(["notice"]) # Apache 2 + +api_proto_library( + name = "percent", + srcs = ["percent.proto"], + visibility = ["//visibility:public"], +) + +api_go_proto_library( + name = "percent", + proto = ":percent", +) + +api_proto_library( + name = "range", + srcs = ["range.proto"], + visibility = ["//visibility:public"], +) + +api_go_proto_library( + name = "range", + proto = ":range", +) diff --git a/api/envoy/type/percent.proto b/api/envoy/type/percent.proto new file mode 100644 index 000000000000..4dfbb360c03c --- /dev/null +++ b/api/envoy/type/percent.proto @@ -0,0 +1,50 @@ +syntax = "proto3"; + +package envoy.type; + +import "google/protobuf/wrappers.proto"; + +import "validate/validate.proto"; +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: Percent] + +// Identifies a percentage, in the range [0.0, 100.0]. +message Percent { + double value = 1 [(validate.rules).double = {gte: 0, lte: 100}]; +} + +// A fractional percentage is used in cases in which for performance reasons performing floating +// point to integer conversions during randomness calculations is undesirable. 
The message includes +// both a numerator and denominator that together determine the final fractional value. +// +// * **Example**: 1/100 = 1%. +// * **Example**: 3/10000 = 0.03%. +message FractionalPercent { + // Specifies the numerator. Defaults to 0. + uint32 numerator = 1; + + // Fraction percentages support several fixed denominator values. + enum DenominatorType { + // 100. + // + // **Example**: 1/100 = 1%. + HUNDRED = 0; + + // 10,000. + // + // **Example**: 1/10000 = 0.01%. + TEN_THOUSAND = 1; + + // 1,000,000. + // + // **Example**: 1/1000000 = 0.0001%. + MILLION = 2; + } + + // Specifies the denominator. If the denominator specified is less than the numerator, the final + // fractional percentage is capped at 1 (100%). + DenominatorType denominator = 2 [(validate.rules).enum.defined_only = true]; +} diff --git a/api/envoy/type/range.proto b/api/envoy/type/range.proto new file mode 100644 index 000000000000..fd6045e7fd28 --- /dev/null +++ b/api/envoy/type/range.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package envoy.type; +option go_package = "envoy_type"; + +import "gogoproto/gogo.proto"; + +option (gogoproto.equal_all) = true; + +// [#protodoc-title: Range] + +// Specifies the int64 start and end of the range using half-open interval semantics [start, +// end). 
+message Int64Range { + // start of the range (inclusive) + int64 start = 1; + + // end of the range (exclusive) + int64 end = 2; +} diff --git a/api/examples/service_envoy/BUILD b/api/examples/service_envoy/BUILD new file mode 100644 index 000000000000..58174bad7640 --- /dev/null +++ b/api/examples/service_envoy/BUILD @@ -0,0 +1,6 @@ +licenses(["notice"]) # Apache 2 + +exports_files([ + "http_connection_manager.pb", + "listeners.pb", +]) diff --git a/api/examples/service_envoy/http_connection_manager.pb b/api/examples/service_envoy/http_connection_manager.pb new file mode 100644 index 000000000000..aed3d9bbf469 --- /dev/null +++ b/api/examples/service_envoy/http_connection_manager.pb @@ -0,0 +1,25 @@ +codec_type: AUTO + +stat_prefix: "ingress_http" + +route_config { + virtual_hosts { + name: "service" + domains: "*" + routes { + match { + prefix: "/service" + } + route { + cluster: "local_service" + timeout { + seconds: 0 + } + } + } + } +} + +http_filters { + name: "router" +} diff --git a/api/examples/service_envoy/listeners.pb b/api/examples/service_envoy/listeners.pb new file mode 100644 index 000000000000..6cbd8b8ba054 --- /dev/null +++ b/api/examples/service_envoy/listeners.pb @@ -0,0 +1,11 @@ +address { + socket_address { + protocol: TCP + port_value: 80 + } +} +filter_chains { + filters { + name: "http_connection_manager" + } +} diff --git a/api/test/build/BUILD b/api/test/build/BUILD new file mode 100644 index 000000000000..d331c5a9fc42 --- /dev/null +++ b/api/test/build/BUILD @@ -0,0 +1,39 @@ +load("//bazel:api_build_system.bzl", "api_cc_test", "api_go_test") + +licenses(["notice"]) # Apache 2 + +api_cc_test( + name = "build_test", + srcs = ["build_test.cc"], + proto_deps = [ + "//envoy/api/v2:cds", + "//envoy/api/v2:eds", + "//envoy/api/v2:lds", + "//envoy/api/v2:rds", + "//envoy/service/accesslog/v2:als", + "//envoy/service/discovery/v2:ads", + "//envoy/service/discovery/v2:hds", + "//envoy/service/metrics/v2:metrics_service", + 
"//envoy/service/ratelimit/v2:rls", + ], +) + +api_go_test( + name = "go_build_test", + size = "small", + srcs = ["go_build_test.go"], + importpath = "go_build_test", + deps = [ + "//envoy/api/v2:cds_go_grpc", + "//envoy/api/v2:eds_go_grpc", + "//envoy/api/v2:lds_go_grpc", + "//envoy/api/v2:rds_go_grpc", + "//envoy/api/v2/auth:auth_go_proto", + "//envoy/api/v2/auth:cert_go_proto", + "//envoy/config/bootstrap/v2:bootstrap_go_proto", + "//envoy/service/discovery/v2:ads_go_grpc", + "//envoy/service/discovery/v2:hds_go_grpc", + "//envoy/service/discovery/v2:sds_go_grpc", + "//envoy/service/ratelimit/v2:rls_go_grpc", + ], +) diff --git a/api/test/build/build_test.cc b/api/test/build/build_test.cc new file mode 100644 index 000000000000..6397c6391ec8 --- /dev/null +++ b/api/test/build/build_test.cc @@ -0,0 +1,34 @@ +// NOLINT(namespace-envoy) +#include +#include + +#include "google/protobuf/descriptor.h" + +// Basic C++ build/link validation for the v2 xDS APIs. +int main(int argc, char* argv[]) { + const auto methods = { + "envoy.api.v2.ClusterDiscoveryService.FetchClusters", + "envoy.api.v2.ClusterDiscoveryService.StreamClusters", + "envoy.api.v2.EndpointDiscoveryService.FetchEndpoints", + "envoy.api.v2.EndpointDiscoveryService.StreamEndpoints", + "envoy.api.v2.ListenerDiscoveryService.FetchListeners", + "envoy.api.v2.ListenerDiscoveryService.StreamListeners", + "envoy.api.v2.RouteDiscoveryService.FetchRoutes", + "envoy.api.v2.RouteDiscoveryService.StreamRoutes", + "envoy.service.discovery.v2.AggregatedDiscoveryService.StreamAggregatedResources", + "envoy.service.discovery.v2.HealthDiscoveryService.FetchHealthCheck", + "envoy.service.discovery.v2.HealthDiscoveryService.StreamHealthCheck", + "envoy.service.accesslog.v2.AccessLogService.StreamAccessLogs", + "envoy.service.metrics.v2.MetricsService.StreamMetrics", + "envoy.service.ratelimit.v2.RateLimitService.ShouldRateLimit", + }; + + for (const auto& method : methods) { + if 
(google::protobuf::DescriptorPool::generated_pool()->FindMethodByName(method) == nullptr) { + std::cout << "Unable to find method descriptor for " << method << std::endl; + exit(EXIT_FAILURE); + } + } + + exit(EXIT_SUCCESS); +} diff --git a/api/test/build/go_build_test.go b/api/test/build/go_build_test.go new file mode 100644 index 000000000000..e97e3c86de9d --- /dev/null +++ b/api/test/build/go_build_test.go @@ -0,0 +1,21 @@ +package go_build_test + +import ( + "testing" + + _ "github.com/envoyproxy/data-plane-api/api/ads" + _ "github.com/envoyproxy/data-plane-api/api/auth" + _ "github.com/envoyproxy/data-plane-api/api/bootstrap" + _ "github.com/envoyproxy/data-plane-api/api/cds" + _ "github.com/envoyproxy/data-plane-api/api/cert" + _ "github.com/envoyproxy/data-plane-api/api/eds" + _ "github.com/envoyproxy/data-plane-api/api/hds" + _ "github.com/envoyproxy/data-plane-api/api/lds" + _ "github.com/envoyproxy/data-plane-api/api/rds" + _ "github.com/envoyproxy/data-plane-api/api/rls" + _ "github.com/envoyproxy/data-plane-api/api/sds" +) + +func TestNoop(t *testing.T) { + // Noop test that verifies the successful importation of Envoy V2 API protos +} diff --git a/api/test/validate/BUILD b/api/test/validate/BUILD new file mode 100644 index 000000000000..467a3e7488ae --- /dev/null +++ b/api/test/validate/BUILD @@ -0,0 +1,33 @@ +load("//bazel:api_build_system.bzl", "api_cc_test", "api_proto_library") + +licenses(["notice"]) # Apache 2 + +api_cc_test( + name = "pgv_test", + srcs = ["pgv_test.cc"], + proto_deps = [ + "//envoy/api/v2:cds", + "//envoy/api/v2:eds", + "//envoy/api/v2:lds", + "//envoy/api/v2:rds", + "//envoy/api/v2/core:protocol", + "//envoy/api/v2/listener", + "//envoy/api/v2/route", + "//envoy/config/bootstrap/v2:bootstrap", + "//envoy/config/filter/accesslog/v2:accesslog", + "//envoy/config/filter/http/buffer/v2:buffer", + "//envoy/config/filter/http/fault/v2:fault", + "//envoy/config/filter/http/gzip/v2:gzip", + 
"//envoy/config/filter/http/health_check/v2:health_check", + "//envoy/config/filter/http/ip_tagging/v2:ip_tagging", + "//envoy/config/filter/http/lua/v2:lua", + "//envoy/config/filter/http/router/v2:router", + "//envoy/config/filter/http/squash/v2:squash", + "//envoy/config/filter/http/transcoder/v2:transcoder", + "//envoy/config/filter/network/http_connection_manager/v2:http_connection_manager", + "//envoy/config/filter/network/mongo_proxy/v2:mongo_proxy", + "//envoy/config/filter/network/redis_proxy/v2:redis_proxy", + "//envoy/config/filter/network/tcp_proxy/v2:tcp_proxy", + "//envoy/config/health_checker/redis/v2:redis", + ], +) diff --git a/api/test/validate/pgv_test.cc b/api/test/validate/pgv_test.cc new file mode 100644 index 000000000000..a8f0b3d60ff4 --- /dev/null +++ b/api/test/validate/pgv_test.cc @@ -0,0 +1,76 @@ +// NOLINT(namespace-envoy) +#include +#include + +// We don't use all the headers in the test below, but including them anyway as +// a cheap way to get some C++ compiler sanity checking. 
+#include "envoy/api/v2/cds.pb.validate.h" +#include "envoy/api/v2/eds.pb.validate.h" +#include "envoy/api/v2/lds.pb.validate.h" +#include "envoy/api/v2/rds.pb.validate.h" +#include "envoy/api/v2/core/protocol.pb.validate.h" +#include "envoy/config/health_checker/redis/v2/redis.pb.validate.h" +#include "envoy/config/filter/accesslog/v2/accesslog.pb.validate.h" +#include "envoy/config/filter/http/buffer/v2/buffer.pb.validate.h" +#include "envoy/config/filter/http/fault/v2/fault.pb.validate.h" +#include "envoy/config/filter/http/gzip/v2/gzip.pb.validate.h" +#include "envoy/config/filter/http/health_check/v2/health_check.pb.validate.h" +#include "envoy/config/filter/http/ip_tagging/v2/ip_tagging.pb.validate.h" +#include "envoy/config/filter/http/lua/v2/lua.pb.validate.h" +#include "envoy/config/filter/http/router/v2/router.pb.validate.h" +#include "envoy/config/filter/http/squash/v2/squash.pb.validate.h" +#include "envoy/config/filter/http/transcoder/v2/transcoder.pb.validate.h" +#include "envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.pb.validate.h" +#include "envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.pb.validate.h" +#include "envoy/config/filter/network/redis_proxy/v2/redis_proxy.pb.validate.h" +#include "envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.pb.validate.h" +#include "envoy/api/v2/listener/listener.pb.validate.h" +#include "envoy/api/v2/route/route.pb.validate.h" +#include "envoy/config/bootstrap/v2/bootstrap.pb.validate.h" + +#include "google/protobuf/text_format.h" + +template struct TestCase { + void run() { + std::string err; + if (Validate(invalid_message, &err)) { + std::cerr << "Unexpected successful validation of invalid message: " + << invalid_message.DebugString() << std::endl; + exit(EXIT_FAILURE); + } + if (!Validate(valid_message, &err)) { + std::cerr << "Unexpected failed validation of valid message: " << valid_message.DebugString() + << ", " << err << std::endl; + exit(EXIT_FAILURE); + } + } + 
+ Proto& invalid_message; + Proto& valid_message; +}; + +// Basic protoc-gen-validate C++ validation header inclusion and Validate calls +// from data plane API. +int main(int argc, char* argv[]) { + envoy::config::bootstrap::v2::Bootstrap invalid_bootstrap; + // This is a baseline test of the validation features we care about. It's + // probably not worth adding in every filter and field that we want to valid + // in the API upfront, but as regressions occur, this is the place to add the + // specific case. + const std::string valid_bootstrap_text = R"EOF( + node {} + cluster_manager {} + admin { + access_log_path: "/dev/null" + address {} + } + )EOF"; + envoy::config::bootstrap::v2::Bootstrap valid_bootstrap; + if (!google::protobuf::TextFormat::ParseFromString(valid_bootstrap_text, &valid_bootstrap)) { + std::cerr << "Unable to parse text proto: " << valid_bootstrap_text << std::endl; + exit(EXIT_FAILURE); + } + TestCase{invalid_bootstrap, valid_bootstrap}.run(); + + exit(EXIT_SUCCESS); +} diff --git a/api/tools/BUILD b/api/tools/BUILD new file mode 100644 index 000000000000..668ea6fedb93 --- /dev/null +++ b/api/tools/BUILD @@ -0,0 +1,23 @@ +licenses(["notice"]) # Apache 2 + +py_binary( + name = "generate_listeners", + srcs = ["generate_listeners.py"], + licenses = ["notice"], # Apache 2 + visibility = ["//visibility:public"], + deps = [ + "//envoy/api/v2:lds_py", + "//envoy/config/filter/network/http_connection_manager/v2:http_connection_manager_py", + ], +) + +py_test( + name = "generate_listeners_test", + srcs = ["generate_listeners_test.py"], + data = [ + "//examples/service_envoy:http_connection_manager.pb", + "//examples/service_envoy:listeners.pb", + ], + visibility = ["//visibility:public"], + deps = [":generate_listeners"], +) diff --git a/api/tools/generate_listeners.py b/api/tools/generate_listeners.py new file mode 100644 index 000000000000..fdcfb1385277 --- /dev/null +++ b/api/tools/generate_listeners.py @@ -0,0 +1,67 @@ +# Map from listeners proto, 
with holes where filter config fragments should go, and +# a list of filter config fragment protos, to a final listeners.pb with the +# config fragments converted to the opaque Struct representation. + +import sys + +# Some evil hack to deal with the fact that Bazel puts both google/api and +# google/protobuf roots in the sys.path, and Python gets confused, e.g. it +# thinks that there is no api package if it encounters the google/protobuf root +# in sys.path first. +from pkgutil import extend_path +import google +google.__path__ = extend_path(google.__path__, google.__name__) + +from google.protobuf import json_format +from google.protobuf import struct_pb2 +from google.protobuf import text_format + +from envoy.api.v2 import lds_pb2 +from envoy.config.filter.network.http_connection_manager.v2 import http_connection_manager_pb2 + + +# Convert an arbitrary proto object to its Struct proto representation. +def ProtoToStruct(proto): + json_rep = json_format.MessageToJson(proto) + parsed_msg = struct_pb2.Struct() + json_format.Parse(json_rep, parsed_msg) + return parsed_msg + + +# Parse a proto from the filesystem. +def ParseProto(path, filter_name): + # We only know about some filter config protos ahead of time. 
+ KNOWN_FILTERS = { + 'http_connection_manager': + lambda: http_connection_manager_pb2.HttpConnectionManager() + } + filter_config = KNOWN_FILTERS[filter_name]() + with open(path, 'r') as f: + text_format.Merge(f.read(), filter_config) + return filter_config + + +def GenerateListeners(listeners_pb_path, output_pb_path, output_json_path, + fragments): + listener = lds_pb2.Listener() + with open(listeners_pb_path, 'r') as f: + text_format.Merge(f.read(), listener) + + for filter_chain in listener.filter_chains: + for f in filter_chain.filters: + f.config.CopyFrom(ProtoToStruct(ParseProto(fragments.next(), f.name))) + + with open(output_pb_path, 'w') as f: + f.write(str(listener)) + + with open(output_json_path, 'w') as f: + f.write(json_format.MessageToJson(listener)) + + +if __name__ == '__main__': + if len(sys.argv) < 4: + print('Usage: %s ') % sys.argv[0] + sys.exit(1) + + GenerateListeners(sys.argv[1], sys.argv[2], sys.argv[3], iter(sys.argv[4:])) diff --git a/api/tools/generate_listeners_test.py b/api/tools/generate_listeners_test.py new file mode 100644 index 000000000000..bf20a6f0e013 --- /dev/null +++ b/api/tools/generate_listeners_test.py @@ -0,0 +1,15 @@ +"""Tests for generate_listeners.""" + +import os + +import generate_listeners + +if __name__ == "__main__": + srcdir = os.path.join(os.getenv("TEST_SRCDIR"), 'envoy_api') + generate_listeners.GenerateListeners( + os.path.join(srcdir, "examples/service_envoy/listeners.pb"), + "/dev/stdout", "/dev/stdout", + iter([ + os.path.join(srcdir, + "examples/service_envoy/http_connection_manager.pb") + ])) diff --git a/bazel/EXTERNAL_DEPS.md b/bazel/EXTERNAL_DEPS.md index ff36f7d37111..4c125f523ea9 100644 --- a/bazel/EXTERNAL_DEPS.md +++ b/bazel/EXTERNAL_DEPS.md @@ -65,6 +65,3 @@ to point to a local copy. 
The option can used multiple times to override multipl The name of the dependency can be found in [the repository locations file.](https://github.com/envoyproxy/envoy/blob/master/bazel/repository_locations.bzl) The path of the local copy has to be absolute path. - -For example you can point the data plane API to a local copy checked out in home directory and run tests: -`bazel test --override_repository="envoy_api={$HOME}/data-plane-api"` diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index f07724cb9bc1..7b3ba8242984 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -101,6 +101,27 @@ _default_envoy_build_config = repository_rule( }, ) +def _default_envoy_api_impl(ctx): + ctx.file("WORKSPACE", "") + ctx.file("BUILD.bazel", "") + api_dirs = [ + "bazel", + "docs", + "envoy", + "examples", + "test", + "tools", + ] + for d in api_dirs: + ctx.symlink(ctx.path(ctx.attr.api).dirname.get_child(d), d) + +_default_envoy_api = repository_rule( + implementation = _default_envoy_api_impl, + attrs = { + "api": attr.label(default="@envoy//api:BUILD"), + }, +) + # Python dependencies. If these become non-trivial, we might be better off using a virtualenv to # wrap them, but for now we can treat them as first-class Bazel. def _python_deps(): @@ -140,12 +161,23 @@ def _go_deps(skip_targets): _repository_impl("io_bazel_rules_go") def _envoy_api_deps(): - _repository_impl("envoy_api") + # Treat the data plane API as an external repo, this simplifies exporting the API to + # https://github.com/envoyproxy/data-plane-api. 
+ if "envoy_api" not in native.existing_rules().keys(): + _default_envoy_api(name="envoy_api") native.bind( name = "http_api_protos", actual = "@googleapis//:http_api_protos", ) + _repository_impl( + name = "six_archive", + build_file = "@com_google_protobuf//:six.BUILD", + ) + native.bind( + name = "six", + actual = "@six_archive//:six", + ) def envoy_dependencies(path = "@envoy_deps//", skip_targets = []): envoy_repository = repository_rule( diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index d3f974e1bff2..189d6670714b 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -75,10 +75,6 @@ REPOSITORY_LOCATIONS = dict( strip_prefix = "protobuf-3.5.0", urls = ["https://github.com/google/protobuf/archive/v3.5.0.tar.gz"], ), - envoy_api = dict( - commit = "2dcc435e8ae1d35f8c3a4fa9f132778482fb1a78", - remote = "https://github.com/envoyproxy/data-plane-api", - ), grpc_httpjson_transcoding = dict( commit = "e4f58aa07b9002befa493a0a82e10f2e98b51fc6", remote = "https://github.com/grpc-ecosystem/grpc-httpjson-transcoding", @@ -87,6 +83,11 @@ REPOSITORY_LOCATIONS = dict( commit = "0.10.3", remote = "https://github.com/bazelbuild/rules_go", ), + six_archive = dict( + sha256 = "105f8d68616f8248e24bf0e9372ef04d3cc10104f1980f54d57b2ce73a5ad56a", + strip_prefix = "", + urls = ["https://pypi.python.org/packages/source/s/six/six-1.10.0.tar.gz#md5=34eed507548117b2ab523ab14b2f8b55"], + ), # I'd love to name this `com_github_google_subpar`, but something in the Subpar # code assumes its repository name is just `subpar`. subpar = dict( diff --git a/ci/README.md b/ci/README.md index b15745db41a2..3c0884029222 100644 --- a/ci/README.md +++ b/ci/README.md @@ -77,6 +77,7 @@ The build artifact can be found in `/tmp/envoy-docker-build/envoy/source/exe/env The `./ci/run_envoy_docker.sh './ci/do_ci.sh '` targets are: +* `bazel.api` — build and run API tests under `-c fastbuild` with clang-5.0. 
* `bazel.asan` — build and run tests under `-c dbg --config=clang-asan` with clang-5.0. * `bazel.debug` — build Envoy static binary and run tests under `-c dbg`. * `bazel.debug.server_only` — build Envoy static binary under `-c dbg`. @@ -88,7 +89,7 @@ The `./ci/run_envoy_docker.sh './ci/do_ci.sh '` targets are: * `bazel.tsan` — build and run tests under `-c dbg --config=clang-tsan` with clang-5.0. * `check_format`— run `clang-format` 5.0 and `buildifier` on entire source tree. * `fix_format`— run and enforce `clang-format` 5.0 and `buildifier` on entire source tree. -* `doc`— build documentation tree in `generated/docs`. +* `docs`— build documentation tree in `generated/docs`. # Testing changes to the build image as a developer diff --git a/ci/api_mirror.sh b/ci/api_mirror.sh new file mode 100755 index 000000000000..dc2dafcfbae3 --- /dev/null +++ b/ci/api_mirror.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +set -e + +# TODO(htuch): Remove this once we've verified this script works. +set -x + +if [ -z "$CIRCLE_PULL_REQUEST" ] && [ "$CIRCLE_BRANCH" == "master" ] +then + echo "Cloning..." 
+ git clone git@github.com:envoyproxy/envoyproxy.github.io "$CHECKOUT_DIR" + + git -C "$CHECKOUT_DIR" config user.name "data-plane-api(CircleCI)" + git -C "$CHECKOUT_DIR" config user.email data-plane-api@users.noreply.github.com + git -C "$CHECKOUT_DIR" fetch + git -C "$CHECKOUT_DIR" checkout -B master origin/master + + # Determine last envoyproxy/envoy SHA in envoyproxy/data-plane-api + MIRROR_MSG="Mirrored from https://github.com/envoyproxy/envoy" + LAST_ENVOY_SHA=$(git -C "$CHECKOUT_DIR" log --grep="$MIRROR_MSG" -n 1 | grep "$MIRROR_MSG" | \ + tail -n 1 | sed -e "s#.*$MIRROR_MSG @ ##") + + echo "Last mirrored envoyproxy/envoy SHA is $LAST_ENVOY_SHA" + + # Compute SHA sequence to replay in envoyproxy/data-plane-api + SHAS=$(git rev-list --reverse "$LAST_ENVOY_SHA"..HEAD api/) + + # For each SHA, hard reset, rsync api/ and generate commit in + # envoyproxy/data-plane-api + API_WORKING_DIR="$TMPDIR/envoy-api-mirror" + git worktree add "$API_WORKING_DIR" master + for sha in $SHAS + do + git -C "$API_WORKING_DIR" reset --hard "$sha" + COMMIT_MSG=$(git -C "$API_WORKING_DIR" log --format=%B -n 1) + QUALIFIED_COMMIT_MSG=$(echo -e "$COMMIT_MSG\n\n$MIRROR_MSG @ $sha") + rsync -av "$API_WORKING_DIR"/api/* "$CHECKOUT_DIR" + git -C "$CHECKOUT_DIR" add . + git -C "$CHECKOUT_DIR" commit -m "$QUALIFIED_COMMIT_MSG" + done + git worktree remove --force "$API_WORKING_DIR" + + echo "Pushing..." + git -C "$CHECKOUT_DIR" push origin master + echo "Done" +fi diff --git a/ci/do_ci.sh b/ci/do_ci.sh index fe6a2428fc2e..177b0ee1546b 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -111,6 +111,14 @@ elif [[ "$1" == "bazel.ipv6_tests" ]]; then cd "${ENVOY_CI_DIR}" bazel --batch test ${BAZEL_TEST_OPTIONS} -c fastbuild //test/integration/... //test/common/network/... exit 0 +elif [[ "$1" == "bazel.api" ]]; then + setup_clang_toolchain + cd "${ENVOY_CI_DIR}" + echo "Building API..." + bazel --batch build ${BAZEL_BUILD_OPTIONS} -c fastbuild @envoy_api//envoy/... + echo "Testing API..." 
+ bazel --batch test ${BAZEL_TEST_OPTIONS} -c fastbuild @envoy_api//test/... @envoy_api//tools/... + exit 0 elif [[ "$1" == "bazel.coverage" ]]; then setup_gcc_toolchain echo "bazel coverage build with tests..." @@ -165,7 +173,8 @@ elif [[ "$1" == "check_format" ]]; then ./tools/check_format.py check exit 0 elif [[ "$1" == "docs" ]]; then - docs/publish.sh + echo "generating docs..." + docs/build.sh exit 0 else echo "Invalid do_ci.sh target, see ci/README.md for valid targets." diff --git a/docs/README.md b/docs/README.md index 3d2dfa1373a4..119596fec980 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,11 +1,17 @@ # Developer-local docs build -See [data-plane-api](https://github.com/envoyproxy/data-plane-api/blob/master/docs/README.md). +```bash +./docs/build.sh +``` + +The output can be found in `generated/docs`. # How the Envoy website and docs are updated 1. The docs are published to [docs/envoy/latest](https://github.com/envoyproxy/envoyproxy.github.io/tree/master/docs/envoy/latest) - on every commit to master in [data-plane-api](https://github.com/envoyproxy/data-plane-api). + on every commit to master. This process is handled by CircleCI with the + [`publish.sh`](https://github.com/envoyproxy/envoy/blob/master/docs/publish.sh) script. + 2. The docs are published to [docs/envoy](https://github.com/envoyproxy/envoyproxy.github.io/tree/master/docs/envoy) in a directory named after every tagged commit in this repo. Thus, on every tagged release there are snapped docs. diff --git a/docs/build.sh b/docs/build.sh new file mode 100755 index 000000000000..b04f6e11c3a2 --- /dev/null +++ b/docs/build.sh @@ -0,0 +1,111 @@ +#!/bin/bash + +set -e + +# We need to set ENVOY_DOCS_VERSION_STRING and ENVOY_DOCS_RELEASE_LEVEL for Sphinx. +# We also validate that the tag and version match at this point if needed. +if [ -n "$CIRCLE_TAG" ] +then + # Check the git tag matches the version number in the VERSION file. 
+ VERSION_NUMBER=$(cat VERSION) + if [ "v${VERSION_NUMBER}" != "${CIRCLE_TAG}" ]; then + echo "Given git tag does not match the VERSION file content:" + echo "${CIRCLE_TAG} vs $(cat VERSION)" + exit 1 + fi + # Check the version_history.rst contains current release version. + grep --fixed-strings "$VERSION_NUMBER" docs/root/intro/version_history.rst + # Now that we now there is a match, we can use the tag. + export ENVOY_DOCS_VERSION_STRING="tag-$CIRCLE_TAG" + export ENVOY_DOCS_RELEASE_LEVEL=tagged +else + BUILD_SHA=$(git rev-parse HEAD) + VERSION_NUM=$(cat VERSION) + export ENVOY_DOCS_VERSION_STRING="${VERSION_NUM}"-data-plane-api-"${BUILD_SHA:0:6}" + export ENVOY_DOCS_RELEASE_LEVEL=pre-release +fi + +SCRIPT_DIR=$(dirname "$0") +BUILD_DIR=build_docs +[[ -z "${DOCS_OUTPUT_DIR}" ]] && DOCS_OUTPUT_DIR=generated/docs +[[ -z "${GENERATED_RST_DIR}" ]] && GENERATED_RST_DIR=generated/rst + +rm -rf "${DOCS_OUTPUT_DIR}" +mkdir -p "${DOCS_OUTPUT_DIR}" + +rm -rf "${GENERATED_RST_DIR}" +mkdir -p "${GENERATED_RST_DIR}" + +if [ ! -d "${BUILD_DIR}"/venv ]; then + virtualenv "${BUILD_DIR}"/venv --no-site-packages --python=python2.7 + "${BUILD_DIR}"/venv/bin/pip install -r "${SCRIPT_DIR}"/requirements.txt +fi + +source "${BUILD_DIR}"/venv/bin/activate + +bazel --batch build ${BAZEL_BUILD_OPTIONS} @envoy_api//docs:protos --aspects \ + tools/protodoc/protodoc.bzl%proto_doc_aspect --output_groups=rst --action_env=CPROFILE_ENABLED --spawn_strategy=standalone + +# These are the protos we want to put in docs, this list will grow. +# TODO(htuch): Factor this out of this script. 
+PROTO_RST=" + /envoy/api/v2/core/address/envoy/api/v2/core/address.proto.rst + /envoy/api/v2/core/base/envoy/api/v2/core/base.proto.rst + /envoy/api/v2/core/config_source/envoy/api/v2/core/config_source.proto.rst + /envoy/api/v2/core/grpc_service/envoy/api/v2/core/grpc_service.proto.rst + /envoy/api/v2/core/health_check/envoy/api/v2/core/health_check.proto.rst + /envoy/api/v2/core/protocol/envoy/api/v2/core/protocol.proto.rst + /envoy/api/v2/auth/cert/envoy/api/v2/auth/cert.proto.rst + /envoy/api/v2/eds/envoy/api/v2/eds.proto.rst + /envoy/api/v2/endpoint/endpoint/envoy/api/v2/endpoint/endpoint.proto.rst + /envoy/api/v2/cds/envoy/api/v2/cds.proto.rst + /envoy/api/v2/cluster/outlier_detection/envoy/api/v2/cluster/outlier_detection.proto.rst + /envoy/api/v2/cluster/circuit_breaker/envoy/api/v2/cluster/circuit_breaker.proto.rst + /envoy/api/v2/rds/envoy/api/v2/rds.proto.rst + /envoy/api/v2/route/route/envoy/api/v2/route/route.proto.rst + /envoy/api/v2/lds/envoy/api/v2/lds.proto.rst + /envoy/api/v2/listener/listener/envoy/api/v2/listener/listener.proto.rst + /envoy/api/v2/ratelimit/ratelimit/envoy/api/v2/ratelimit/ratelimit.proto.rst + /envoy/config/bootstrap/v2/bootstrap/envoy/config/bootstrap/v2/bootstrap.proto.rst + /envoy/api/v2/discovery/envoy/api/v2/discovery.proto.rst + /envoy/config/ratelimit/v2/rls/envoy/config/ratelimit/v2/rls.proto.rst + /envoy/config/metrics/v2/metrics_service/envoy/config/metrics/v2/metrics_service.proto.rst + /envoy/config/metrics/v2/stats/envoy/config/metrics/v2/stats.proto.rst + /envoy/config/trace/v2/trace/envoy/config/trace/v2/trace.proto.rst + /envoy/config/filter/accesslog/v2/accesslog/envoy/config/filter/accesslog/v2/accesslog.proto.rst + /envoy/config/filter/fault/v2/fault/envoy/config/filter/fault/v2/fault.proto.rst + /envoy/config/filter/http/buffer/v2/buffer/envoy/config/filter/http/buffer/v2/buffer.proto.rst + /envoy/config/filter/http/fault/v2/fault/envoy/config/filter/http/fault/v2/fault.proto.rst + 
/envoy/config/filter/http/gzip/v2/gzip/envoy/config/filter/http/gzip/v2/gzip.proto.rst + /envoy/config/filter/http/health_check/v2/health_check/envoy/config/filter/http/health_check/v2/health_check.proto.rst + /envoy/config/filter/http/ip_tagging/v2/ip_tagging/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto.rst + /envoy/config/filter/http/lua/v2/lua/envoy/config/filter/http/lua/v2/lua.proto.rst + /envoy/config/filter/http/rate_limit/v2/rate_limit/envoy/config/filter/http/rate_limit/v2/rate_limit.proto.rst + /envoy/config/filter/http/router/v2/router/envoy/config/filter/http/router/v2/router.proto.rst + /envoy/config/filter/http/squash/v2/squash/envoy/config/filter/http/squash/v2/squash.proto.rst + /envoy/config/filter/http/transcoder/v2/transcoder/envoy/config/filter/http/transcoder/v2/transcoder.proto.rst + /envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto.rst + /envoy/config/filter/network/http_connection_manager/v2/http_connection_manager/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto.rst + /envoy/config/filter/network/mongo_proxy/v2/mongo_proxy/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto.rst + /envoy/config/filter/network/rate_limit/v2/rate_limit/envoy/config/filter/network/rate_limit/v2/rate_limit.proto.rst + /envoy/config/filter/network/redis_proxy/v2/redis_proxy/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto.rst + /envoy/config/filter/network/tcp_proxy/v2/tcp_proxy/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto.rst + /envoy/type/percent/envoy/type/percent.proto.rst + /envoy/type/range/envoy/type/range.proto.rst +" + +# Dump all the generated RST so they can be added to PROTO_RST easily. +find -L bazel-bin/external/envoy_api -name "*.proto.rst" + +# Only copy in the protos we care about and know how to deal with in protodoc. 
+for p in $PROTO_RST +do + DEST="${GENERATED_RST_DIR}/api-v2/$(sed -e 's#/envoy\/.*/envoy/##' <<< "$p")" + mkdir -p "$(dirname "${DEST}")" + cp -f bazel-bin/external/envoy_api/"${p}" "$(dirname "${DEST}")" + [ -n "${CPROFILE_ENABLED}" ] && cp -f bazel-bin/"${p}".profile "$(dirname "${DEST}")" +done + +rsync -av "${SCRIPT_DIR}"/root/ "${SCRIPT_DIR}"/conf.py "${GENERATED_RST_DIR}" + +sphinx-build -W -b html "${GENERATED_RST_DIR}" "${DOCS_OUTPUT_DIR}" diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 000000000000..acdb58837e40 --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,221 @@ +# -*- coding: utf-8 -*- +# +# envoy documentation build configuration file, created by +# sphinx-quickstart on Sat May 28 10:51:27 2016. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sphinx_rtd_theme +import sys +import os + +def setup(app): + app.add_config_value('release_level', '', 'env') + +if not os.environ.get('ENVOY_DOCS_RELEASE_LEVEL'): + raise Exception("ENVOY_DOCS_RELEASE_LEVEL env var must be defined") + +release_level = os.environ['ENVOY_DOCS_RELEASE_LEVEL'] + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +#sys.path.insert(0, os.path.abspath('.')) + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. 
+extensions = ['sphinxcontrib.httpdomain', 'sphinx.ext.extlinks', 'sphinx.ext.ifconfig'] +extlinks = { + 'repo': ('https://github.com/envoyproxy/envoy/blob/master/%s', ''), + 'api': ('https://github.com/envoyproxy/envoy/blob/master/api/%s', ''), +} + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# source_suffix = ['.rst', '.md'] +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'envoy' +copyright = u'2016-2018, Envoy Project Authors' +author = u'Envoy Project Authors' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. + +if not os.environ.get('ENVOY_DOCS_VERSION_STRING'): + raise Exception("ENVOY_DOCS_VERSION_STRING env var must be defined") + +# The short X.Y version. +version = os.environ['ENVOY_DOCS_VERSION_STRING'] +# The full version, including alpha/beta/rc tags. +release = os.environ['ENVOY_DOCS_VERSION_STRING'] + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. 
+# This patterns also effect to html_static_path and html_extra_path +exclude_patterns = ['_build', '_venv', 'Thumbs.db', '.DS_Store'] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +#keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = False + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = 'sphinx_rtd_theme' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] + +# The name for this set of Sphinx documents. +# " v documentation" by default. +#html_title = u'envoy v1.0.0' + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. 
+#html_logo = None + +# The name of an image file (relative to this directory) to use as a favicon of +# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +html_favicon = 'favicon.ico' + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +#html_extra_path = [] + +# If not None, a 'Last updated on:' timestamp is inserted at every page +# bottom, using the given strftime format. +# The empty string is equivalent to '%b %d, %Y'. +#html_last_updated_fmt = None + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_domain_indices = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +#html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +#html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. 
The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. +# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh' +#html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. +# 'ja' uses this config value. +# 'zh' user can custom change `jieba` dictionary path. +#html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. +#html_search_scorer = 'scorer.js' + +# Output file base name for HTML help builder. +htmlhelp_basename = 'envoydoc' diff --git a/docs/publish.sh b/docs/publish.sh index 5bd0d3a2a43b..b146c42994cb 100755 --- a/docs/publish.sh +++ b/docs/publish.sh @@ -1,45 +1,29 @@ #!/bin/bash -set -e +# This is run on every commit that CircleCI picks up. It assumes that docs have already been built +# via docs/build.sh. The push behavior differs depending on the nature of the commit: +# * Tag commit (e.g. v1.6.0): pushes docs to versioned location, e.g. +# https://www.envoyproxy.io/docs/envoy/v1.6.0/. +# * Master commit: pushes docs to https://www.envoyproxy.io/docs/envoy/latest/. +# * Otherwise: noop. -if [ -z "$CIRCLE_TAG" ] -then - echo "Ignoring non-tag for docs push" - exit 0 -fi - -export ENVOY_DOCS_VERSION_STRING="tag-$CIRCLE_TAG" -export ENVOY_DOCS_RELEASE_LEVEL=tagged - -# Build the docs by cloning data-plane-api and building at the embedded SHA. 
-DOCS_BUILD_DIR="${BUILD_DIR}"/docs -rm -rf "${DOCS_BUILD_DIR}" generated/docs generated/rst -mkdir -p "${DOCS_BUILD_DIR}" -ENVOY_API=$(bazel/git_repository_info.py envoy_api) -read -a GIT_INFO <<< "${ENVOY_API}" -pushd "${DOCS_BUILD_DIR}" -git clone "${GIT_INFO[0]}" -cd data-plane-api -git checkout "${GIT_INFO[1]}" -# Check the git tag matches the version number in the VERSION file. -VERSION_NUMBER=$(cat VERSION) -if [ "v${VERSION_NUMBER}" != "${CIRCLE_TAG}" ]; then - echo "Given git tag does not match the VERSION file content:" - echo "${CIRCLE_TAG} vs $(cat VERSION)" - exit 1 -fi -# Check the version_history.rst contains current release version. -grep --fixed-strings "$VERSION_NUMBER" docs/root/intro/version_history.rst -./docs/build.sh -popd -rsync -av "${DOCS_BUILD_DIR}"/data-plane-api/generated/* generated/ +set -e -# Now publish them into a directory specific to the tag. DOCS_DIR=generated/docs CHECKOUT_DIR=../envoy-docs -PUBLISH_DIR="$CHECKOUT_DIR"/docs/envoy/"$CIRCLE_TAG" BUILD_SHA=`git rev-parse HEAD` +if [ -n "$CIRCLE_TAG" ] +then + PUBLISH_DIR="$CHECKOUT_DIR"/docs/envoy/"$CIRCLE_TAG" +elif [ -z "$CIRCLE_PULL_REQUEST" ] && [ "$CIRCLE_BRANCH" == "master" ] +then + PUBLISH_DIR="$CHECKOUT_DIR"/docs/envoy/latest +else + echo "Ignoring docs push" + exit 0 +fi + echo 'cloning' git clone git@github.com:envoyproxy/envoyproxy.github.io "$CHECKOUT_DIR" diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 000000000000..b5285c86366e --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,19 @@ +GitPython==2.0.8 +Jinja2==2.9.6 +MarkupSafe==1.0 +Pygments==2.2.0 +alabaster==0.7.10 +babel==2.4.0 +docutils==0.12 +gitdb==0.6.4 +imagesize==0.7.1 +pytz==2017.2 +requests==2.13.0 +six==1.10.0 +smmap==0.9.0 +snowballstemmer==1.2.1 +sphinx==1.6.5 +sphinxcontrib-httpdomain==1.5.0 + +# Fix for https://github.com/rtfd/sphinx_rtd_theme/issues/397 +git+https://github.com/rtfd/sphinx_rtd_theme@9d704f287ac197dfb1c9b27f0acfb91267dce4f1 diff --git 
a/docs/root/_static/docker_compose_v0.1.svg b/docs/root/_static/docker_compose_v0.1.svg new file mode 100644 index 000000000000..55236771d500 --- /dev/null +++ b/docs/root/_static/docker_compose_v0.1.svg @@ -0,0 +1,4 @@ + + + + diff --git a/docs/root/_static/double_proxy.svg b/docs/root/_static/double_proxy.svg new file mode 100644 index 000000000000..60a9cfcade0f --- /dev/null +++ b/docs/root/_static/double_proxy.svg @@ -0,0 +1,4 @@ + + + + diff --git a/docs/root/_static/front_proxy.svg b/docs/root/_static/front_proxy.svg new file mode 100644 index 000000000000..97c2a325232a --- /dev/null +++ b/docs/root/_static/front_proxy.svg @@ -0,0 +1,4 @@ + + + + diff --git a/docs/root/_static/placeholder b/docs/root/_static/placeholder new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/docs/root/_static/service_to_service.svg b/docs/root/_static/service_to_service.svg new file mode 100644 index 000000000000..08f4cb398bc3 --- /dev/null +++ b/docs/root/_static/service_to_service.svg @@ -0,0 +1,4 @@ + + + + diff --git a/docs/root/about_docs.rst b/docs/root/about_docs.rst new file mode 100644 index 000000000000..b0544dd30f38 --- /dev/null +++ b/docs/root/about_docs.rst @@ -0,0 +1,20 @@ +About the documentation +======================= + +The Envoy documentation is composed of a few major sections: + +* :ref:`Introduction `: This section covers a general overview of what Envoy is, an + architecture overview, how it is typically deployed, etc. +* :ref:`Getting Started `: Quickly get started with Envoy using Docker. +* :ref:`Installation `: How to build/install Envoy using Docker. +* :ref:`Configuration `: Detailed configuration instructions common to both the legacy v1 + API and the new v2 API. Where relevant, the configuration guide also contains information on + statistics, runtime configuration, and APIs. 
+* :ref:`Operations `: General information on how to operate Envoy including the command + line interface, hot restart wrapper, administration interface, a general statistics overview, + etc. +* :ref:`Extending Envoy `: Information on how to write custom filters for Envoy. +* :ref:`v1 API reference `: Configuration details specific to the legacy + v1 API. +* :ref:`v2 API reference `: Configuration details specific to the new v2 API. +* :ref:`Envoy FAQ `: Have questions? We have answers. Hopefully. diff --git a/docs/root/api-v1/access_log.rst b/docs/root/api-v1/access_log.rst new file mode 100644 index 000000000000..1dbcb559dfcb --- /dev/null +++ b/docs/root/api-v1/access_log.rst @@ -0,0 +1,181 @@ +.. _config_access_log_v1: + +Access logging +============== + +Configuration +------------- + +.. code-block:: json + + { + "access_log": [ + { + "path": "...", + "format": "...", + "filter": "{...}", + }, + ] + } + +.. _config_access_log_path_param: + +path + *(required, string)* Path the access log is written to. + +.. _config_access_log_format_param: + +format + *(optional, string)* Access log format. Envoy supports :ref:`custom access log formats + ` as well as a :ref:`default format + `. + +.. _config_access_log_filter_param: + +filter + *(optional, object)* :ref:`Filter ` which is used + to determine if the access log needs to be written. + +.. _config_http_con_manager_access_log_filters_v1: + +Filters +------- + +Envoy supports the following access log filters: + +.. contents:: + :local: + +Status code +^^^^^^^^^^^ + +.. code-block:: json + + { + "filter": { + "type": "status_code", + "op": "...", + "value": "...", + "runtime_key": "..." + } + } + +Filters on HTTP response/status code. + +op + *(required, string)* Comparison operator. Currently *>=* and *=* are the only supported operators. + +value + *(required, integer)* Default value to compare against if runtime value is not available. 
 + +runtime_key + *(optional, string)* Runtime key to get value for comparison. This value is used if defined. + +Duration +^^^^^^^^ + +.. code-block:: json + + { + "filter": { + "type": "duration", + "op": "...", + "value": "...", + "runtime_key": "..." + } + } + +Filters on total request duration in milliseconds. + +op + *(required, string)* Comparison operator. Currently *>=* and *=* are the only supported operators. + +value + *(required, integer)* Default value to compare against if runtime value is not available. + +runtime_key + *(optional, string)* Runtime key to get value for comparison. This value is used if defined. + + +Not health check +^^^^^^^^^^^^^^^^ + +.. code-block:: json + + { + "filter": { + "type": "not_healthcheck" + } + } + +Filters for requests that are not health check requests. A health check request is marked by +the :ref:`health check filter `. + +Traceable +^^^^^^^^^ + +.. code-block:: json + + { + "filter": { + "type": "traceable_request" + } + } + +Filters for requests that are traceable. See the :ref:`tracing overview ` for +more information on how a request becomes traceable. + + +.. _config_http_con_manager_access_log_filters_runtime_v1: + +Runtime +^^^^^^^^^ +.. code-block:: json + + { + "filter": { + "type": "runtime", + "key" : "..." + } + } + +Filters for random sampling of requests. Sampling pivots on the header +:ref:`x-request-id` being present. If +:ref:`x-request-id` is present, the filter will +consistently sample across multiple hosts based on the runtime key value and the value extracted +from :ref:`x-request-id`. If it is missing, the +filter will randomly sample based on the runtime key value. + +key + *(required, string)* Runtime key to get the percentage of requests to be sampled. + This runtime control is specified in the range 0-100 and defaults to 0. + +And +^^^ + +..
code-block:: json + + { + "filter": { + "type": "logical_and", + "filters": [] + } + } + +Performs a logical "and" operation on the result of each filter in *filters*. Filters are evaluated +sequentially and if one of them returns false, the filter returns false immediately. + +Or +^^ + +.. code-block:: json + + { + "filter": { + "type": "logical_or", + "filters": [] + } + } + +Performs a logical "or" operation on the result of each individual filter. Filters are evaluated +sequentially and if one of them returns true, the filter returns true immediately. diff --git a/docs/root/api-v1/admin.rst b/docs/root/api-v1/admin.rst new file mode 100644 index 000000000000..5e65ac763da6 --- /dev/null +++ b/docs/root/api-v1/admin.rst @@ -0,0 +1,27 @@ +.. _config_admin_v1: + +Administration interface +======================== + +Administration interface :ref:`operations documentation `. + +.. code-block:: json + + { + "access_log_path": "...", + "profile_path": "...", + "address": "..." + } + +access_log_path + *(required, string)* The path to write the access log for the administration server. If no + access log is desired specify '/dev/null'. + +profile_path + *(optional, string)* The cpu profiler output path for the administration server. If no profile + path is specified, the default is '/var/log/envoy/envoy.prof'. + +address + *(required, string)* The TCP address that the administration server will listen on, e.g., + "tcp://127.0.0.1:1234". Note, "tcp://0.0.0.0:1234" is the wild card match for any IPv4 address + with port 1234. diff --git a/docs/root/api-v1/api.rst b/docs/root/api-v1/api.rst new file mode 100644 index 000000000000..61545ce3ad7e --- /dev/null +++ b/docs/root/api-v1/api.rst @@ -0,0 +1,19 @@ +.. _envoy_v1_api_reference: + +v1 API reference +================ + +.. 
toctree:: + :glob: + :maxdepth: 2 + + listeners/listeners + network_filters/network_filters + route_config/route_config + http_filters/http_filters + cluster_manager/cluster_manager + access_log + admin + rate_limit + runtime + tracing diff --git a/docs/root/api-v1/cluster_manager/cds.rst b/docs/root/api-v1/cluster_manager/cds.rst new file mode 100644 index 000000000000..37354d6bece5 --- /dev/null +++ b/docs/root/api-v1/cluster_manager/cds.rst @@ -0,0 +1,45 @@ +.. _config_cluster_manager_cds_v1: + +Cluster discovery service +========================= + +.. code-block:: json + + { + "cluster": "{...}", + "refresh_delay_ms": "..." + } + +:ref:`cluster ` + *(required, object)* A standard definition of an upstream cluster that hosts the cluster + discovery service. The cluster must run a REST service that implements the :ref:`CDS HTTP API + `. + +refresh_delay_ms + *(optional, integer)* The delay, in milliseconds, between fetches to the CDS API. Envoy will add + an additional random jitter to the delay that is between zero and *refresh_delay_ms* + milliseconds. Thus the longest possible refresh delay is 2 \* *refresh_delay_ms*. Default value + is 30000ms (30 seconds). + +.. _config_cluster_manager_cds_api: + +REST API +-------- + +.. http:get:: /v1/clusters/(string: service_cluster)/(string: service_node) + +Asks the discovery service to return all clusters for a particular `service_cluster` and +`service_node`. `service_cluster` corresponds to the :option:`--service-cluster` CLI option. +`service_node` corresponds to the :option:`--service-node` CLI option. Responses use the following +JSON schema: + +.. code-block:: json + + { + "clusters": [] + } + +clusters + *(Required, array)* A list of :ref:`clusters ` that will be + dynamically added/modified within the cluster manager. Envoy will reconcile this list with the + clusters that are currently loaded and either add/modify/remove clusters as necessary. 
diff --git a/docs/root/api-v1/cluster_manager/cluster.rst b/docs/root/api-v1/cluster_manager/cluster.rst new file mode 100644 index 000000000000..944774edfd4f --- /dev/null +++ b/docs/root/api-v1/cluster_manager/cluster.rst @@ -0,0 +1,205 @@ +.. _config_cluster_manager_cluster: + +Cluster +======= + +.. code-block:: json + + { + "name": "...", + "type": "...", + "connect_timeout_ms": "...", + "per_connection_buffer_limit_bytes": "...", + "lb_type": "...", + "ring_hash_lb_config": "{...}", + "hosts": [], + "service_name": "...", + "health_check": "{...}", + "max_requests_per_connection": "...", + "circuit_breakers": "{...}", + "ssl_context": "{...}", + "features": "...", + "http2_settings": "{...}", + "cleanup_interval_ms": "...", + "dns_refresh_rate_ms": "...", + "dns_lookup_family": "...", + "dns_resolvers": [], + "outlier_detection": "{...}" + } + +.. _config_cluster_manager_cluster_name: + +name + *(required, string)* Supplies the name of the cluster which must be unique across all clusters. + The cluster name is used when emitting :ref:`statistics `. + By default, the maximum length of a cluster name is limited to 60 characters. This limit can be + increased by setting the :option:`--max-obj-name-len` command line argument to the desired value. + +.. _config_cluster_manager_type: + +type + *(required, string)* The :ref:`service discovery type ` to + use for resolving the cluster. Possible options are *static*, *strict_dns*, *logical_dns*, + :ref:`*original_dst* `, and *sds*. + +connect_timeout_ms + *(required, integer)* The timeout for new network connections to hosts in the cluster specified + in milliseconds. + +.. _config_cluster_manager_cluster_per_connection_buffer_limit_bytes: + +per_connection_buffer_limit_bytes + *(optional, integer)* Soft limit on size of the cluster's connections read and write buffers. + If unspecified, an implementation defined default is applied (1MiB). + +.. 
_config_cluster_manager_cluster_lb_type: + +lb_type + *(required, string)* The :ref:`load balancer type ` to use + when picking a host in the cluster. Possible options are *round_robin*, *least_request*, + *ring_hash*, *random*, and *original_dst_lb*. Note that :ref:`*original_dst_lb* + ` must be used with clusters of type + :ref:`*original_dst* `, and may not be + used with any other cluster type. + +:ref:`ring_hash_lb_config ` + *(optional, object)* Optional configuration for the ring hash load balancer, used when *lb_type* + is set to *ring_hash*. + +hosts + *(sometimes required, array)* If the service discovery type is *static*, *strict_dns*, or + *logical_dns* the hosts array is required. Hosts array is not allowed with cluster type + *original_dst*. How it is specified depends on the type of service discovery: + + static + Static clusters must use fully resolved hosts that require no DNS lookups. Both TCP and unix + domain sockets (UDS) addresses are supported. A TCP address looks like: + + ``tcp://:`` + + A UDS address looks like: + + ``unix://`` + + A list of addresses can be specified as in the following example: + + .. code-block:: json + + [{"url": "tcp://10.0.0.2:1234"}, {"url": "tcp://10.0.0.3:5678"}] + + strict_dns + Strict DNS clusters can specify any number of hostname:port combinations. All names will be + resolved using DNS and grouped together to form the final cluster. If multiple records are + returned for a single name, all will be used. For example: + + .. code-block:: json + + [{"url": "tcp://foo1.bar.com:1234"}, {"url": "tcp://foo2.bar.com:5678"}] + + logical_dns + Logical DNS clusters specify hostnames much like strict DNS, however only the first host will be + used. For example: + + .. code-block:: json + + [{"url": "tcp://foo1.bar.com:1234"}] + +.. _config_cluster_manager_cluster_service_name: + +service_name + *(sometimes required, string)* This parameter is required if the service discovery type is *sds*. 
+ It will be passed to the :ref:`SDS API ` when fetching cluster + members. + +:ref:`health_check ` + *(optional, object)* Optional :ref:`active health checking ` + configuration for the cluster. If no configuration is specified no health checking will be done + and all cluster members will be considered healthy at all times. + +max_requests_per_connection + *(optional, integer)* Optional maximum requests for a single upstream connection. This + parameter is respected by both the HTTP/1.1 and HTTP/2 connection pool implementations. If not + specified, there is no limit. Setting this parameter to 1 will effectively disable keep alive. + +:ref:`circuit_breakers ` + *(optional, object)* Optional :ref:`circuit breaking ` settings + for the cluster. + +:ref:`ssl_context ` + *(optional, object)* The TLS configuration for connections to the upstream cluster. If no TLS + configuration is specified, TLS will not be used for new connections. + +.. _config_cluster_manager_cluster_features: + +features + *(optional, string)* A comma delimited list of features that the upstream cluster supports. + The currently supported features are: + + http2 + If *http2* is specified, Envoy will assume that the upstream supports HTTP/2 when making new + HTTP connection pool connections. Currently, Envoy only supports prior knowledge for upstream + connections. Even if TLS is used with ALPN, *http2* must be specified. As an aside this allows + HTTP/2 connections to happen over plain text. + +.. _config_cluster_manager_cluster_http2_settings: + +http2_settings + *(optional, object)* Additional HTTP/2 settings that are passed directly to the HTTP/2 codec when + initiating HTTP connection pool connections. These are the same options supported in the HTTP connection + manager :ref:`http2_settings ` option. + +.. _config_cluster_manager_cluster_cleanup_interval_ms: + +cleanup_interval_ms + *(optional, integer)* The interval for removing stale hosts from an *original_dst* cluster. 
 Hosts + are considered stale if they have not been used as upstream destinations during this interval. + New hosts are added to original destination clusters on demand as new connections are redirected + to Envoy, causing the number of hosts in the cluster to grow over time. Hosts that are not stale + (they are actively used as destinations) are kept in the cluster, which allows connections to + them to remain open, saving the latency that would otherwise be spent on opening new connections. + If this setting is not specified, the value defaults to 5000. For cluster types other than + *original_dst* this setting is ignored. + +.. _config_cluster_manager_cluster_dns_refresh_rate_ms: + +dns_refresh_rate_ms + *(optional, integer)* If the dns refresh rate is specified and the cluster type is either *strict_dns*, + or *logical_dns*, this value is used as the cluster's dns refresh rate. If this setting is not specified, + the value defaults to 5000. For cluster types other than *strict_dns* and *logical_dns* this setting is + ignored. + +.. _config_cluster_manager_cluster_dns_lookup_family: + +dns_lookup_family + *(optional, string)* The DNS IP address resolution policy. The options are *v4_only*, *v6_only*, + and *auto*. If this setting is not specified, the value defaults to *v4_only*. When *v4_only* is selected, + the DNS resolver will only perform a lookup for addresses in the IPv4 family. If *v6_only* is selected, + the DNS resolver will only perform a lookup for addresses in the IPv6 family. If *auto* is specified, + the DNS resolver will first perform a lookup for addresses in the IPv6 family and fallback to a lookup for + addresses in the IPv4 family. For cluster types other than *strict_dns* and *logical_dns*, this setting + is ignored. + +..
_config_cluster_manager_cluster_dns_resolvers: + +dns_resolvers + *(optional, array)* If DNS resolvers are specified and the cluster type is either *strict_dns*, or + *logical_dns*, this value is used to specify the cluster's dns resolvers. If this setting is not + specified, the value defaults to the default resolver, which uses /etc/resolv.conf for + configuration. For cluster types other than *strict_dns* and *logical_dns* this setting is + ignored. + +.. _config_cluster_manager_cluster_outlier_detection_summary: + +:ref:`outlier_detection ` + *(optional, object)* If specified, outlier detection will be enabled for this upstream cluster. + See the :ref:`architecture overview ` for more information on outlier + detection. + +.. toctree:: + :hidden: + + cluster_hc + cluster_circuit_breakers + cluster_ssl + cluster_outlier_detection + cluster_ring_hash_lb_config diff --git a/docs/root/api-v1/cluster_manager/cluster_circuit_breakers.rst b/docs/root/api-v1/cluster_manager/cluster_circuit_breakers.rst new file mode 100644 index 000000000000..6ae740753a2c --- /dev/null +++ b/docs/root/api-v1/cluster_manager/cluster_circuit_breakers.rst @@ -0,0 +1,64 @@ +.. _config_cluster_manager_cluster_circuit_breakers_v1: + +Circuit breakers +================ + +* Circuit breaking :ref:`architecture overview `. +* Priority routing :ref:`architecture overview `. + +Circuit breaking settings can be specified individually for each defined priority. How the +different priorities are used are documented in the sections of the configuration guide that use +them. + +.. code-block:: json + + { + "default": "{...}", + "high": "{...}" + } + +default + *(optional, object)* Settings object for default priority. + +high + *(optional, object)* Settings object for high priority. + +Per priority settings +--------------------- + +.. code-block:: json + + { + "max_connections": "...", + "max_pending_requests": "...", + "max_requests": "...", + "max_retries": "...", + } + +.. 
_config_cluster_manager_cluster_circuit_breakers_max_connections: + +max_connections + *(optional, integer)* The maximum number of connections that Envoy will make to the upstream + cluster. If not specified, the default is 1024. See the :ref:`circuit breaking overview + ` for more information. + +.. _config_cluster_manager_cluster_circuit_breakers_max_pending_requests: + +max_pending_requests + *(optional, integer)* The maximum number of pending requests that Envoy will allow to the upstream + cluster. If not specified, the default is 1024. See the :ref:`circuit breaking overview + ` for more information. + +.. _config_cluster_manager_cluster_circuit_breakers_max_requests: + +max_requests + *(optional, integer)* The maximum number of parallel requests that Envoy will make to the upstream + cluster. If not specified, the default is 1024. See the :ref:`circuit breaking overview + ` for more information. + +.. _config_cluster_manager_cluster_circuit_breakers_max_retries: + +max_retries + *(optional, integer)* The maximum number of parallel retries that Envoy will allow to the upstream + cluster. If not specified, the default is 3. See the :ref:`circuit breaking overview + ` for more information. diff --git a/docs/root/api-v1/cluster_manager/cluster_hc.rst b/docs/root/api-v1/cluster_manager/cluster_hc.rst new file mode 100644 index 000000000000..0c52ef0c7e1f --- /dev/null +++ b/docs/root/api-v1/cluster_manager/cluster_hc.rst @@ -0,0 +1,91 @@ +.. _config_cluster_manager_cluster_hc_v1: + +Health checking +=============== + +* Health checking :ref:`architecture overview `. +* If health checking is configured for a cluster, additional statistics are emitted. They are + documented :ref:`here `. + +.. code-block:: json + + { + "type": "...", + "timeout_ms": "...", + "interval_ms": "...", + "unhealthy_threshold": "...", + "healthy_threshold": "...", + "path": "...", + "send": [], + "receive": [], + "interval_jitter_ms": "...", + "service_name": "...", + "redis_key": "..." 
 + } + +type + *(required, string)* The type of health checking to perform. Currently supported types are + *http*, *redis*, and *tcp*. See the :ref:`architecture overview ` + for more information. + +timeout_ms + *(required, integer)* The time in milliseconds to wait for a health check response. If the + timeout is reached the health check attempt will be considered a failure. + +.. _config_cluster_manager_cluster_hc_interval: + +interval_ms + *(required, integer)* The interval between health checks in milliseconds. + +unhealthy_threshold + *(required, integer)* The number of unhealthy health checks required before a host is marked + unhealthy. Note that for *http* health checking if a host responds with 503 this threshold is + ignored and the host is considered unhealthy immediately. + +healthy_threshold + *(required, integer)* The number of healthy health checks required before a host is marked + healthy. Note that during startup, only a single successful health check is required to mark + a host healthy. + +path + *(sometimes required, string)* This parameter is required if the type is *http*. It specifies the + HTTP path that will be requested during health checking. For example */healthcheck*. + +send + *(sometimes required, array)* This parameter is required if the type is *tcp*. It specifies + the bytes to send for a health check request. It is an array of hex byte strings specified + as in the following example: + + .. code-block:: json + + [ + {"binary": "01"}, + {"binary": "000000FF"} + ] + + The array is allowed to be empty in the case of "connect only" health checking. + +receive + *(sometimes required, array)* This parameter is required if the type is *tcp*. It specifies the + bytes that are expected in a successful health check response. It is an array of hex byte strings + specified similarly to the *send* parameter. The array is allowed to be empty in the case of + "connect only" health checking.
 + +interval_jitter_ms + *(optional, integer)* An optional jitter amount in milliseconds. If specified, during every + interval Envoy will add 0 to *interval_jitter_ms* milliseconds to the wait time. + +.. _config_cluster_manager_cluster_hc_service_name: + +service_name + *(optional, string)* An optional service name parameter which is used to validate the identity of + the health checked cluster. See the :ref:`architecture overview + ` for more information. + +.. _config_cluster_manager_cluster_hc_redis_key: + +redis_key + *(optional, string)* If the type is *redis*, perform ``EXISTS `` instead of + ``PING``. A return value from Redis of 0 (does not exist) is considered a passing healthcheck. A + return value other than 0 is considered a failure. This allows the user to mark a Redis instance + for maintenance by setting the specified key to any value and waiting for traffic to drain. diff --git a/docs/root/api-v1/cluster_manager/cluster_manager.rst b/docs/root/api-v1/cluster_manager/cluster_manager.rst new file mode 100644 index 000000000000..ddd04bb7a577 --- /dev/null +++ b/docs/root/api-v1/cluster_manager/cluster_manager.rst @@ -0,0 +1,51 @@ +.. _config_cluster_manager_v1: + +Cluster manager +=============== + +.. toctree:: + :hidden: + + cluster + outlier + cds + sds + +Cluster manager :ref:`architecture overview `. + +.. code-block:: json + + { + "clusters": [], + "sds": "{...}", + "local_cluster_name": "...", + "outlier_detection": "{...}", + "cds": "{...}" + } + +.. _config_cluster_manager_clusters: + +:ref:`clusters ` + *(required, array)* A list of upstream clusters that the cluster manager performs + :ref:`service discovery `, + :ref:`health checking `, and + :ref:`load balancing ` on. + +:ref:`sds ` + *(sometimes required, object)* If any defined clusters use the :ref:`sds + ` cluster type, a global SDS configuration must be specified. + +..
_config_cluster_manager_local_cluster_name: + +local_cluster_name + *(optional, string)* Name of the local cluster (i.e., the cluster that owns the Envoy running this + configuration). In order to enable + :ref:`zone aware routing ` this option must be + set. If *local_cluster_name* is defined then :ref:`clusters ` + must contain a definition of a cluster with the same name. + +:ref:`outlier_detection ` + *(optional, object)* Optional global configuration for outlier detection. + +:ref:`cds ` + *(optional, object)* Optional configuration for the cluster discovery service (CDS) API. diff --git a/docs/root/api-v1/cluster_manager/cluster_outlier_detection.rst b/docs/root/api-v1/cluster_manager/cluster_outlier_detection.rst new file mode 100644 index 000000000000..9548bbc9fda7 --- /dev/null +++ b/docs/root/api-v1/cluster_manager/cluster_outlier_detection.rst @@ -0,0 +1,101 @@ +.. _config_cluster_manager_cluster_outlier_detection: + +Outlier detection +================= + +.. code-block:: json + + { + "consecutive_5xx": "...", + "consecutive_gateway_failure": "...", + "interval_ms": "...", + "base_ejection_time_ms": "...", + "max_ejection_percent": "...", + "enforcing_consecutive_5xx" : "...", + "enforcing_consecutive_gateway_failure" : "...", + "enforcing_success_rate" : "...", + "success_rate_minimum_hosts" : "...", + "success_rate_request_volume" : "...", + "success_rate_stdev_factor" : "..." + } + +.. _config_cluster_manager_cluster_outlier_detection_consecutive_5xx: + +consecutive_5xx + *(optional, integer)* The number of consecutive 5xx responses before a consecutive 5xx ejection occurs. Defaults to 5. + +.. _config_cluster_manager_cluster_outlier_detection_consecutive_gateway_failure: + +consecutive_gateway_failure + *(optional, integer)* The number of consecutive "gateway errors" (502, 503 and 504 responses), + including those raised by Envoy for connection errors, before a consecutive gateway failure + ejection occurs. Defaults to 5. + +.. 
_config_cluster_manager_cluster_outlier_detection_interval_ms: + +interval_ms + *(optional, integer)* The time interval between ejection analysis sweeps. This can result in both new ejections as well + as hosts being returned to service. Defaults to 10000ms or 10s. + +.. _config_cluster_manager_cluster_outlier_detection_base_ejection_time_ms: + +base_ejection_time_ms + *(optional, integer)* The base time that a host is ejected for. The real time is equal to the base time multiplied by + the number of times the host has been ejected. Defaults to 30000ms or 30s. + +.. _config_cluster_manager_cluster_outlier_detection_max_ejection_percent: + +max_ejection_percent + *(optional, integer)* The maximum % of hosts in an upstream cluster that can be ejected due to outlier detection. + Defaults to 10%. + +.. _config_cluster_manager_cluster_outlier_detection_enforcing_consecutive_5xx: + +enforcing_consecutive_5xx + *(optional, integer)* The % chance that a host will be actually ejected when an outlier status is detected through + consecutive 5xx. This setting can be used to disable ejection or to ramp it up slowly. + Defaults to 100 with 1% granularity. + +.. _config_cluster_manager_cluster_outlier_detection_enforcing_consecutive_gateway_failure: + +enforcing_consecutive_gateway_failure + *(optional, integer)* The % chance that a host will be actually ejected when an outlier status is + detected through consecutive gateway failure. This setting can be used to disable ejection or to + ramp it up slowly. Defaults to 0 with 1% granularity. + +.. _config_cluster_manager_cluster_outlier_detection_enforcing_success_rate: + +enforcing_success_rate + *(optional, integer)* The % chance that a host will be actually ejected when an outlier status is detected through + success rate statistics. This setting can be used to disable ejection or to ramp it up slowly. + Defaults to 100 with 1% granularity. + +.. 
_config_cluster_manager_cluster_outlier_detection_success_rate_minimum_hosts: + +success_rate_minimum_hosts + *(optional, integer)* The number of hosts in a cluster that must have enough request volume to detect success rate outliers. + If the number of hosts is less than this setting, outlier detection via success rate statistics is not + performed for any host in the cluster. Defaults to 5. + +.. _config_cluster_manager_cluster_outlier_detection_success_rate_request_volume: + +success_rate_request_volume + *(optional, integer)* The minimum number of total requests that must be collected in one interval + (as defined by :ref:`interval_ms ` above) + to include this host in success rate based outlier detection. If the volume is lower than this setting, + outlier detection via success rate statistics is not performed for that host. Defaults to 100. + +.. _config_cluster_manager_cluster_outlier_detection_success_rate_stdev_factor: + +success_rate_stdev_factor + *(optional, integer)* This factor is used to determine the ejection threshold for success rate outlier ejection. + The ejection threshold is used as a measure to determine when a particular host has fallen below an acceptable + success rate. + The ejection threshold is the difference between the mean success rate, and the product of + this factor and the standard deviation of the mean success rate: + ``mean - (stdev * success_rate_stdev_factor)``. This factor is divided by a thousand to + get a ``double``. That is, if the desired factor is ``1.9``, the runtime value should be ``1900``. + Defaults to 1900. + +Each of the above configuration values can be overridden via +:ref:`runtime values `. diff --git a/docs/root/api-v1/cluster_manager/cluster_ring_hash_lb_config.rst b/docs/root/api-v1/cluster_manager/cluster_ring_hash_lb_config.rst new file mode 100644 index 000000000000..3b0a38e364c5 --- /dev/null +++ b/docs/root/api-v1/cluster_manager/cluster_ring_hash_lb_config.rst @@ -0,0 +1,26 @@ +.. 
to 1024. In the case that the total number of hosts is greater than the minimum, each host will be
allocated a single virtual node.
+ The two options together will use ALPN to tell a server that expects ALPN that Envoy supports + HTTP/2. Then the *http2* feature will cause new connections to use HTTP/2. + +cert_chain_file + *(optional, string)* The certificate chain file that should be served by the connection. This is + used to provide a client side TLS certificate to an upstream host. + +private_key_file + *(optional, string)* The private key that corresponds to the certificate chain file. + +ca_cert_file + *(optional, string)* A file containing certificate authority certificates to use in verifying + a presented server certificate. + +verify_certificate_hash + *(optional, string)* If specified, Envoy will verify (pin) the hash of the presented server + certificate. + +verify_subject_alt_name + *(optional, array)* An optional list of subject alt names. If specified, Envoy will verify + that the server certificate's subject alt name matches one of the specified values. + +cipher_suites + *(optional, string)* If specified, the TLS connection will only support the specified `cipher list + `_. + If not specified, the default list: + +.. code-block:: none + + [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] + [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] + ECDHE-ECDSA-AES128-SHA256 + ECDHE-RSA-AES128-SHA256 + ECDHE-ECDSA-AES128-SHA + ECDHE-RSA-AES128-SHA + AES128-GCM-SHA256 + AES128-SHA256 + AES128-SHA + ECDHE-ECDSA-AES256-GCM-SHA384 + ECDHE-RSA-AES256-GCM-SHA384 + ECDHE-ECDSA-AES256-SHA384 + ECDHE-RSA-AES256-SHA384 + ECDHE-ECDSA-AES256-SHA + ECDHE-RSA-AES256-SHA + AES256-GCM-SHA384 + AES256-SHA256 + AES256-SHA + +will be used. + +ecdh_curves + *(optional, string)* If specified, the TLS connection will only support the specified ECDH curves. + If not specified, the default curves (X25519, P-256) will be used. + +sni + *(optional, string)* If specified, the string will be presented as the SNI during the TLS + handshake. 
diff --git a/docs/root/api-v1/cluster_manager/outlier.rst b/docs/root/api-v1/cluster_manager/outlier.rst new file mode 100644 index 000000000000..6a87f4984570 --- /dev/null +++ b/docs/root/api-v1/cluster_manager/outlier.rst @@ -0,0 +1,15 @@ +.. _config_cluster_manager_outlier_detection: + +Outlier detection +================= + +Outlier detection :ref:`architecture overview `. + +.. code-block:: json + + { + "event_log_path": "..." + } + +event_log_path + *(optional, string)* Specifies the path to the outlier event log. diff --git a/docs/root/api-v1/cluster_manager/sds.rst b/docs/root/api-v1/cluster_manager/sds.rst new file mode 100644 index 000000000000..08d8d21305bc --- /dev/null +++ b/docs/root/api-v1/cluster_manager/sds.rst @@ -0,0 +1,85 @@ +.. _config_cluster_manager_sds: + +Service discovery service +========================= + +Service discovery service :ref:`architecture overview `. + +.. code-block:: json + + { + "cluster": "{...}", + "refresh_delay_ms": "{...}" + } + +:ref:`cluster ` + *(required, object)* A standard definition of an upstream cluster that hosts the service + discovery service. The cluster must run a REST service that implements the :ref:`SDS HTTP API + `. + +refresh_delay_ms + *(required, integer)* The delay, in milliseconds, between fetches to the SDS API for each + configured SDS cluster. Envoy will add an additional random jitter to the delay that is between + zero and *refresh_delay_ms* milliseconds. Thus the longest possible refresh delay is + 2 \* *refresh_delay_ms*. + +.. _config_cluster_manager_sds_api: + +REST API +-------- + +Envoy expects the service discovery service to expose the following API (See Lyft's +`reference implementation `_): + +.. http:get:: /v1/registration/(string: service_name) + + Asks the discovery service to return all hosts for a particular `service_name`. `service_name` + corresponds to the :ref:`service_name ` cluster + parameter. Responses use the following JSON schema: + + .. 
hosts
  *(required, array)* A list of :ref:`hosts ` that make up
  the service.
diff --git a/docs/root/api-v1/http_filters/cors_filter.rst b/docs/root/api-v1/http_filters/cors_filter.rst new file mode 100644 index 000000000000..747e7c892efd --- /dev/null +++ b/docs/root/api-v1/http_filters/cors_filter.rst @@ -0,0 +1,13 @@ +.. _config_http_filters_cors_v1: + +CORS filter +=========== + +Cors :ref:`configuration overview `. + +.. code-block:: json + + { + "name": "cors", + "config": {} + } diff --git a/docs/root/api-v1/http_filters/dynamodb_filter.rst b/docs/root/api-v1/http_filters/dynamodb_filter.rst new file mode 100644 index 000000000000..cf9ef3bb7620 --- /dev/null +++ b/docs/root/api-v1/http_filters/dynamodb_filter.rst @@ -0,0 +1,19 @@ +.. _config_http_filters_dynamo_v1: + +DynamoDB +======== + +DynamoDB :ref:`configuration overview `. + +.. code-block:: json + + { + "name": "http_dynamo_filter", + "config": {} + } + +name + *(required, string)* Filter name. The only supported value is `http_dynamo_filter`. + +config + *(required, object)* The filter does not use any configuration. diff --git a/docs/root/api-v1/http_filters/fault_filter.rst b/docs/root/api-v1/http_filters/fault_filter.rst new file mode 100644 index 000000000000..7d3433007a28 --- /dev/null +++ b/docs/root/api-v1/http_filters/fault_filter.rst @@ -0,0 +1,94 @@ +.. _config_http_filters_fault_injection_v1: + +Fault Injection +=============== + +Fault Injection :ref:`configuration overview `. + +Configuration +------------- + +.. code-block:: json + + { + "name" : "fault", + "config" : { + "abort" : "{...}", + "delay" : "{...}", + "upstream_cluster" : "...", + "headers" : [], + "downstream_nodes": [] + } + } + +:ref:`abort ` + *(sometimes required, object)* If specified, the filter will abort requests based on + the values in the object. At least *abort* or *delay* must be specified. + +:ref:`delay ` + *(sometimes required, object)* If specified, the filter will inject delays based on the values + in the object. At least *abort* or *delay* must be specified. 
+ +upstream_cluster: + *(optional, string)* Specifies the name of the (destination) upstream + cluster that the filter should match on. Fault injection will be + restricted to requests bound to the specific upstream cluster. + +:ref:`headers ` + *(optional, array)* Specifies a set of headers that the filter should match on. The fault + injection filter can be applied selectively to requests that match a set of headers specified in + the fault filter config. The chances of actual fault injection further depend on the values of + *abort_percent* and *fixed_delay_percent* parameters. The filter will check the request's headers + against all the specified headers in the filter config. A match will happen if all the headers in + the config are present in the request with the same values (or based on presence if the *value* + field is not in the config). + +downstream_nodes: + *(optional, array)* Faults are injected for the specified list of downstream hosts. If this setting is + not set, faults are injected for all downstream nodes. Downstream node name is taken from + :ref:`the HTTP x-envoy-downstream-service-node ` + header and compared against downstream_nodes list. + +.. _config_http_filters_fault_injection_abort: + +Abort +----- +.. code-block:: json + + { + "abort_percent" : "...", + "http_status" : "..." + } + +abort_percent + *(required, integer)* The percentage of requests that + should be aborted with the specified *http_status* code. Valid values + range from 0 to 100. + +http_status + *(required, integer)* The HTTP status code that will be used as the + response code for the request being aborted. + +.. _config_http_filters_fault_injection_delay: + +Delay +----- +.. code-block:: json + + { + "type" : "...", + "fixed_delay_percent" : "...", + "fixed_duration_ms" : "..." + } + +type: + *(required, string)* Specifies the type of delay being + injected. Currently only *fixed* delay type (step function) is supported. 
+ +fixed_delay_percent: + *(required, integer)* The percentage of requests that will + be delayed for the duration specified by *fixed_duration_ms*. Valid + values range from 0 to 100. + +fixed_duration_ms: + *(required, integer)* The delay duration in milliseconds. Must be greater than 0. diff --git a/docs/root/api-v1/http_filters/grpc_http1_bridge_filter.rst b/docs/root/api-v1/http_filters/grpc_http1_bridge_filter.rst new file mode 100644 index 000000000000..36cfa8126ade --- /dev/null +++ b/docs/root/api-v1/http_filters/grpc_http1_bridge_filter.rst @@ -0,0 +1,13 @@ +.. _config_http_filters_grpc_bridge_v1: + +gRPC HTTP/1.1 bridge +==================== + +gRPC HTTP/1.1 bridge :ref:`configuration overview `. + +.. code-block:: json + + { + "name": "grpc_http1_bridge", + "config": {} + } diff --git a/docs/root/api-v1/http_filters/grpc_json_transcoder_filter.rst b/docs/root/api-v1/http_filters/grpc_json_transcoder_filter.rst new file mode 100644 index 000000000000..22f615b26508 --- /dev/null +++ b/docs/root/api-v1/http_filters/grpc_json_transcoder_filter.rst @@ -0,0 +1,64 @@ +.. _config_http_filters_grpc_json_transcoder_v1: + +gRPC-JSON transcoder filter +=========================== + +gRPC-JSON transcoder :ref:`configuration overview `. + +Configure gRPC-JSON transcoder +------------------------------ + +The filter config for the filter requires the descriptor file as well as a list of the gRPC +services to be transcoded. + +.. code-block:: json + + { + "name": "grpc_json_transcoder", + "config": { + "proto_descriptor": "proto.pb", + "services": ["grpc.service.Service"], + "print_options": { + "add_whitespace": false, + "always_print_primitive_fields": false, + "always_print_enums_as_ints": false, + "preserve_proto_field_names": false + } + } + } + +proto_descriptor + *(required, string)* Supplies the filename of + :ref:`the proto descriptor set ` for the gRPC + services. 
+ +services + *(required, array)* A list of strings that supplies the service names that the + transcoder will translate. If the service name doesn't exist in ``proto_descriptor``, Envoy + will fail at startup. The ``proto_descriptor`` may contain more services than the service names + specified here, but they won't be translated. + +print_options + *(optional, object)* Control options for response json. These options are passed directly to + `JsonPrintOptions `_. Valid options are: + + add_whitespace + *(optional, boolean)* Whether to add spaces, line breaks and indentation to make the JSON + output easy to read. Defaults to false. + + always_print_primitive_fields + *(optional, boolean)* Whether to always print primitive fields. By default primitive + fields with default values will be omitted in JSON output. For + example, an int32 field set to 0 will be omitted. Setting this flag to + true will override the default behavior and print primitive fields + regardless of their values. Defaults to false. + + always_print_enums_as_ints + *(optional, boolean)* Whether to always print enums as ints. By default they are rendered + as strings. Defaults to false. + + preserve_proto_field_names + *(optional, boolean)* Whether to preserve proto field names. By default protobuf will + generate JSON field names using the ``json_name`` option, or lower camel case, + in that order. Setting this flag will preserve the original field names. Defaults to false. diff --git a/docs/root/api-v1/http_filters/grpc_web_filter.rst b/docs/root/api-v1/http_filters/grpc_web_filter.rst new file mode 100644 index 000000000000..a845a3914dfb --- /dev/null +++ b/docs/root/api-v1/http_filters/grpc_web_filter.rst @@ -0,0 +1,13 @@ +.. _config_http_filters_grpc_web_v1: + +gRPC-Web filter +=============== + +gRPC-Web filter :ref:`configuration overview `. + +.. 
code-block:: json + + { + "name": "grpc_web", + "config": {} + } diff --git a/docs/root/api-v1/http_filters/health_check_filter.rst b/docs/root/api-v1/http_filters/health_check_filter.rst new file mode 100644 index 000000000000..5a648328b92c --- /dev/null +++ b/docs/root/api-v1/http_filters/health_check_filter.rst @@ -0,0 +1,28 @@ +.. _config_http_filters_health_check_v1: + +Health check +============ + +Health check :ref:`configuration overview `. + +.. code-block:: json + + { + "name": "health_check", + "config": { + "pass_through_mode": "...", + "endpoint": "...", + "cache_time_ms": "..." + } + } + +pass_through_mode + *(required, boolean)* Specifies whether the filter operates in pass through mode or not. + +endpoint + *(required, string)* Specifies the incoming HTTP endpoint that should be considered the + health check endpoint. For example */healthcheck*. + +cache_time_ms + *(optional, integer)* If operating in pass through mode, the amount of time in milliseconds that + the filter should cache the upstream response. diff --git a/docs/root/api-v1/http_filters/http_filters.rst b/docs/root/api-v1/http_filters/http_filters.rst new file mode 100644 index 000000000000..f859e409169c --- /dev/null +++ b/docs/root/api-v1/http_filters/http_filters.rst @@ -0,0 +1,8 @@ +HTTP filters +============ + +.. toctree:: + :glob: + :maxdepth: 2 + + * diff --git a/docs/root/api-v1/http_filters/lua_filter.rst b/docs/root/api-v1/http_filters/lua_filter.rst new file mode 100644 index 000000000000..bf9b15abdbba --- /dev/null +++ b/docs/root/api-v1/http_filters/lua_filter.rst @@ -0,0 +1,21 @@ +.. _config_http_filters_lua_v1: + +Lua +=== + +Lua :ref:`configuration overview `. + +.. code-block:: json + + { + "name": "lua", + "config": { + "inline_code": "..." + } + } + +inline_code + *(required, string)* The Lua code that Envoy will execute. This can be a very small script that + further loads code from disk if desired. 
Note that if JSON configuration is used, the code must + be properly escaped. YAML configuration may be easier to read since YAML supports multi-line + strings so complex scripts can be easily expressed inline in the configuration. diff --git a/docs/root/api-v1/http_filters/rate_limit_filter.rst b/docs/root/api-v1/http_filters/rate_limit_filter.rst new file mode 100644 index 000000000000..09a7963cd5de --- /dev/null +++ b/docs/root/api-v1/http_filters/rate_limit_filter.rst @@ -0,0 +1,39 @@ +.. _config_http_filters_rate_limit_v1: + +Rate limit +========== + +Rate limit :ref:`configuration overview `. + +.. code-block:: json + + { + "name": "rate_limit", + "config": { + "domain": "...", + "stage": "...", + "request_type": "...", + "timeout_ms": "..." + } + } + +domain + *(required, string)* The rate limit domain to use when calling the rate limit service. + +stage + *(optional, integer)* Specifies the rate limit configurations to be applied with the same stage + number. If not set, the default stage number is 0. + + **NOTE:** The filter supports a range of 0 - 10 inclusively for stage numbers. + +request_type + *(optional, string)* The type of requests the filter should apply to. The supported + types are *internal*, *external* or *both*. A request is considered internal if + :ref:`x-envoy-internal` is set to true. If + :ref:`x-envoy-internal` is not set or false, a + request is considered external. The filter defaults to *both*, and it will apply to all request + types. + +timeout_ms + *(optional, integer)* The timeout in milliseconds for the rate limit service RPC. If not set, + this defaults to 20ms. diff --git a/docs/root/api-v1/http_filters/router_filter.rst b/docs/root/api-v1/http_filters/router_filter.rst new file mode 100644 index 000000000000..71607dfde263 --- /dev/null +++ b/docs/root/api-v1/http_filters/router_filter.rst @@ -0,0 +1,28 @@ +.. _config_http_filters_router_v1: + +Router +====== + +Router :ref:`configuration overview `. + +.. 
cluster
  *(required, string)* The name of the cluster that hosts the Squash server.
attachment_poll_period_ms
  *(required, integer)* Amount of time to poll for the status of the attachment object in the Squash
  server (to check if it has been attached). Defaults to 1 second.
http:get:: /v1/listeners/(string: service_cluster)/(string: service_node) + +Asks the discovery service to return all listeners for a particular `service_cluster` and +`service_node`. `service_cluster` corresponds to the :option:`--service-cluster` CLI option. +`service_node` corresponds to the :option:`--service-node` CLI option. Responses use the following +JSON schema: + +.. code-block:: json + + { + "listeners": [] + } + +listeners + *(Required, array)* A list of :ref:`listeners ` that will be + dynamically added/modified within the listener manager. The management server is expected to + respond with the complete set of listeners that Envoy should configure during each polling cycle. + Envoy will reconcile this list with the listeners that are currently loaded and either + add/modify/remove listeners as necessary. diff --git a/docs/root/api-v1/listeners/listeners.rst b/docs/root/api-v1/listeners/listeners.rst new file mode 100644 index 000000000000..a2fc957702cf --- /dev/null +++ b/docs/root/api-v1/listeners/listeners.rst @@ -0,0 +1,238 @@ +.. _config_listeners_v1: + +Listeners +========= + +.. toctree:: + :hidden: + + lds + +.. code-block:: json + + { + "name": "...", + "address": "...", + "filters": [], + "ssl_context": "{...}", + "bind_to_port": "...", + "use_proxy_proto": "...", + "use_original_dst": "...", + "per_connection_buffer_limit_bytes": "...", + "drain_type": "..." + } + +.. _config_listeners_name: + +name + *(optional, string)* The unique name by which this listener is known. If no name is provided, + Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically + updated or removed via :ref:`LDS ` a unique name must be provided. + By default, the maximum length of a listener's name is limited to 60 characters. This limit can be + increased by setting the :option:`--max-obj-name-len` command line argument to the desired value. + +address + *(required, string)* The address that the listener should listen on. 
connections. If this option is enabled, the listener will assume that the remote address of the
connection is the one specified in the header.
_config_listeners_per_connection_buffer_limit_bytes: + +per_connection_buffer_limit_bytes + *(optional, integer)* Soft limit on size of the listener's new connection read and write buffers. + If unspecified, an implementation defined default is applied (1MiB). + +.. _config_listeners_drain_type: + +drain_type + *(optional, string)* The type of draining that the listener does. Allowed values include *default* + and *modify_only*. See the :ref:`draining ` architecture overview for + more information. + +.. _config_listener_network_filters: + +Filters +------- + +Network filter :ref:`architecture overview `. + +.. code-block:: json + + { + "name": "...", + "config": "{...}" + } + +name + *(required, string)* The name of the filter to instantiate. The name must match a :ref:`supported + filter `. + +config + *(required, object)* Filter specific configuration which depends on the filter being instantiated. + See the :ref:`supported filters ` for further documentation. + +.. _config_listener_ssl_context: + +TLS context +----------- + +TLS :ref:`architecture overview `. + +.. code-block:: json + + { + "cert_chain_file": "...", + "private_key_file": "...", + "alpn_protocols": "...", + "alt_alpn_protocols": "...", + "ca_cert_file": "...", + "verify_certificate_hash": "...", + "verify_subject_alt_name": [], + "crl_file": "...", + "cipher_suites": "...", + "ecdh_curves": "...", + "session_ticket_key_paths": [] + } + +cert_chain_file + *(required, string)* The certificate chain file that should be served by the listener. + +private_key_file + *(required, string)* The private key that corresponds to the certificate chain file. + +alpn_protocols + *(optional, string)* Supplies the list of ALPN protocols that the listener should expose. In + practice this is likely to be set to one of two values (see the + :ref:`codec_type ` parameter in the HTTP connection + manager for more information): + + * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. 
+ * "http/1.1" If the listener is only going to support HTTP/1.1 + +.. _config_listener_ssl_context_alt_alpn: + +alt_alpn_protocols + *(optional, string)* An alternate ALPN protocol string that can be switched to via runtime. This + is useful for example to disable HTTP/2 without having to deploy a new configuration. + +ca_cert_file + *(optional, string)* A file containing certificate authority certificates to use in verifying + a presented client side certificate. If not specified and a client certificate is presented it + will not be verified. By default, a client certificate is optional, unless one of the additional + options ( + :ref:`require_client_certificate `, + :ref:`verify_certificate_hash ` or + :ref:`verify_subject_alt_name `) is also + specified. + +.. _config_listener_ssl_context_require_client_certificate: + +require_client_certificate + *(optional, boolean)* If specified, Envoy will reject connections without a valid client certificate. + +.. _config_listener_ssl_context_verify_certificate_hash: + +verify_certificate_hash + *(optional, string)* If specified, Envoy will verify (pin) the hash of the presented client + side certificate. + +.. _config_listener_ssl_context_verify_subject_alt_name: + +verify_subject_alt_name + *(optional, array)* An optional list of subject alt names. If specified, Envoy will verify + that the client certificate's subject alt name matches one of the specified values. + +.. _config_listener_ssl_context_crl_file: + +crl_file + *(optional, string)* An optional `certificate revocation list + `_ (in PEM format). + If specified, Envoy will verify that the presented peer certificate has not been revoked by + this CRL. If this file contains multiple CRLs, all of them will be used. + +cipher_suites + *(optional, string)* If specified, the TLS listener will only support the specified `cipher list + `_. + If not specified, the default list: + +.. 
code-block:: none + + [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] + [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] + ECDHE-ECDSA-AES128-SHA256 + ECDHE-RSA-AES128-SHA256 + ECDHE-ECDSA-AES128-SHA + ECDHE-RSA-AES128-SHA + AES128-GCM-SHA256 + AES128-SHA256 + AES128-SHA + ECDHE-ECDSA-AES256-GCM-SHA384 + ECDHE-RSA-AES256-GCM-SHA384 + ECDHE-ECDSA-AES256-SHA384 + ECDHE-RSA-AES256-SHA384 + ECDHE-ECDSA-AES256-SHA + ECDHE-RSA-AES256-SHA + AES256-GCM-SHA384 + AES256-SHA256 + AES256-SHA + +will be used. + +ecdh_curves + *(optional, string)* If specified, the TLS connection will only support the specified ECDH curves. + If not specified, the default curves (X25519, P-256) will be used. + +session_ticket_key_paths + *(optional, array)* Paths to keyfiles for encrypting and decrypting TLS session tickets. The + first keyfile in the array contains the key to encrypt all new sessions created by this context. + All keys are candidates for decrypting received tickets. This allows for easy rotation of keys + by, for example, putting the new keyfile first, and the previous keyfile second. + + If `session_ticket_key_paths` is not specified, the TLS library will still support resuming + sessions via tickets, but it will use an internally-generated and managed key, so sessions cannot + be resumed across hot restarts or on different hosts. + + Each keyfile must contain exactly 80 bytes of cryptographically-secure random data. For example, + the output of ``openssl rand 80``. + + .. attention:: + + Using this feature has serious security considerations and risks. Improper handling of keys may + result in loss of secrecy in connections, even if ciphers supporting perfect forward secrecy + are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some discussion. 
+ To minimize the risk, you must: + + * Keep the session ticket keys at least as secure as your TLS certificate private keys + * Rotate session ticket keys at least daily, and preferably hourly + * Always generate keys using a cryptographically-secure random data source diff --git a/docs/root/api-v1/network_filters/client_ssl_auth_filter.rst b/docs/root/api-v1/network_filters/client_ssl_auth_filter.rst new file mode 100644 index 000000000000..6a4e09a9674b --- /dev/null +++ b/docs/root/api-v1/network_filters/client_ssl_auth_filter.rst @@ -0,0 +1,47 @@ +.. _config_network_filters_client_ssl_auth_v1: + +Client TLS authentication +========================= + +Client TLS authentication :ref:`configuration overview `. + +.. code-block:: json + + { + "name": "client_ssl_auth", + "config": { + "auth_api_cluster": "...", + "stat_prefix": "...", + "refresh_delay_ms": "...", + "ip_white_list": [] + } + } + +auth_api_cluster + *(required, string)* The :ref:`cluster manager ` cluster that runs + the authentication service. The filter will connect to the service every 60s to fetch the list + of principals. The service must support the expected :ref:`REST API + `. + +stat_prefix + *(required, string)* The prefix to use when emitting :ref:`statistics + `. + +refresh_delay_ms + *(optional, integer)* Time in milliseconds between principal refreshes from the authentication + service. Default is 60000 (60s). The actual fetch time will be this value plus a random jittered + value between 0-refresh_delay_ms milliseconds. + +ip_white_list + *(optional, array)* An optional list of IP address and subnet masks that should be white listed + for access by the filter. If no list is provided, there is no IP white list. The list is + specified as in the following example: + + .. 
code-block:: json + + [ + "192.168.3.0/24", + "50.1.2.3/32", + "10.15.0.0/16", + "2001:abcd::/64" + ] diff --git a/docs/root/api-v1/network_filters/echo_filter.rst b/docs/root/api-v1/network_filters/echo_filter.rst new file mode 100644 index 000000000000..c18e2f9933d9 --- /dev/null +++ b/docs/root/api-v1/network_filters/echo_filter.rst @@ -0,0 +1,13 @@ +.. _config_network_filters_echo_v1: + +Echo +==== + +Echo :ref:`configuration overview `. + +.. code-block:: json + + { + "name": "echo", + "config": {} + } diff --git a/docs/root/api-v1/network_filters/http_conn_man.rst b/docs/root/api-v1/network_filters/http_conn_man.rst new file mode 100644 index 000000000000..b93b7f755903 --- /dev/null +++ b/docs/root/api-v1/network_filters/http_conn_man.rst @@ -0,0 +1,260 @@ +.. _config_network_filters_http_conn_man_v1: + +HTTP connection manager +======================= + +* HTTP connection manager :ref:`architecture overview `. +* HTTP protocols :ref:`architecture overview `. + +.. code-block:: json + + { + "name": "http_connection_manager", + "config": { + "codec_type": "...", + "stat_prefix": "...", + "rds": "{...}", + "route_config": "{...}", + "filters": [], + "add_user_agent": "...", + "tracing": "{...}", + "http1_settings": "{...}", + "http2_settings": "{...}", + "server_name": "...", + "idle_timeout_s": "...", + "drain_timeout_ms": "...", + "access_log": [], + "use_remote_address": "...", + "forward_client_cert": "...", + "set_current_client_cert": "...", + "generate_request_id": "..." + } + } + +.. _config_http_conn_man_codec_type: + +codec_type + *(required, string)* Supplies the type of codec that the connection manager should use. Possible + values are: + + http1 + The connection manager will assume that the client is speaking HTTP/1.1. + + http2 + The connection manager will assume that the client is speaking HTTP/2 (Envoy does not require + HTTP/2 to take place over TLS or to use ALPN. Prior knowledge is allowed). 
+ + auto + For every new connection, the connection manager will determine which codec to use. This mode + supports both ALPN for TLS listeners as well as protocol inference for plaintext listeners. + If ALPN data is available, it is preferred, otherwise protocol inference is used. In almost + all cases, this is the right option to choose for this setting. + +.. _config_http_conn_man_stat_prefix: + +stat_prefix + *(required, string)* The human readable prefix to use when emitting statistics for the + connection manager. See the :ref:`statistics ` documentation + for more information. + +.. _config_http_conn_man_rds_option: + +:ref:`rds ` + *(sometimes required, object)* The connection manager configuration must specify one of *rds* or + *route_config*. If *rds* is specified, the connection manager's route table will be dynamically + loaded via the RDS API. See the :ref:`documentation ` for more + information. + +.. _config_http_conn_man_route_config: + +:ref:`route_config ` + *(sometimes required, object)* The connection manager configuration must specify one of *rds* or + *route_config*. If *route_config* is specified, the :ref:`route table ` + for the connection manager is static and is specified in this property. + +:ref:`filters ` + *(required, array)* A list of individual :ref:`HTTP filters ` that + make up the filter chain for requests made to the connection manager. Order matters as the filters + are processed sequentially as request events happen. + +.. _config_http_conn_man_add_user_agent: + +add_user_agent + *(optional, boolean)* Whether the connection manager manipulates the + :ref:`config_http_conn_man_headers_user-agent` and + :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See the linked + documentation for more information. Defaults to false. + +:ref:`tracing ` + *(optional, object)* Presence of the object defines whether the connection manager + emits :ref:`tracing ` data to the :ref:`configured tracing provider + `. + +.. 
_config_http_conn_man_http1_settings: + +http1_settings + *(optional, object)* Additional HTTP/1 settings that are passed to the HTTP/1 codec. + + allow_absolute_url + *(optional, boolean)* Handle http requests with absolute urls in the requests. These requests + are generally sent by clients to forward/explicit proxies. This allows clients to configure + envoy as their http proxy. In Unix, for example, this is typically done by setting the + http_proxy environment variable. + +.. _config_http_conn_man_http2_settings: + +http2_settings + *(optional, object)* Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. + Currently supported settings are: + + hpack_table_size + *(optional, integer)* `Maximum table size `_ + (in octets) that the encoder is permitted to use for + the dynamic HPACK table. Valid values range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. + 0 effectively disables header compression. + + max_concurrent_streams + *(optional, integer)* `Maximum concurrent streams + `_ + allowed for peer on one HTTP/2 connection. + Valid values range from 1 to 2147483647 (2^31 - 1) and defaults to 2147483647. + +.. _config_http_conn_man_http2_settings_initial_stream_window_size: + + initial_stream_window_size + *(optional, integer)* `Initial stream-level flow-control window + `_ size. Valid values range from 65535 + (2^16 - 1, HTTP/2 default) to 2147483647 (2^31 - 1, HTTP/2 maximum) and defaults to 268435456 + (256 * 1024 * 1024). + + NOTE: 65535 is the initial window size from HTTP/2 spec. We only support increasing the default window + size now, so it's also the minimum. + + This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the + HTTP/2 codec buffers. Once the buffer reaches this pointer, watermark callbacks will fire to + stop the flow of data to the codec buffers. 
+ + initial_connection_window_size + *(optional, integer)* Similar to :ref:`initial_stream_window_size + `, but for connection-level flow-control + window. Currently , this has the same minimum/maximum/default as :ref:`initial_stream_window_size + `. + + These are the same options available in the upstream cluster :ref:`http2_settings + ` option. + +.. _config_http_conn_man_server_name: + +server_name + *(optional, string)* An optional override that the connection manager will write to the + :ref:`config_http_conn_man_headers_server` header in responses. If not set, the default is + *envoy*. + +idle_timeout_s + *(optional, integer)* The idle timeout in seconds for connections managed by the connection + manager. The idle timeout is defined as the period in which there are no active requests. If not + set, there is no idle timeout. When the idle timeout is reached the connection will be closed. If + the connection is an HTTP/2 connection a drain sequence will occur prior to closing the + connection. See :ref:`drain_timeout_ms `. + +.. _config_http_conn_man_drain_timeout_ms: + +drain_timeout_ms + *(optional, integer)* The time in milliseconds that Envoy will wait between sending an HTTP/2 + "shutdown notification" (GOAWAY frame with max stream ID) and a final GOAWAY frame. This is used + so that Envoy provides a grace period for new streams that race with the final GOAWAY frame. + During this grace period, Envoy will continue to accept new streams. After the grace period, a + final GOAWAY frame is sent and Envoy will start refusing new streams. Draining occurs both + when a connection hits the idle timeout or during general server draining. The default grace + period is 5000 milliseconds (5 seconds) if this option is not specified. + +:ref:`access_log ` + *(optional, array)* Configuration for :ref:`HTTP access logs ` + emitted by the connection manager. + +.. 
_config_http_conn_man_use_remote_address: + +use_remote_address + *(optional, boolean)* If set to true, the connection manager will use the real remote address + of the client connection when determining internal versus external origin and manipulating + various headers. If set to false or absent, the connection manager will use the + :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. See the documentation for + :ref:`config_http_conn_man_headers_x-forwarded-for`, + :ref:`config_http_conn_man_headers_x-envoy-internal`, and + :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information. + +.. _config_http_conn_man_forward_client_cert: + +forward_client_cert + *(optional, string)* How to handle the + :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP header. + Possible values are: + + 1. **sanitize**: Do not send the XFCC header to the next hop. This is the default value. + 2. **forward_only**: When the client connection is mTLS (Mutual TLS), forward the XFCC header in the request. + 3. **always_forward_only**: Always forward the XFCC header in the request, regardless of whether the client connection is mTLS. + 4. **append_forward**: When the client connection is mTLS, append the client certificate information to the request's XFCC header and forward it. + 5. **sanitize_set**: When the client connection is mTLS, reset the XFCC header with the client certificate information and send it to the next hop. + + For the format of the XFCC header, please refer to + :ref:`config_http_conn_man_headers_x-forwarded-client-cert`. + +.. _config_http_conn_man_set_current_client_cert_details: + +set_current_client_cert_details + *(optional, array)* A list of strings, possible values are *Subject* and *SAN*. This field is + valid only when *forward_client_cert* is *append_forward* or *sanitize_set* and the client + connection is mTLS. It specifies the fields in the client certificate to be forwarded. 
Note that + in the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, `Hash` is always set, + and `By` is always set when the client certificate presents the SAN value. + +generate_request_id + *(optional, boolean)* Whether the connection manager will generate the + :ref:`config_http_conn_man_headers_x-request-id` header if it does not exist. This defaults to + *true*. Generating a random UUID4 is expensive so in high throughput scenarios where this + feature is not desired it can be disabled. + +.. _config_http_conn_man_tracing: + +Tracing +------- + +.. code-block:: json + + { + "tracing": { + "operation_name": "...", + "request_headers_for_tags": [] + } + } + +operation_name + *(required, string)* Span name will be derived from operation_name. "ingress" and "egress" + are the only supported values. + +request_headers_for_tags + *(optional, array)* A list of header names used to create tags for the active span. + The header name is used to populate the tag name, and the header value is used to populate the + tag value. The tag is created if the specified header name is present in the request's headers. + +.. _config_http_conn_man_filters: + +Filters +------- + +HTTP filter :ref:`architecture overview `. + +.. code-block:: json + + { + "name": "...", + "config": "{...}" + } + +name + *(required, string)* The name of the filter to instantiate. The name must match a :ref:`supported + filter `. + +config + *(required, object)* Filter specific configuration which depends on the filter being + instantiated. See the :ref:`supported filters ` for further documentation. diff --git a/docs/root/api-v1/network_filters/mongo_proxy_filter.rst b/docs/root/api-v1/network_filters/mongo_proxy_filter.rst new file mode 100644 index 000000000000..cad23bc33ec2 --- /dev/null +++ b/docs/root/api-v1/network_filters/mongo_proxy_filter.rst @@ -0,0 +1,53 @@ +.. _config_network_filters_mongo_proxy_v1: + +Mongo proxy +=========== + +MongoDB :ref:`configuration overview `. 
+ +.. code-block:: json + + { + "name": "mongo_proxy", + "config": { + "stat_prefix": "...", + "access_log": "...", + "fault": {} + } + } + +stat_prefix + *(required, string)* The prefix to use when emitting :ref:`statistics + `. + +access_log + *(optional, string)* The optional path to use for writing Mongo access logs. If not access log + path is specified no access logs will be written. Note that access log is also gated by + :ref:`runtime `. + +fault + *(optional, object)* If specified, the filter will inject faults based on the values in the object. + +Fault configuration +------------------- + +Configuration for MongoDB fixed duration delays. Delays are applied to the following MongoDB +operations: Query, Insert, GetMore, and KillCursors. Once an active delay is in progress, all +incoming data up until the timer event fires will be a part of the delay. + +.. code-block:: json + + { + "fixed_delay": { + "percent": "...", + "duration_ms": "..." + } + } + +percent + *(required, integer)* Probability of an eligible MongoDB operation to be affected by the + injected fault when there is no active fault. Valid values are integers in a range of [0, 100]. + +duration_ms + *(required, integer)* Non-negative delay duration in milliseconds. + diff --git a/docs/root/api-v1/network_filters/network_filters.rst b/docs/root/api-v1/network_filters/network_filters.rst new file mode 100644 index 000000000000..deea21e4ede8 --- /dev/null +++ b/docs/root/api-v1/network_filters/network_filters.rst @@ -0,0 +1,8 @@ +Network filters +=============== + +.. toctree:: + :glob: + :maxdepth: 2 + + * diff --git a/docs/root/api-v1/network_filters/rate_limit_filter.rst b/docs/root/api-v1/network_filters/rate_limit_filter.rst new file mode 100644 index 000000000000..69ca122805ed --- /dev/null +++ b/docs/root/api-v1/network_filters/rate_limit_filter.rst @@ -0,0 +1,40 @@ +.. _config_network_filters_rate_limit_v1: + +Rate limit +========== + +Rate limit :ref:`configuration overview `. + +.. 
code-block:: json + + { + "name": "ratelimit", + "config": { + "stat_prefix": "...", + "domain": "...", + "descriptors": [], + "timeout_ms": "..." + } + } + +stat_prefix + *(required, string)* The prefix to use when emitting :ref:`statistics + `. + +domain + *(required, string)* The rate limit domain to use in the rate limit service request. + +descriptors + *(required, array)* The rate limit descriptor list to use in the rate limit service request. The + descriptors are specified as in the following example: + + .. code-block:: json + + [ + [{"key": "hello", "value": "world"}, {"key": "foo", "value": "bar"}], + [{"key": "foo2", "value": "bar2"}] + ] + +timeout_ms + *(optional, integer)* The timeout in milliseconds for the rate limit service RPC. If not set, + this defaults to 20ms. diff --git a/docs/root/api-v1/network_filters/redis_proxy_filter.rst b/docs/root/api-v1/network_filters/redis_proxy_filter.rst new file mode 100644 index 000000000000..b2d2653cf710 --- /dev/null +++ b/docs/root/api-v1/network_filters/redis_proxy_filter.rst @@ -0,0 +1,46 @@ +.. _config_network_filters_redis_proxy_v1: + +Redis proxy +=========== + +Redis proxy :ref:`configuration overview `. + +.. code-block:: json + + { + "name": "redis_proxy", + "config": { + "cluster_name": "...", + "conn_pool": "{...}", + "stat_prefix": "..." + } + } + +cluster_name + *(required, string)* Name of cluster from cluster manager. + See the :ref:`configuration section ` of the architecture + overview for recommendations on configuring the backing cluster. + +conn_pool + *(required, object)* Connection pool configuration. + +stat_prefix + *(required, string)* The prefix to use when emitting :ref:`statistics + `. + +Connection pool configuration +----------------------------- + +.. code-block:: json + + { + "op_timeout_ms": "...", + } + +op_timeout_ms + *(required, integer)* Per-operation timeout in milliseconds. The timer starts when the first + command of a pipeline is written to the backend connection. 
Each response received from Redis + resets the timer since it signifies that the next command is being processed by the backend. + The only exception to this behavior is when a connection to a backend is not yet established. In + that case, the connect timeout on the cluster will govern the timeout until the connection is + ready. diff --git a/docs/root/api-v1/network_filters/tcp_proxy_filter.rst b/docs/root/api-v1/network_filters/tcp_proxy_filter.rst new file mode 100644 index 000000000000..2dee121f2282 --- /dev/null +++ b/docs/root/api-v1/network_filters/tcp_proxy_filter.rst @@ -0,0 +1,126 @@ +.. _config_network_filters_tcp_proxy_v1: + +TCP proxy +========= + +TCP proxy :ref:`configuration overview `. + +.. code-block:: json + + { + "name": "tcp_proxy", + "config": { + "stat_prefix": "...", + "route_config": "{...}", + "access_log": [] + } + } + +:ref:`route_config ` + *(required, object)* The route table for the filter. + All filter instances must have a route table, even if it is empty. + +stat_prefix + *(required, string)* The prefix to use when emitting :ref:`statistics + `. + +:ref:`access_log ` + *(optional, array)* Configuration for :ref:`access logs ` + emitted by the this tcp_proxy. + +.. _config_network_filters_tcp_proxy_route_config: + +Route Configuration +------------------- + +.. code-block:: json + + { + "routes": [] + } + +:ref:`routes ` + *(required, array)* An array of route entries that make up the route table. + +.. _config_network_filters_tcp_proxy_route: + +Route +----- + +A TCP proxy route consists of a set of optional L4 criteria and the name of a +:ref:`cluster `. If a downstream connection matches +all the specified criteria, the cluster in the route is used for the corresponding upstream +connection. Routes are tried in the order specified until a match is found. If no match is +found, the connection is closed. A route with no criteria is valid and always produces a match. + +.. 
code-block:: json + + { + "cluster": "...", + "destination_ip_list": [], + "destination_ports": "...", + "source_ip_list": [], + "source_ports": "..." + } + +cluster + *(required, string)* The :ref:`cluster ` to connect + to when a the downstream network connection matches the specified criteria. + +destination_ip_list + *(optional, array)* An optional list of IP address subnets in the form "ip_address/xx". + The criteria is satisfied if the destination IP address of the downstream connection is + contained in at least one of the specified subnets. + If the parameter is not specified or the list is empty, the destination IP address is ignored. + The destination IP address of the downstream connection might be different from the addresses + on which the proxy is listening if the connection has been redirected. Example: + + .. code-block:: json + + [ + "192.168.3.0/24", + "50.1.2.3/32", + "10.15.0.0/16", + "2001:abcd::/64" + ] + +destination_ports + *(optional, string)* An optional string containing a comma-separated list of port numbers or + ranges. The criteria is satisfied if the destination port of the downstream connection + is contained in at least one of the specified ranges. + If the parameter is not specified, the destination port is ignored. The destination port address + of the downstream connection might be different from the port on which the proxy is listening if + the connection has been redirected. Example: + + .. code-block:: json + + { + "destination_ports": "1-1024,2048-4096,12345" + } + +source_ip_list + *(optional, array)* An optional list of IP address subnets in the form "ip_address/xx". + The criteria is satisfied if the source IP address of the downstream connection is contained + in at least one of the specified subnets. If the parameter is not specified or the list is empty, + the source IP address is ignored. Example: + + .. 
code-block:: json + + [ + "192.168.3.0/24", + "50.1.2.3/32", + "10.15.0.0/16", + "2001:abcd::/64" + ] + +source_ports + *(optional, string)* An optional string containing a comma-separated list of port numbers or + ranges. The criteria is satisfied if the source port of the downstream connection is contained + in at least one of the specified ranges. If the parameter is not specified, the source port is + ignored. Example: + + .. code-block:: json + + { + "source_ports": "1-1024,2048-4096,12345" + } diff --git a/docs/root/api-v1/rate_limit.rst b/docs/root/api-v1/rate_limit.rst new file mode 100644 index 000000000000..02b867ad0f33 --- /dev/null +++ b/docs/root/api-v1/rate_limit.rst @@ -0,0 +1,28 @@ +.. _config_rate_limit_service_v1: + +Rate limit service +================== + +Rate limit :ref:`configuration overview `. + +.. code-block:: json + + { + "type": "grpc_service", + "config": { + "cluster_name": "..." + } + } + +type + *(required, string)* Specifies the type of rate limit service to call. Currently the only + supported option is *grpc_service* which specifies Lyft's global rate limit service and + associated IDL. + +config + *(required, object)* Specifies type specific configuration for the rate limit service. + + cluster_name + *(required, string)* Specifies the cluster manager cluster name that hosts the rate limit + service. The client will connect to this cluster when it needs to make rate limit service + requests. diff --git a/docs/root/api-v1/route_config/rate_limits.rst b/docs/root/api-v1/route_config/rate_limits.rst new file mode 100644 index 000000000000..a5d7624778be --- /dev/null +++ b/docs/root/api-v1/route_config/rate_limits.rst @@ -0,0 +1,183 @@ +.. _config_http_conn_man_route_table_rate_limit_config: + +Rate limit configuration +======================== + +Global rate limiting :ref:`architecture overview `. + +.. 
code-block:: json + + { + "stage": "...", + "disable_key": "...", + "actions": [] + } + +stage + *(optional, integer)* Refers to the stage set in the filter. The rate limit configuration + only applies to filters with the same stage number. The default stage number is 0. + + **NOTE:** The filter supports a range of 0 - 10 inclusively for stage numbers. + +disable_key + *(optional, string)* The key to be set in runtime to disable this rate limit configuration. + +actions + *(required, array)* A list of actions that are to be applied for this rate limit configuration. + Order matters as the actions are processed sequentially and the descriptor is composed by + appending descriptor entries in that sequence. If an action cannot append a descriptor entry, + no descriptor is generated for the configuration. See :ref:`composing actions + ` for additional documentation. + +.. _config_http_conn_man_route_table_rate_limit_actions: + +Actions +------- + +.. code-block:: json + + { + "type": "..." + } + +type + *(required, string)* The type of rate limit action to perform. The currently supported action + types are *source_cluster*, *destination_cluster* , *request_headers*, *remote_address*, + *generic_key* and *header_value_match*. + +Source Cluster +^^^^^^^^^^^^^^ + +.. code-block:: json + + { + "type": "source_cluster" + } + +The following descriptor entry is appended to the descriptor: + +.. code-block:: cpp + + ("source_cluster", "") + + is derived from the :option:`--service-cluster` option. + +Destination Cluster +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: json + + { + "type": "destination_cluster" + } + +The following descriptor entry is appended to the descriptor: + +.. code-block:: cpp + + ("destination_cluster", "") + +Once a request matches against a route table rule, a routed cluster is determined by one of the +following :ref:`route table configuration ` +settings: + + * :ref:`cluster ` indicates the upstream cluster + to route to. 
+ * :ref:`weighted_clusters ` + chooses a cluster randomly from a set of clusters with attributed weight. + * :ref:`cluster_header` indicates which + header in the request contains the target cluster. + +Request Headers +^^^^^^^^^^^^^^^ + +.. code-block:: json + + { + "type": "request_headers", + "header_name": "...", + "descriptor_key" : "..." + } + +header_name + *(required, string)* The header name to be queried from the request headers. The header's value is + used to populate the value of the descriptor entry for the descriptor_key. + +descriptor_key + *(required, string)* The key to use in the descriptor entry. + +The following descriptor entry is appended when a header contains a key that matches the +*header_name*: + +.. code-block:: cpp + + ("", "") + +Remote Address +^^^^^^^^^^^^^^ + +.. code-block:: json + + { + "type": "remote_address" + } + +The following descriptor entry is appended to the descriptor and is populated using the trusted +address from :ref:`x-forwarded-for `: + +.. code-block:: cpp + + ("remote_address", "") + +Generic Key +^^^^^^^^^^^ + +.. code-block:: json + + { + "type": "generic_key", + "descriptor_value" : "..." + } + + +descriptor_value + *(required, string)* The value to use in the descriptor entry. + +The following descriptor entry is appended to the descriptor: + +.. code-block:: cpp + + ("generic_key", "") + +Header Value Match +^^^^^^^^^^^^^^^^^^ + +.. code-block:: json + + { + "type": "header_value_match", + "descriptor_value" : "...", + "expect_match" : "...", + "headers" : [] + } + + +descriptor_value + *(required, string)* The value to use in the descriptor entry. + +expect_match + *(optional, boolean)* If set to true, the action will append a descriptor entry when the request + matches the :ref:`headers`. If set to false, + the action will append a descriptor entry when the request does not match the + :ref:`headers`. The default value is true. 
+ +:ref:`headers` + *(required, array)* Specifies a set of headers that the rate limit action should match on. The + action will check the request's headers against all the specified headers in the config. A match + will happen if all the headers in the config are present in the request with the same values (or + based on presence if the ``value`` field is not in the config). + +The following descriptor entry is appended to the descriptor: +.. code-block:: cpp + + ("header_match", "") diff --git a/docs/root/api-v1/route_config/rds.rst b/docs/root/api-v1/route_config/rds.rst new file mode 100644 index 000000000000..650744dd5d27 --- /dev/null +++ b/docs/root/api-v1/route_config/rds.rst @@ -0,0 +1,63 @@ +.. _config_http_conn_man_rds_v1: + +Route discovery service (RDS) +============================= + +.. code-block:: json + + { + "cluster": "...", + "route_config_name": "...", + "refresh_delay_ms": "..." + } + +cluster + *(required, string)* The name of an upstream :ref:`cluster ` that + hosts the route discovery service. The cluster must run a REST service that implements the + :ref:`RDS HTTP API `. NOTE: This is the *name* of a statically defined + cluster in the :ref:`cluster manager ` configuration, not the full definition of + a cluster as in the case of SDS and CDS. + +route_config_name + *(required, string)* The name of the route configuration. This name will be passed to the + :ref:`RDS HTTP API `. This allows an Envoy configuration with + multiple HTTP listeners (and associated HTTP connection manager filters) to use different route + configurations. By default, the maximum length of the name is limited to 60 characters. This + limit can be increased by setting the :option:`--max-obj-name-len` command line argument to the + desired value. + +refresh_delay_ms + *(optional, integer)* The delay, in milliseconds, between fetches to the RDS API. Envoy will add + an additional random jitter to the delay that is between zero and *refresh_delay_ms* + milliseconds. 
Thus the longest possible refresh delay is 2 \* *refresh_delay_ms*. Default + value is 30000ms (30 seconds). + +.. _config_http_conn_man_rds_v1_api: + +REST API +-------- + +.. http:get:: /v1/routes/(string: route_config_name)/(string: service_cluster)/(string: service_node) + +Asks the route discovery service to return the route configuration for a particular +`route_config_name`, `service_cluster`, and `service_node`. `route_config_name` corresponds to the +RDS configuration parameter above. `service_cluster` corresponds to the :option:`--service-cluster` +CLI option. `service_node` corresponds to the :option:`--service-node` CLI option. Responses are a +single JSON object that contains a route configuration as defined in the :ref:`route configuration +documentation `. + +A new route configuration will be gracefully swapped in such that existing requests are not +affected. This means that when a request starts, it sees a consistent snapshot of the route +configuration that does not change for the duration of the request. Thus, if an update changes a +timeout for example, only new requests will use the updated timeout value. + +As a performance optimization, Envoy hashes the route configuration it receives from the RDS API and +will only perform a full reload if the hash value changes. + +.. attention:: + + Route configurations that are loaded via RDS are *not* checked to see if referenced clusters are + known to the :ref:`cluster manager `. The RDS API has been designed to + work alongside the :ref:`CDS API ` such that Envoy assumes eventually + consistent updates. If a route references an unknown cluster a 404 response will be returned by + the router filter. diff --git a/docs/root/api-v1/route_config/route.rst b/docs/root/api-v1/route_config/route.rst new file mode 100644 index 000000000000..f9c42648f4c8 --- /dev/null +++ b/docs/root/api-v1/route_config/route.rst @@ -0,0 +1,553 @@ +.. 
_config_http_conn_man_route_table_route: + +Route +===== + +A route is both a specification of how to match a request as well as in indication of what to do +next (e.g., redirect, forward, rewrite, etc.). + +.. attention:: + + Envoy supports routing on HTTP method via :ref:`header matching + `. + +.. code-block:: json + + { + "prefix": "...", + "path": "...", + "regex": "...", + "cluster": "...", + "cluster_header": "...", + "weighted_clusters" : "{...}", + "host_redirect": "...", + "path_redirect": "...", + "prefix_rewrite": "...", + "host_rewrite": "...", + "auto_host_rewrite": "...", + "case_sensitive": "...", + "use_websocket": "...", + "timeout_ms": "...", + "runtime": "{...}", + "retry_policy": "{...}", + "shadow": "{...}", + "priority": "...", + "headers": [], + "rate_limits": [], + "include_vh_rate_limits" : "...", + "hash_policy": "{...}", + "request_headers_to_add" : [], + "opaque_config": [], + "cors": "{...}", + "decorator" : "{...}" + } + +prefix + *(sometimes required, string)* If specified, the route is a prefix rule meaning that the prefix + must match the beginning of the :path header. One of *prefix*, *path*, or *regex* must be specified. + +path + *(sometimes required, string)* If specified, the route is an exact path rule meaning that the path + must exactly match the :path header once the query string is removed. One of *prefix*, *path*, or + *regex* must be specified. + +regex + *(sometimes required, string)* If specified, the route is a regular expression rule meaning that the + regex must match the :path header once the query string is removed. The entire path (without the + query string) must match the regex. The rule will not match if only a subsequence of the :path header + matches the regex. The regex grammar is defined `here + `_. One of *prefix*, *path*, or + *regex* must be specified. 
+ + Examples: + + * The regex */b[io]t* matches the path */bit* + * The regex */b[io]t* matches the path */bot* + * The regex */b[io]t* does not match the path */bite* + * The regex */b[io]t* does not match the path */bit/bot* + +:ref:`cors ` + *(optional, object)* Specifies the route's CORS policy. + +.. _config_http_conn_man_route_table_route_cluster: + +cluster + *(sometimes required, string)* If the route is not a redirect (*host_redirect* and/or + *path_redirect* is not specified), one of *cluster*, *cluster_header*, or *weighted_clusters* must + be specified. When *cluster* is specified, its value indicates the upstream cluster to which the + request should be forwarded to. + +.. _config_http_conn_man_route_table_route_cluster_header: + +cluster_header + *(sometimes required, string)* If the route is not a redirect (*host_redirect* and/or + *path_redirect* is not specified), one of *cluster*, *cluster_header*, or *weighted_clusters* must + be specified. When *cluster_header* is specified, Envoy will determine the cluster to route to + by reading the value of the HTTP header named by *cluster_header* from the request headers. + If the header is not found or the referenced cluster does not exist, Envoy will return a 404 + response. + + .. attention:: + + Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 *Host* + header. Thus, if attempting to match on *Host*, match on *:authority* instead. + +.. _config_http_conn_man_route_table_route_config_weighted_clusters: + +:ref:`weighted_clusters ` + *(sometimes required, object)* If the route is not a redirect (*host_redirect* and/or + *path_redirect* is not specified), one of *cluster*, *cluster_header*, or *weighted_clusters* must + be specified. With the *weighted_clusters* option, multiple upstream clusters can be specified for + a given route. The request is forwarded to one of the upstream clusters based on weights assigned + to each cluster. 
See :ref:`traffic splitting ` + for additional documentation. + +.. _config_http_conn_man_route_table_route_host_redirect: + +host_redirect + *(sometimes required, string)* Indicates that the route is a redirect rule. If there is a match, + a 301 redirect response will be sent which swaps the host portion of the URL with this value. + *path_redirect* can also be specified along with this option. + +.. _config_http_conn_man_route_table_route_path_redirect: + +path_redirect + *(sometimes required, string)* Indicates that the route is a redirect rule. If there is a match, + a 301 redirect response will be sent which swaps the path portion of the URL with this value. + *host_redirect* can also be specified along with this option. The router filter will place + the original path before rewrite into the :ref:`x-envoy-original-path + ` header. + +.. _config_http_conn_man_route_table_route_prefix_rewrite: + +prefix_rewrite + *(optional, string)* Indicates that during forwarding, the matched prefix (or path) should be + swapped with this value. When using regex path matching, the entire path (not including + the query string) will be swapped with this value. This option allows application URLs to be + rooted at a different path from those exposed at the reverse proxy layer. + +.. _config_http_conn_man_route_table_route_host_rewrite: + +host_rewrite + *(optional, string)* Indicates that during forwarding, the host header will be swapped with this + value. + +.. _config_http_conn_man_route_table_route_auto_host_rewrite: + +auto_host_rewrite + *(optional, boolean)* Indicates that during forwarding, the host header will be swapped with the + hostname of the upstream host chosen by the cluster manager. This option is applicable only when + the destination cluster for a route is of type *strict_dns* or *logical_dns*. Setting this to true + with other cluster types has no effect. *auto_host_rewrite* and *host_rewrite* are mutually exclusive + options. Only one can be specified. 
+ +.. _config_http_conn_man_route_table_route_case_sensitive: + +case_sensitive + *(optional, boolean)* Indicates that prefix/path matching should be case sensitive. The default + is true. + +.. _config_http_conn_man_route_table_route_use_websocket: + +use_websocket + *(optional, boolean)* Indicates that a HTTP/1.1 client connection to this particular route + should be allowed to upgrade to a WebSocket connection. The default is false. + + .. attention:: + + If set to true, Envoy will expect the first request matching this route to contain WebSocket + upgrade headers. If the headers are not present, the connection will be processed as a normal + HTTP/1.1 connection. If the upgrade headers are present, Envoy will setup plain TCP proxying + between the client and the upstream server. Hence, an upstream server that rejects the WebSocket + upgrade request is also responsible for closing the associated connection. Until then, Envoy will + continue to proxy data from the client to the upstream server. + + Redirects, timeouts and retries are not supported on requests with WebSocket upgrade headers. + +.. _config_http_conn_man_route_table_route_timeout: + +timeout_ms + *(optional, integer)* Specifies the timeout for the route. If not specified, the default is 15s. + Note that this timeout includes all retries. See also + :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, + :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the + :ref:`retry overview `. + +:ref:`runtime ` + *(optional, object)* Indicates that the route should additionally match on a runtime key. + +:ref:`retry_policy ` + *(optional, object)* Indicates that the route has a retry policy. + +:ref:`shadow ` + *(optional, object)* Indicates that the route has a shadow policy. + +priority + *(optional, string)* Optionally specifies the :ref:`routing priority + `. + +:ref:`headers ` + *(optional, array)* Specifies a set of headers that the route should match on. 
The router will + check the request's headers against all the specified headers in the route config. A match will + happen if all the headers in the route are present in the request with the same values (or based + on presence if the ``value`` field is not in the config). + +request_headers_to_add + *(optional, array)* Specifies a list of HTTP headers that should be added to each + request handled by this virtual host. Headers are specified in the following form: + + .. code-block:: json + + [ + {"key": "header1", "value": "value1"}, + {"key": "header2", "value": "value2"} + ] + + For more information see the documentation on :ref:`custom request headers + `. + +:ref:`opaque_config ` + *(optional, array)* Specifies a set of optional route configuration values that can be accessed by filters. + +.. _config_http_conn_man_route_table_route_rate_limits: + +:ref:`rate_limits ` + *(optional, array)* Specifies a set of rate limit configurations that could be applied to the + route. + +.. _config_http_conn_man_route_table_route_include_vh: + +include_vh_rate_limits + *(optional, boolean)* Specifies if the rate limit filter should include the virtual host rate + limits. By default, if the route configured rate limits, the virtual host + :ref:`rate_limits ` are not applied to the + request. + +:ref:`hash_policy ` + *(optional, object)* Specifies the route's hashing policy if the upstream cluster uses a hashing + :ref:`load balancer `. + +:ref:`decorator ` + *(optional, object)* Specifies the route's decorator used to enhance information reported about + the matched request. + +.. _config_http_conn_man_route_table_route_runtime: + +Runtime +------- + +A :ref:`runtime ` route configuration can be used to roll out route changes +in a gradual manner without full code/config deploys. Refer to the +:ref:`traffic shifting ` docs +for additional documentation. + +.. code-block:: json + + { + "key": "...", + "default": "..." 
+ } + +key + *(required, string)* Specifies the runtime key name that should be consulted to determine whether + the route matches or not. See the :ref:`runtime documentation ` for how key + names map to the underlying implementation. + +.. _config_http_conn_man_route_table_route_runtime_default: + +default + *(required, integer)* An integer between 0-100. Every time the route is considered for a match, + a random number between 0-99 is selected. If the number is <= the value found in the *key* + (checked first) or, if the key is not present, the default value, the route is a match (assuming + everything also about the route matches). + +.. _config_http_conn_man_route_table_route_retry: + +Retry policy +------------ + +HTTP retry :ref:`architecture overview `. + +.. code-block:: json + + { + "retry_on": "...", + "num_retries": "...", + "per_try_timeout_ms" : "..." + } + +retry_on + *(required, string)* Specifies the conditions under which retry takes place. These are the same + conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and + :ref:`config_http_filters_router_x-envoy-retry-grpc-on`. + +num_retries + *(optional, integer)* Specifies the allowed number of retries. This parameter is optional and + defaults to 1. These are the same conditions documented for + :ref:`config_http_filters_router_x-envoy-max-retries`. + +per_try_timeout_ms + *(optional, integer)* Specifies a non-zero timeout per retry attempt. This parameter is optional. + The same conditions documented for + :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply. + + **Note:** If left unspecified, Envoy will use the global + :ref:`route timeout ` for the request. + Consequently, when using a :ref:`5xx ` based + retry policy, a request that times out will not be retried as the total timeout budget + would have been exhausted. + +.. 
_config_http_conn_man_route_table_route_shadow: + +Shadow +------ + +The router is capable of shadowing traffic from one cluster to another. The current implementation +is "fire and forget," meaning Envoy will not wait for the shadow cluster to respond before returning +the response from the primary cluster. All normal statistics are collected for the shadow +cluster making this feature useful for testing. + +During shadowing, the host/authority header is altered such that *-shadow* is appended. This is +useful for logging. For example, *cluster1* becomes *cluster1-shadow*. + +.. code-block:: json + + { + "cluster": "...", + "runtime_key": "..." + } + +cluster + *(required, string)* Specifies the cluster that requests will be shadowed to. The cluster must + exist in the :ref:`cluster manager configuration `. + +runtime_key + *(optional, string)* If not specified, **all** requests to the target cluster will be shadowed. + If specified, Envoy will lookup the runtime key to get the % of requests to shadow. Valid values are + from 0 to 10000, allowing for increments of 0.01% of requests to be shadowed. If the runtime key + is specified in the configuration but not present in runtime, 0 is the default and thus 0% of + requests will be shadowed. + +.. _config_http_conn_man_route_table_route_headers: + +Headers +------- + +.. code-block:: json + + { + "name": "...", + "value": "...", + "regex": "...", + "range_match": "..." + } + +name + *(required, string)* Specifies the name of the header in the request. + +value + *(optional, string)* Specifies the value of the header. If the value is absent a request that has + the *name* header will match, regardless of the header's value. + +regex + *(optional, boolean)* Specifies whether the header value is a regular + expression or not. Defaults to false. The entire request header value must match the regex. The + rule will not match if only a subsequence of the request header value matches the regex. 
The + regex grammar used in the value field is defined + `here `_. + + Examples: + + * The regex *\d{3}* matches the value *123* + * The regex *\d{3}* does not match the value *1234* + * The regex *\d{3}* does not match the value *123.456* + +:ref:`range_match ` + *(optional, object)* Specifies the range that will be used for header matching. + +.. attention:: + + Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 *Host* + header. Thus, if attempting to match on *Host*, match on *:authority* instead. + +.. attention:: + + To route on HTTP method, use the special HTTP/2 *:method* header. This works for both + HTTP/1 and HTTP/2 as Envoy normalizes headers. E.g., + + .. code-block:: json + + { + "name": ":method", + "value": "POST" + } + +.. _config_http_conn_man_route_table_route_weighted_clusters: + +Weighted Clusters +----------------- + +Compared to the ``cluster`` field that specifies a single upstream cluster as the target +of a request, the ``weighted_clusters`` option allows for specification of multiple upstream clusters +along with weights that indicate the **percentage** of traffic to be forwarded to each cluster. +The router selects an upstream cluster based on the weights. + +.. code-block:: json + + { + "clusters": [], + "runtime_key_prefix" : "..." + } + +clusters + *(required, array)* Specifies one or more upstream clusters associated with the route. + + .. code-block:: json + + { + "name" : "...", + "weight": "..." + } + + name + *(required, string)* Name of the upstream cluster. The cluster must exist in the + :ref:`cluster manager configuration `. + + weight + *(required, integer)* An integer between 0-100. When a request matches the route, + the choice of an upstream cluster is determined by its weight. The sum of + weights across all entries in the *clusters* array must add up to 100. 
+ +runtime_key_prefix + *(optional, string)* Specifies the runtime key prefix that should be used to construct the runtime + keys associated with each cluster. When the ``runtime_key_prefix`` is specified, the router will + look for weights associated with each upstream cluster under the key + ``runtime_key_prefix + "." + cluster[i].name`` where ``cluster[i]`` denotes an entry in the + ``clusters`` array field. If the runtime key for the cluster does not exist, the value specified + in the configuration file will be used as the default weight. + See the :ref:`runtime documentation ` for how key names map to the + underlying implementation. + + **Note:** If the sum of runtime weights exceed 100, the traffic splitting behavior + is undefined (although the request will be routed to one of the clusters). + +.. _config_http_conn_man_route_table_hash_policy: + +Hash policy +----------- + +Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer +`. + +.. code-block:: json + + { + "header_name": "..." + } + +header_name + *(required, string)* The name of the request header that will be used to obtain the hash key. If + the request header is not present, the load balancer will use a random number as the hash, + effectively making the load balancing policy random. + +.. _config_http_conn_man_route_table_decorator: + +Decorator +--------- + +Specifies the route's decorator. + +.. code-block:: json + + { + "operation": "..." + } + +operation + *(required, string)* The operation name associated with the request matched to this route. If tracing is + enabled, this information will be used as the span name reported for this request. NOTE: For ingress + (inbound) requests, or egress (outbound) responses, this value may be overridden by the + :ref:`x-envoy-decorator-operation ` header. + +.. 
_config_http_conn_man_route_table_opaque_config: + +Opaque Config +------------- + +Additional configuration can be provided to filters through the "Opaque Config" mechanism. A +list of properties are specified in the route config. The configuration is uninterpreted +by envoy and can be accessed within a user-defined filter. The configuration is a generic +string map. Nested objects are not supported. + +.. code-block:: json + + [ + {"...": "..."} + ] + +.. _config_http_conn_man_route_table_cors: + +Cors +-------- + +Settings on a route take precedence over settings on the virtual host. + +.. code-block:: json + + { + "enabled": false, + "allow_origin": ["http://foo.example"], + "allow_methods": "POST, GET, OPTIONS", + "allow_headers": "Content-Type", + "allow_credentials": false, + "expose_headers": "X-Custom-Header", + "max_age": "86400" + } + +enabled + *(optional, boolean)* Defaults to true. Setting *enabled* to false on a route disables CORS + for this route only. The setting has no effect on a virtual host. + +allow_origin + *(optional, array)* The origins that will be allowed to do CORS request. + Wildcard "\*" will allow any origin. + +allow_methods + *(optional, string)* The content for the *access-control-allow-methods* header. + Comma separated list of HTTP methods. + +allow_headers + *(optional, string)* The content for the *access-control-allow-headers* header. + Comma separated list of HTTP headers. + +allow_credentials + *(optional, boolean)* Whether the resource allows credentials. + +expose_headers + *(optional, string)* The content for the *access-control-expose-headers* header. + Comma separated list of HTTP headers. + +max_age + *(optional, string)* The content for the *access-control-max-age* header. + Value in seconds for how long the response to the preflight request can be cached. + + .. 
_config_http_conn_man_route_table_range: + +range_match +-------------- + +Specifies the int64 start and end of the range using half-open interval semantics [start, end). +Header route matching will be performed if the header's value lies within this range. + +.. code-block:: json + + { + "start": "...", + "end": "..." + } + +start + *(required, integer)* start of the range (inclusive). + +end + *(required, integer)* end of the range (exclusive). diff --git a/docs/root/api-v1/route_config/route_config.rst b/docs/root/api-v1/route_config/route_config.rst new file mode 100644 index 000000000000..6e57c2f3ccdc --- /dev/null +++ b/docs/root/api-v1/route_config/route_config.rst @@ -0,0 +1,92 @@ +.. _config_http_conn_man_route_table: + +HTTP Route configuration +======================== + +* Routing :ref:`architecture overview ` +* HTTP :ref:`router filter ` + +.. code-block:: json + + { + "validate_clusters": "...", + "virtual_hosts": [], + "internal_only_headers": [], + "response_headers_to_add": [], + "response_headers_to_remove": [], + "request_headers_to_add": [] + } + +.. _config_http_conn_man_route_table_validate_clusters: + +validate_clusters + *(optional, boolean)* An optional boolean that specifies whether the clusters that the route + table refers to will be validated by the cluster manager. If set to true and a route refers to + a non-existent cluster, the route table will not load. If set to false and a route refers to a + non-existent cluster, the route table will load and the router filter will return a 404 if the + route is selected at runtime. This setting defaults to true if the route table is statically + defined via the :ref:`route_config ` option. This setting + default to false if the route table is loaded dynamically via the :ref:`rds + ` option. Users may which to override the default behavior in + certain cases (for example when using :ref:`cds ` with a static + route table). 
+ +:ref:`virtual_hosts ` + *(required, array)* An array of virtual hosts that make up the route table. + +internal_only_headers + *(optional, array)* Optionally specifies a list of HTTP headers that the connection manager + will consider to be internal only. If they are found on external requests they will be cleaned + prior to filter invocation. See :ref:`config_http_conn_man_headers_x-envoy-internal` for more + information. Headers are specified in the following form: + + .. code-block:: json + + ["header1", "header2"] + +response_headers_to_add + *(optional, array)* Optionally specifies a list of HTTP headers that should be added to each + response that the connection manager encodes. Headers are specified in the following form: + + .. code-block:: json + + [ + {"key": "header1", "value": "value1"}, + {"key": "header2", "value": "value2"} + ] + + For more information, including details on header value syntax, see the documentation on + :ref:`custom request headers `. + +response_headers_to_remove + *(optional, array)* Optionally specifies a list of HTTP headers that should be removed from each + response that the connection manager encodes. Headers are specified in the following form: + + .. code-block:: json + + ["header1", "header2"] + +.. _config_http_conn_man_route_table_add_req_headers: + +request_headers_to_add + *(optional, array)* Specifies a list of HTTP headers that should be added to each + request forwarded by the HTTP connection manager. Headers are specified in the following form: + + .. code-block:: json + + [ + {"key": "header1", "value": "value1"}, + {"key": "header2", "value": "value2"} + ] + + For more information, including details on header value syntax, see the documentation on + :ref:`custom request headers `. + +.. 
toctree:: + :hidden: + + vhost + route + vcluster + rate_limits + rds diff --git a/docs/root/api-v1/route_config/vcluster.rst b/docs/root/api-v1/route_config/vcluster.rst new file mode 100644 index 000000000000..075c3b2f567d --- /dev/null +++ b/docs/root/api-v1/route_config/vcluster.rst @@ -0,0 +1,47 @@ +.. _config_http_conn_man_route_table_vcluster: + +Virtual cluster +=============== + +A virtual cluster is a way of specifying a regex matching rule against certain important endpoints +such that statistics are generated explicitly for the matched requests. The reason this is useful is +that when doing prefix/path matching Envoy does not always know what the application considers to +be an endpoint. Thus, it's impossible for Envoy to generically emit per endpoint statistics. +However, often systems have highly critical endpoints that they wish to get "perfect" statistics on. +Virtual cluster statistics are perfect in the sense that they are emitted on the downstream side +such that they include network level failures. + +.. note:: + + Virtual clusters are a useful tool, but we do not recommend setting up a virtual cluster for + every application endpoint. This is both not easily maintainable as well as the matching and + statistics output are not free. + +.. code-block:: json + + { + "pattern": "...", + "name": "...", + "method": "..." + } + +pattern + *(required, string)* Specifies a regex pattern to use for matching requests. The entire path of the request + must match the regex. The regex grammar used is defined `here `_. + +name + *(required, string)* Specifies the name of the virtual cluster. The virtual cluster name as well + as the virtual host name are used when emitting statistics. The statistics are emitted by the + router filter and are documented :ref:`here `. + +method + *(optional, string)* Optionally specifies the HTTP method to match on. For example *GET*, *PUT*, + etc. 
+ + Examples: + + * The regex */rides/\d+* matches the path */rides/0* + * The regex */rides/\d+* matches the path */rides/123* + * The regex */rides/\d+* does not match the path */rides/123/456* + +Documentation for :ref:`virtual cluster statistics `. diff --git a/docs/root/api-v1/route_config/vhost.rst b/docs/root/api-v1/route_config/vhost.rst new file mode 100644 index 000000000000..2d4662124101 --- /dev/null +++ b/docs/root/api-v1/route_config/vhost.rst @@ -0,0 +1,84 @@ +.. _config_http_conn_man_route_table_vhost: + +Virtual host +============ + +The top level element in the routing configuration is a virtual host. Each virtual host has +a logical name as well as a set of domains that get routed to it based on the incoming request's +host header. This allows a single listener to service multiple top level domain path trees. Once a +virtual host is selected based on the domain, the routes are processed in order to see which +upstream cluster to route to or whether to perform a redirect. + +.. code-block:: json + + { + "name": "...", + "domains": [], + "routes": [], + "require_ssl": "...", + "virtual_clusters": [], + "rate_limits": [], + "request_headers_to_add": [] + } + +name + *(required, string)* The logical name of the virtual host. This is used when emitting certain + statistics but is not relevant for forwarding. By default, the maximum length of the name is + limited to 60 characters. This limit can be increased by setting the + :option:`--max-obj-name-len` command line argument to the desired value. + +domains + *(required, array)* A list of domains (host/authority header) that will be matched to this + virtual host. Wildcard hosts are supported in the form of "\*.foo.com" or "\*-bar.foo.com". + Note that the wildcard will not match the empty string. e.g. "\*-bar.foo.com" will match + "baz-bar.foo.com" but not "-bar.foo.com". Additionally, a special entry "\*" is allowed + which will match any host/authority header. 
Only a single virtual host in the entire route + configuration can match on "\*". A domain must be unique across all virtual hosts or the config + will fail to load. + +:ref:`routes ` + *(required, array)* The list of routes that will be matched, in order, for incoming requests. + The first route that matches will be used. + +:ref:`cors ` + *(optional, object)* Specifies the virtual host's CORS policy. + +.. _config_http_conn_man_route_table_vhost_require_ssl: + +require_ssl + *(optional, string)* Specifies the type of TLS enforcement the virtual host expects. Possible + values are: + + all + All requests must use TLS. If a request is not using TLS, a 302 redirect will be sent telling + the client to use HTTPS. + + external_only + External requests must use TLS. If a request is external and it is not using TLS, a 302 redirect + will be sent telling the client to use HTTPS. + + If this option is not specified, there is no TLS requirement for the virtual host. + +:ref:`virtual_clusters ` + *(optional, array)* A list of virtual clusters defined for this virtual host. Virtual clusters + are used for additional statistics gathering. + +:ref:`rate_limits ` + *(optional, array)* Specifies a set of rate limit configurations that will be applied to the + virtual host. + +.. _config_http_conn_man_route_table_vhost_add_req_headers: + +request_headers_to_add + *(optional, array)* Specifies a list of HTTP headers that should be added to each + request handled by this virtual host. Headers are specified in the following form: + + .. code-block:: json + + [ + {"key": "header1", "value": "value1"}, + {"key": "header2", "value": "value2"} + ] + + For more information see the documentation on :ref:`custom request headers + `. diff --git a/docs/root/api-v1/runtime.rst b/docs/root/api-v1/runtime.rst new file mode 100644 index 000000000000..4cb67a4193b2 --- /dev/null +++ b/docs/root/api-v1/runtime.rst @@ -0,0 +1,34 @@ +.. 
_config_runtime_v1: + +Runtime +======= + +Runtime :ref:`configuration overview `. + +.. code-block:: json + + { + "symlink_root": "...", + "subdirectory": "...", + "override_subdirectory": "..." + } + +symlink_root + *(required, string)* The implementation assumes that the file system tree is accessed via a + symbolic link. An atomic link swap is used when a new tree should be switched to. This + parameter specifies the path to the symbolic link. Envoy will watch the location for changes + and reload the file system tree when they happen. + +subdirectory + *(required, string)* Specifies the subdirectory to load within the root directory. This is useful + if multiple systems share the same delivery mechanism. Envoy configuration elements can be + contained in a dedicated subdirectory. + +.. _config_runtime_override_subdirectory: + +override_subdirectory + *(optional, string)* Specifies an optional subdirectory to load within the root directory. If + specified and the directory exists, configuration values within this directory will override those + found in the primary subdirectory. This is useful when Envoy is deployed across many different + types of servers. Sometimes it is useful to have a per service cluster directory for runtime + configuration. See below for exactly how the override directory is used. diff --git a/docs/root/api-v1/tracing.rst b/docs/root/api-v1/tracing.rst new file mode 100644 index 000000000000..68bc38486900 --- /dev/null +++ b/docs/root/api-v1/tracing.rst @@ -0,0 +1,69 @@ +.. _config_tracing_v1: + +Tracing +======= + +The :ref:`tracing ` configuration specifies global settings for the HTTP +tracer used by Envoy. The configuration is defined on the :ref:`server's top level configuration +`. Envoy may support other tracers in the future, but right now the HTTP tracer is +the only one supported. + +.. code-block:: json + + { + "http": { + "driver": "{...}" + } + } + +http + *(optional, object)* Provides configuration for the HTTP tracer. 
+ +driver + *(optional, object)* Provides the driver that handles trace and span creation. + +Currently `LightStep `_ and `Zipkin +`_ drivers are supported. + +LightStep driver +---------------- + +.. code-block:: json + + { + "type": "lightstep", + "config": { + "access_token_file": "...", + "collector_cluster": "..." + } + } + +access_token_file + *(required, string)* File containing the access token to the `LightStep `_ + API. + +collector_cluster + *(required, string)* The cluster manager cluster that hosts the LightStep collectors. + + +Zipkin driver +------------- + +.. code-block:: json + + { + "type": "zipkin", + "config": { + "collector_cluster": "...", + "collector_endpoint": "..." + } + } + +collector_cluster + *(required, string)* The cluster manager cluster that hosts the Zipkin collectors. Note that the + Zipkin cluster must be defined under `clusters` in the cluster manager configuration section. + +collector_endpoint + *(optional, string)* The API endpoint of the Zipkin service where the + spans will be sent. When using a standard Zipkin installation, the + API endpoint is typically `/api/v1/spans`, which is the default value. diff --git a/docs/root/api-v2/api.rst b/docs/root/api-v2/api.rst new file mode 100644 index 000000000000..a4259ad71513 --- /dev/null +++ b/docs/root/api-v2/api.rst @@ -0,0 +1,16 @@ +.. _envoy_api_reference: + +v2 API reference +================ + +.. toctree:: + :glob: + :maxdepth: 2 + + bootstrap/bootstrap + listeners/listeners + clusters/clusters + http_routes/http_routes + config/filter/filter + common_messages/common_messages + types/types diff --git a/docs/root/api-v2/bootstrap/bootstrap.rst b/docs/root/api-v2/bootstrap/bootstrap.rst new file mode 100644 index 000000000000..fd0fec12fc78 --- /dev/null +++ b/docs/root/api-v2/bootstrap/bootstrap.rst @@ -0,0 +1,12 @@ +Bootstrap +========= + +.. 
toctree:: + :glob: + :maxdepth: 2 + + ../config/bootstrap/v2/bootstrap.proto + ../config/metrics/v2/stats.proto + ../config/metrics/v2/metrics_service.proto + ../config/ratelimit/v2/rls.proto + ../config/trace/v2/trace.proto diff --git a/docs/root/api-v2/clusters/clusters.rst b/docs/root/api-v2/clusters/clusters.rst new file mode 100644 index 000000000000..8fe24ed0ad24 --- /dev/null +++ b/docs/root/api-v2/clusters/clusters.rst @@ -0,0 +1,13 @@ +Clusters +======== + +.. toctree:: + :glob: + :maxdepth: 2 + + ../api/v2/cds.proto + ../api/v2/cluster/outlier_detection.proto + ../api/v2/cluster/circuit_breaker.proto + ../api/v2/endpoint/endpoint.proto + ../api/v2/eds.proto + ../api/v2/core/health_check.proto diff --git a/docs/root/api-v2/common_messages/common_messages.rst b/docs/root/api-v2/common_messages/common_messages.rst new file mode 100644 index 000000000000..3e9adab19baa --- /dev/null +++ b/docs/root/api-v2/common_messages/common_messages.rst @@ -0,0 +1,15 @@ +Common messages +=============== + +.. toctree:: + :glob: + :maxdepth: 2 + + ../api/v2/core/base.proto + ../api/v2/core/address.proto + ../api/v2/core/protocol.proto + ../api/v2/discovery.proto + ../api/v2/core/config_source.proto + ../api/v2/core/grpc_service.proto + ../api/v2/auth/cert.proto + ../api/v2/ratelimit/ratelimit.proto diff --git a/docs/root/api-v2/config/filter/filter.rst b/docs/root/api-v2/config/filter/filter.rst new file mode 100644 index 000000000000..0793d3f8aa08 --- /dev/null +++ b/docs/root/api-v2/config/filter/filter.rst @@ -0,0 +1,11 @@ +Filters +======= + +.. toctree:: + :glob: + :maxdepth: 2 + + network/network + http/http + accesslog/v2/accesslog.proto + fault/v2/fault.proto diff --git a/docs/root/api-v2/config/filter/http/http.rst b/docs/root/api-v2/config/filter/http/http.rst new file mode 100644 index 000000000000..49eaeb7c6bd8 --- /dev/null +++ b/docs/root/api-v2/config/filter/http/http.rst @@ -0,0 +1,8 @@ +HTTP filters +============ + +.. 
toctree:: + :glob: + :maxdepth: 2 + + */v2/* diff --git a/docs/root/api-v2/config/filter/network/network.rst b/docs/root/api-v2/config/filter/network/network.rst new file mode 100644 index 000000000000..d61c09754fb0 --- /dev/null +++ b/docs/root/api-v2/config/filter/network/network.rst @@ -0,0 +1,8 @@ +Network filters +=============== + +.. toctree:: + :glob: + :maxdepth: 2 + + */v2/* diff --git a/docs/root/api-v2/http_routes/http_routes.rst b/docs/root/api-v2/http_routes/http_routes.rst new file mode 100644 index 000000000000..45a2dbca1d93 --- /dev/null +++ b/docs/root/api-v2/http_routes/http_routes.rst @@ -0,0 +1,9 @@ +HTTP route management +===================== + +.. toctree:: + :glob: + :maxdepth: 2 + + ../api/v2/rds.proto + ../api/v2/route/route.proto diff --git a/docs/root/api-v2/listeners/listeners.rst b/docs/root/api-v2/listeners/listeners.rst new file mode 100644 index 000000000000..d933ccd32d66 --- /dev/null +++ b/docs/root/api-v2/listeners/listeners.rst @@ -0,0 +1,9 @@ +Listeners +========= + +.. toctree:: + :glob: + :maxdepth: 2 + + ../api/v2/lds.proto + ../api/v2/listener/listener.proto diff --git a/docs/root/api-v2/types/types.rst b/docs/root/api-v2/types/types.rst new file mode 100644 index 000000000000..116d6c3cb519 --- /dev/null +++ b/docs/root/api-v2/types/types.rst @@ -0,0 +1,9 @@ +Types +===== + +.. toctree:: + :glob: + :maxdepth: 2 + + ../type/percent.proto + ../type/range.proto diff --git a/docs/root/configuration/access_log.rst b/docs/root/configuration/access_log.rst new file mode 100644 index 000000000000..a70980111892 --- /dev/null +++ b/docs/root/configuration/access_log.rst @@ -0,0 +1,208 @@ +.. _config_access_log: + +Access logging +============== + +Configuration +------------------------- + +Access logs are configured as part of the :ref:`HTTP connection manager config +` or :ref:`TCP Proxy `. + +* :ref:`v1 API reference ` +* :ref:`v2 API reference ` + +.. 
_config_access_log_format: + +Format rules +------------ + +The access log format string contains either command operators or other characters interpreted as a +plain string. The access log formatter does not make any assumptions about a new line separator, so one +has to specified as part of the format string. +See the :ref:`default format ` for an example. +Note that the access log line will contain a '-' character for every not set/empty value. + +The same format strings are used by different types of access logs (such as HTTP and TCP). Some +fields may have slightly different meanings, depending on what type of log it is. Differences +are noted. + +The following command operators are supported: + +%START_TIME% + HTTP + Request start time including milliseconds. + + TCP + Downstream connection start time including milliseconds. + + START_TIME can be customized using a `format string `_, for example: + +.. code-block:: none + + %START_TIME(%Y/%m/%dT%H:%M:%S%z %s)% + +%BYTES_RECEIVED% + HTTP + Body bytes received. + + TCP + Downstream bytes received on connection. + +%PROTOCOL% + HTTP + Protocol. Currently either *HTTP/1.1* or *HTTP/2*. + + TCP + Not implemented ("-"). + +%RESPONSE_CODE% + HTTP + HTTP response code. Note that a response code of '0' means that the server never sent the + beginning of a response. This generally means that the (downstream) client disconnected. + + TCP + Not implemented ("-"). + +%BYTES_SENT% + HTTP + Body bytes sent. + + TCP + Downstream bytes sent on connection. + +%DURATION% + HTTP + Total duration in milliseconds of the request from the start time to the last byte out. + + TCP + Total duration in milliseconds of the downstream connection. + +%RESPONSE_FLAGS% + Additional details about the response or connection, if any. For TCP connections, the response codes mentioned in + the descriptions do not apply. 
Possible values are: + + HTTP and TCP + * **UH**: No healthy upstream hosts in upstream cluster in addition to 503 response code. + * **UF**: Upstream connection failure in addition to 503 response code. + * **UO**: Upstream overflow (:ref:`circuit breaking `) in addition to 503 response code. + * **NR**: No :ref:`route configured ` for a given request in addition to 404 response code. + HTTP only + * **LH**: Local service failed :ref:`health check request ` in addition to 503 response code. + * **UT**: Upstream request timeout in addition to 504 response code. + * **LR**: Connection local reset in addition to 503 response code. + * **UR**: Upstream remote reset in addition to 503 response code. + * **UC**: Upstream connection termination in addition to 503 response code. + * **DI**: The request processing was delayed for a period specified via :ref:`fault injection `. + * **FI**: The request was aborted with a response code specified via :ref:`fault injection `. + * **RL**: The request was ratelimited locally by the :ref:`HTTP rate limit filter ` in addition to 429 response code. + +%UPSTREAM_HOST% + Upstream host URL (e.g., tcp://ip:port for TCP connections). + +%UPSTREAM_CLUSTER% + Upstream cluster to which the upstream host belongs to. + +%UPSTREAM_LOCAL_ADDRESS% + Local address of the upstream connection. If the address is an IP address it includes both + address and port. + +%DOWNSTREAM_ADDRESS% + Remote address of the downstream connection *without IP port if the address is an IP address*. + + .. attention:: + + This field is deprecated. Use **DOWNSTREAM_REMOTE_ADDRESS** or + **DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT** instead. + +%DOWNSTREAM_REMOTE_ADDRESS% + Remote address of the downstream connection. If the address is an IP address it includes both + address and port. + + .. note:: + + This may not be the physical remote address of the peer if the address has been inferred from + :ref:`proxy proto ` or :ref:`x-forwarded-for + `. 
+ +%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% + Remote address of the downstream connection. If the address is an IP address the output does + *not* include port. + + .. note:: + + This may not be the physical remote address of the peer if the address has been inferred from + :ref:`proxy proto ` or :ref:`x-forwarded-for + `. + +%DOWNSTREAM_LOCAL_ADDRESS% + Local address of the downstream connection. If the address is an IP address it includes both + address and port. + If the original connection was redirected by iptables REDIRECT, this represents + the original destination address restored by the + :ref:`Original Destination Filter ` using SO_ORIGINAL_DST socket option. + If the original connection was redirected by iptables TPROXY, and the listener's transparent + option was set to true, this represents the original destination address and port. + +%DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT% + Same as **%DOWNSTREAM_LOCAL_ADDRESS%** excluding port if the address is an IP address. + +%REQ(X?Y):Z% + HTTP + An HTTP request header where X is the main HTTP header, Y is the alternative one, and Z is an + optional parameter denoting string truncation up to Z characters long. The value is taken from + the HTTP request header named X first and if it's not set, then request header Y is used. If + none of the headers are present '-' symbol will be in the log. + + TCP + Not implemented ("-"). + +%RESP(X?Y):Z% + HTTP + Same as **%REQ(X?Y):Z%** but taken from HTTP response headers. + + TCP + Not implemented ("-"). + +%DYNAMIC_METADATA(NAMESPACE:KEY*):Z% + HTTP + :ref:`Dynamic Metadata ` info, + where NAMESPACE is the the filter namespace used when setting the metadata, KEY is an optional + lookup up key in the namespace with the option of specifying nested keys separated by ':', + and Z is an optional parameter denoting string truncation up to Z characters long. Dynamic Metadata + can be set by filters using the :repo:`RequestInfo ` API: + *setDynamicMetadata*. 
The data will be logged as a JSON string. For example, for the following dynamic metadata: + + ``com.test.my_filter: {"test_key": "foo", "test_object": {"inner_key": "bar"}}`` + + * %DYNAMIC_METADATA(com.test.my_filter)% will log: ``{"test_key": "foo", "test_object": {"inner_key": "bar"}}`` + * %DYNAMIC_METADATA(com.test.my_filter:test_key)% will log: ``"foo"`` + * %DYNAMIC_METADATA(com.test.my_filter:test_object)% will log: ``{"inner_key": "bar"}`` + * %DYNAMIC_METADATA(com.test.my_filter:test_object:inner_key)% will log: ``"bar"`` + * %DYNAMIC_METADATA(com.unknown_filter)% will log: ``-`` + * %DYNAMIC_METADATA(com.test.my_filter:unknown_key)% will log: ``-`` + * %DYNAMIC_METADATA(com.test.my_filter):25% will log (truncation at 25 characters): ``{"test_key": "foo", "test`` + + TCP + Not implemented ("-"). + +.. _config_access_log_default_format: + +Default format +-------------- + +If custom format is not specified, Envoy uses the following default format: + +.. code-block:: none + + [%START_TIME%] "%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%" + %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% + %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% "%REQ(X-FORWARDED-FOR)%" "%REQ(USER-AGENT)%" + "%REQ(X-REQUEST-ID)%" "%REQ(:AUTHORITY)%" "%UPSTREAM_HOST%"\n + +Example of the default Envoy access log format: + +.. code-block:: none + + [2016-04-15T20:17:00.310Z] "POST /api/v1/locations HTTP/2" 204 - 154 0 226 100 "10.0.35.28" + "nsq2http" "cc21d9b0-cf5c-432b-8c7e-98aeb7988cd2" "locations" "tcp://10.0.2.1:80" diff --git a/docs/root/configuration/cluster_manager/cds.rst b/docs/root/configuration/cluster_manager/cds.rst new file mode 100644 index 000000000000..3ac34bc32d0c --- /dev/null +++ b/docs/root/configuration/cluster_manager/cds.rst @@ -0,0 +1,31 @@ +.. 
_config_cluster_manager_cds: + +Cluster discovery service +========================= + +The cluster discovery service (CDS) is an optional API that Envoy will call to dynamically fetch +cluster manager members. Envoy will reconcile the API response and add, modify, or remove known +clusters depending on what is required. + +.. note:: + + Any clusters that are statically defined within the Envoy configuration cannot be modified or + removed via the CDS API. + +* :ref:`v1 CDS API ` +* :ref:`v2 CDS API ` + +Statistics +---------- + +CDS has a statistics tree rooted at *cluster_manager.cds.* with the following statistics: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + config_reload, Counter, Total API fetches that resulted in a config reload due to a different config + update_attempt, Counter, Total API fetches attempted + update_success, Counter, Total API fetches completed successfully + update_failure, Counter, Total API fetches that failed (either network or schema errors) + version, Gauge, Hash of the contents from the last successful API fetch diff --git a/docs/root/configuration/cluster_manager/cluster_circuit_breakers.rst b/docs/root/configuration/cluster_manager/cluster_circuit_breakers.rst new file mode 100644 index 000000000000..331d59b8e799 --- /dev/null +++ b/docs/root/configuration/cluster_manager/cluster_circuit_breakers.rst @@ -0,0 +1,17 @@ +.. _config_cluster_manager_cluster_circuit_breakers: + +Circuit breaking +================ + +* Circuit Breaking :ref:`architecture overview `. +* :ref:`v1 API documentation `. +* :ref:`v2 API documentation `. + +Runtime +------- + +All circuit breaking settings are runtime configurable for all defined priorities based on cluster +name. They follow the following naming scheme ``circuit_breakers...``. +``cluster_name`` is the name field in each cluster's configuration, which is set in the envoy +:ref:`config file `. 
Available runtime settings will override +settings set in the envoy config file. diff --git a/docs/root/configuration/cluster_manager/cluster_hc.rst b/docs/root/configuration/cluster_manager/cluster_hc.rst new file mode 100644 index 000000000000..f08c519abf28 --- /dev/null +++ b/docs/root/configuration/cluster_manager/cluster_hc.rst @@ -0,0 +1,73 @@ +.. _config_cluster_manager_cluster_hc: + +Health checking +=============== + +* Health checking :ref:`architecture overview `. +* If health checking is configured for a cluster, additional statistics are emitted. They are + documented :ref:`here `. +* :ref:`v1 API documentation `. +* :ref:`v2 API documentation `. + +.. _config_cluster_manager_cluster_hc_tcp_health_checking: + +TCP health checking +------------------- + +.. attention:: + + This section is written for the v1 API but the concepts also apply to the v2 API. It will be + rewritten to target the v2 API in a future release. + +The type of matching performed is the following (this is the MongoDB health check request and +response): + +.. code-block:: json + + { + "send": [ + {"binary": "39000000"}, + {"binary": "EEEEEEEE"}, + {"binary": "00000000"}, + {"binary": "d4070000"}, + {"binary": "00000000"}, + {"binary": "746573742e"}, + {"binary": "24636d6400"}, + {"binary": "00000000"}, + {"binary": "FFFFFFFF"}, + {"binary": "13000000"}, + {"binary": "01"}, + {"binary": "70696e6700"}, + {"binary": "000000000000f03f"}, + {"binary": "00"} + ], + "receive": [ + {"binary": "EEEEEEEE"}, + {"binary": "01000000"}, + {"binary": "00000000"}, + {"binary": "0000000000000000"}, + {"binary": "00000000"}, + {"binary": "11000000"}, + {"binary": "01"}, + {"binary": "6f6b"}, + {"binary": "00000000000000f03f"}, + {"binary": "00"} + ] + } + +During each health check cycle, all of the "send" bytes are sent to the target server. Each +binary block can be of arbitrary length and is just concatenated together when sent. (Separating +into multiple blocks can be useful for readability). 
+ +When checking the response, "fuzzy" matching is performed such that each binary block must be found, +and in the order specified, but not necessarily contiguous. Thus, in the example above, +"FFFFFFFF" could be inserted in the response between "EEEEEEEE" and "01000000" and the check +would still pass. This is done to support protocols that insert non-deterministic data, such as +time, into the response. + +Health checks that require a more complex pattern such as send/receive/send/receive are not +currently possible. + +If "receive" is an empty array, Envoy will perform "connect only" TCP health checking. During each +cycle, Envoy will attempt to connect to the upstream host, and consider it a success if the +connection succeeds. A new connection is created for each health check cycle. diff --git a/docs/root/configuration/cluster_manager/cluster_manager.rst b/docs/root/configuration/cluster_manager/cluster_manager.rst new file mode 100644 index 000000000000..d8fa69736141 --- /dev/null +++ b/docs/root/configuration/cluster_manager/cluster_manager.rst @@ -0,0 +1,17 @@ +.. _config_cluster_manager: + +Cluster manager +=============== + +.. toctree:: + :hidden: + + cluster_stats + cluster_runtime + cds + cluster_hc + cluster_circuit_breakers + +* Cluster manager :ref:`architecture overview ` +* :ref:`v1 API reference ` +* :ref:`v2 API reference ` diff --git a/docs/root/configuration/cluster_manager/cluster_runtime.rst b/docs/root/configuration/cluster_manager/cluster_runtime.rst new file mode 100644 index 000000000000..6b412770ce90 --- /dev/null +++ b/docs/root/configuration/cluster_manager/cluster_runtime.rst @@ -0,0 +1,131 @@ +.. _config_cluster_manager_cluster_runtime: + +Runtime +======= + +Upstream clusters support the following runtime settings: + +Active health checking +---------------------- + +health_check.min_interval + Min value for the health checking :ref:`interval `. + Default value is 0. 
The health checking interval will be between *min_interval* and + *max_interval*. + +health_check.max_interval + Max value for the health checking :ref:`interval `. + Default value is MAX_INT. The health checking interval will be between *min_interval* and + *max_interval*. + +health_check.verify_cluster + What % of health check requests will be verified against the :ref:`expected upstream service + ` as the :ref:`health check filter + ` will write the remote service cluster into the response. + +.. _config_cluster_manager_cluster_runtime_outlier_detection: + +Outlier detection +----------------- + +See the outlier detection :ref:`architecture overview ` for more +information on outlier detection. The runtime parameters supported by outlier detection are the +same as the :ref:`static configuration parameters `, namely: + +outlier_detection.consecutive_5xx + :ref:`consecutive_5XX + ` + setting in outlier detection + +outlier_detection.consecutive_gateway_failure + :ref:`consecutive_gateway_failure + ` + setting in outlier detection + +outlier_detection.interval_ms + :ref:`interval_ms + ` + setting in outlier detection + +outlier_detection.base_ejection_time_ms + :ref:`base_ejection_time_ms + ` + setting in outlier detection + +outlier_detection.max_ejection_percent + :ref:`max_ejection_percent + ` + setting in outlier detection + +outlier_detection.enforcing_consecutive_5xx + :ref:`enforcing_consecutive_5xx + ` + setting in outlier detection + +outlier_detection.enforcing_consecutive_gateway_failure + :ref:`enforcing_consecutive_gateway_failure + ` + setting in outlier detection + +outlier_detection.enforcing_success_rate + :ref:`enforcing_success_rate + ` + setting in outlier detection + +outlier_detection.success_rate_minimum_hosts + :ref:`success_rate_minimum_hosts + ` + setting in outlier detection + +outlier_detection.success_rate_request_volume + :ref:`success_rate_request_volume + ` + setting in outlier detection + +outlier_detection.success_rate_stdev_factor 
+ :ref:`success_rate_stdev_factor + ` + setting in outlier detection + +Core +---- + +upstream.healthy_panic_threshold + Sets the :ref:`panic threshold ` percentage. + Defaults to 50%. + +upstream.use_http2 + Whether the cluster utilizes the *http2* :ref:`feature ` + if configured. Set to 0 to disable HTTP/2 even if the feature is configured. Defaults to enabled. + +upstream.weight_enabled + Binary switch to turn on or off weighted load balancing. If set to non 0, weighted load balancing + is enabled. Defaults to enabled. + +.. _config_cluster_manager_cluster_runtime_zone_routing: + +Zone aware load balancing +------------------------- + +upstream.zone_routing.enabled + % of requests that will be routed to the same upstream zone. Defaults to 100% of requests. + +upstream.zone_routing.min_cluster_size + Minimal size of the upstream cluster for which zone aware routing can be attempted. Default value + is 6. If the upstream cluster size is smaller than *min_cluster_size* zone aware routing will not + be performed. + +Circuit breaking +---------------- + +circuit_breakers...max_connections + :ref:`Max connections circuit breaker setting ` + +circuit_breakers...max_pending_requests + :ref:`Max pending requests circuit breaker setting ` + +circuit_breakers...max_requests + :ref:`Max requests circuit breaker setting ` + +circuit_breakers...max_retries + :ref:`Max retries circuit breaker setting ` diff --git a/docs/root/configuration/cluster_manager/cluster_stats.rst b/docs/root/configuration/cluster_manager/cluster_stats.rst new file mode 100644 index 000000000000..ef559ff19478 --- /dev/null +++ b/docs/root/configuration/cluster_manager/cluster_stats.rst @@ -0,0 +1,218 @@ +.. _config_cluster_manager_cluster_stats: + +Statistics +========== + +.. contents:: + :local: + +General +------- + +The cluster manager has a statistics tree rooted at *cluster_manager.* with the following +statistics. Any ``:`` character in the stats name is replaced with ``_``. + +.. 
csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + cluster_added, Counter, Total clusters added (either via static config or CDS) + cluster_modified, Counter, Total clusters modified (via CDS) + cluster_removed, Counter, Total clusters removed (via CDS) + active_clusters, Gauge, Number of currently active (warmed) clusters + warming_clusters, Gauge, Number of currently warming (not active) clusters + +Every cluster has a statistics tree rooted at *cluster..* with the following statistics: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + upstream_cx_total, Counter, Total connections + upstream_cx_active, Gauge, Total active connections + upstream_cx_http1_total, Counter, Total HTTP/1.1 connections + upstream_cx_http2_total, Counter, Total HTTP/2 connections + upstream_cx_connect_fail, Counter, Total connection failures + upstream_cx_connect_timeout, Counter, Total connection connect timeouts + upstream_cx_idle_timeout, Counter, Total connection idle timeouts + upstream_cx_connect_attempts_exceeded, Counter, Total consecutive connection failures exceeding configured connection attempts + upstream_cx_overflow, Counter, Total times that the cluster's connection circuit breaker overflowed + upstream_cx_connect_ms, Histogram, Connection establishment milliseconds + upstream_cx_length_ms, Histogram, Connection length milliseconds + upstream_cx_destroy, Counter, Total destroyed connections + upstream_cx_destroy_local, Counter, Total connections destroyed locally + upstream_cx_destroy_remote, Counter, Total connections destroyed remotely + upstream_cx_destroy_with_active_rq, Counter, Total connections destroyed with 1+ active request + upstream_cx_destroy_local_with_active_rq, Counter, Total connections destroyed locally with 1+ active request + upstream_cx_destroy_remote_with_active_rq, Counter, Total connections destroyed remotely with 1+ active request + upstream_cx_close_notify, Counter, Total connections closed via HTTP/1.1 
connection close header or HTTP/2 GOAWAY + upstream_cx_rx_bytes_total, Counter, Total received connection bytes + upstream_cx_rx_bytes_buffered, Gauge, Received connection bytes currently buffered + upstream_cx_tx_bytes_total, Counter, Total sent connection bytes + upstream_cx_tx_bytes_buffered, Gauge, Send connection bytes currently buffered + upstream_cx_protocol_error, Counter, Total connection protocol errors + upstream_cx_max_requests, Counter, Total connections closed due to maximum requests + upstream_cx_none_healthy, Counter, Total times connection not established due to no healthy hosts + upstream_rq_total, Counter, Total requests + upstream_rq_active, Gauge, Total active requests + upstream_rq_pending_total, Counter, Total requests pending a connection pool connection + upstream_rq_pending_overflow, Counter, Total requests that overflowed connection pool circuit breaking and were failed + upstream_rq_pending_failure_eject, Counter, Total requests that were failed due to a connection pool connection failure + upstream_rq_pending_active, Gauge, Total active requests pending a connection pool connection + upstream_rq_cancelled, Counter, Total requests cancelled before obtaining a connection pool connection + upstream_rq_maintenance_mode, Counter, Total requests that resulted in an immediate 503 due to :ref:`maintenance mode` + upstream_rq_timeout, Counter, Total requests that timed out waiting for a response + upstream_rq_per_try_timeout, Counter, Total requests that hit the per try timeout + upstream_rq_rx_reset, Counter, Total requests that were reset remotely + upstream_rq_tx_reset, Counter, Total requests that were reset locally + upstream_rq_retry, Counter, Total request retries + upstream_rq_retry_success, Counter, Total request retry successes + upstream_rq_retry_overflow, Counter, Total requests not retried due to circuit breaking + upstream_flow_control_paused_reading_total, Counter, Total number of times flow control paused reading from upstream + 
upstream_flow_control_resumed_reading_total, Counter, Total number of times flow control resumed reading from upstream + upstream_flow_control_backed_up_total, Counter, Total number of times the upstream connection backed up and paused reads from downstream + upstream_flow_control_drained_total, Counter, Total number of times the upstream connection drained and resumed reads from downstream + membership_change, Counter, Total cluster membership changes + membership_healthy, Gauge, Current cluster healthy total (inclusive of both health checking and outlier detection) + membership_total, Gauge, Current cluster membership total + retry_or_shadow_abandoned, Counter, Total number of times shadowing or retry buffering was canceled due to buffer limits + config_reload, Counter, Total API fetches that resulted in a config reload due to a different config + update_attempt, Counter, Total cluster membership update attempts + update_success, Counter, Total cluster membership update successes + update_failure, Counter, Total cluster membership update failures + update_empty, Counter, Total cluster membership updates ending with empty cluster load assignment and continuing with previous config + version, Gauge, Hash of the contents from the last successful API fetch + max_host_weight, Gauge, Maximum weight of any host in the cluster + bind_errors, Counter, Total errors binding the socket to the configured source address + +Health check statistics +----------------------- + +If health check is configured, the cluster has an additional statistics tree rooted at +*cluster..health_check.* with the following statistics: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + attempt, Counter, Number of health checks + success, Counter, Number of successful health checks + failure, Counter, Number of immediately failed health checks (e.g. HTTP 503) as well as network failures + passive_failure, Counter, Number of health check failures due to passive events (e.g. 
x-envoy-immediate-health-check-fail) + network_failure, Counter, Number of health check failures due to network error + verify_cluster, Counter, Number of health checks that attempted cluster name verification + healthy, Gauge, Number of healthy members + +.. _config_cluster_manager_cluster_stats_outlier_detection: + +Outlier detection statistics +---------------------------- + +If :ref:`outlier detection ` is configured for a cluster, +statistics will be rooted at *cluster..outlier_detection.* and contain the following: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + ejections_enforced_total, Counter, Number of enforced ejections due to any outlier type + ejections_active, Gauge, Number of currently ejected hosts + ejections_overflow, Counter, Number of ejections aborted due to the max ejection % + ejections_enforced_consecutive_5xx, Counter, Number of enforced consecutive 5xx ejections + ejections_detected_consecutive_5xx, Counter, Number of detected consecutive 5xx ejections (even if unenforced) + ejections_enforced_success_rate, Counter, Number of enforced success rate outlier ejections + ejections_detected_success_rate, Counter, Number of detected success rate outlier ejections (even if unenforced) + ejections_enforced_consecutive_gateway_failure, Counter, Number of enforced consecutive gateway failure ejections + ejections_detected_consecutive_gateway_failure, Counter, Number of detected consecutive gateway failure ejections (even if unenforced) + ejections_total, Counter, Deprecated. Number of ejections due to any outlier type (even if unenforced) + ejections_consecutive_5xx, Counter, Deprecated. Number of consecutive 5xx ejections (even if unenforced) + +.. _config_cluster_manager_cluster_stats_dynamic_http: + +Dynamic HTTP statistics +----------------------- + +If HTTP is used, dynamic HTTP response code statistics are also available. 
These are emitted by +various internal systems as well as some filters such as the :ref:`router filter +` and :ref:`rate limit filter `. They +are rooted at *cluster..* and contain the following statistics: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + upstream_rq_<\*xx>, Counter, "Aggregate HTTP response codes (e.g., 2xx, 3xx, etc.)" + upstream_rq_<\*>, Counter, "Specific HTTP response codes (e.g., 201, 302, etc.)" + upstream_rq_time, Histogram, Request time milliseconds + canary.upstream_rq_<\*xx>, Counter, Upstream canary aggregate HTTP response codes + canary.upstream_rq_<\*>, Counter, Upstream canary specific HTTP response codes + canary.upstream_rq_time, Histogram, Upstream canary request time milliseconds + internal.upstream_rq_<\*xx>, Counter, Internal origin aggregate HTTP response codes + internal.upstream_rq_<\*>, Counter, Internal origin specific HTTP response codes + internal.upstream_rq_time, Histogram, Internal origin request time milliseconds + external.upstream_rq_<\*xx>, Counter, External origin aggregate HTTP response codes + external.upstream_rq_<\*>, Counter, External origin specific HTTP response codes + external.upstream_rq_time, Histogram, External origin request time milliseconds + +.. _config_cluster_manager_cluster_stats_alt_tree: + +Alternate tree dynamic HTTP statistics +-------------------------------------- + +If alternate tree statistics are configured, they will be present in the +*cluster...* namespace. The statistics produced are the same as documented in +the dynamic HTTP statistics section :ref:`above +`. + +.. _config_cluster_manager_cluster_per_az_stats: + +Per service zone dynamic HTTP statistics +---------------------------------------- + +If the service zone is available for the local service (via :option:`--service-zone`) +and the :ref:`upstream cluster `, +Envoy will track the following statistics in *cluster..zone...* namespace. + +.. 
csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + upstream_rq_<\*xx>, Counter, "Aggregate HTTP response codes (e.g., 2xx, 3xx, etc.)" + upstream_rq_<\*>, Counter, "Specific HTTP response codes (e.g., 201, 302, etc.)" + upstream_rq_time, Histogram, Request time milliseconds + +Load balancer statistics +------------------------ + +Statistics for monitoring load balancer decisions. Stats are rooted at *cluster..* and contain +the following statistics: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + lb_recalculate_zone_structures, Counter, The number of times locality aware routing structures are regenerated for fast decisions on upstream locality selection + lb_healthy_panic, Counter, Total requests load balanced with the load balancer in panic mode + lb_zone_cluster_too_small, Counter, No zone aware routing because of small upstream cluster size + lb_zone_routing_all_directly, Counter, Sending all requests directly to the same zone + lb_zone_routing_sampled, Counter, Sending some requests to the same zone + lb_zone_routing_cross_zone, Counter, Zone aware routing mode but have to send cross zone + lb_local_cluster_not_ok, Counter, Local host set is not set or it is panic mode for local cluster + lb_zone_number_differs, Counter, Number of zones in local and upstream cluster different + lb_zone_no_capacity_left, Counter, Total number of times ended with random zone selection due to rounding error + +Load balancer subset statistics +------------------------------- + +Statistics for monitoring `load balancer subset ` +decisions. Stats are rooted at *cluster..* and contain the following statistics: + +.. 
csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + lb_subsets_active, Gauge, Number of currently available subsets + lb_subsets_created, Counter, Number of subsets created + lb_subsets_removed, Counter, Number of subsets removed due to no hosts + lb_subsets_selected, Counter, Number of times any subset was selected for load balancing + lb_subsets_fallback, Counter, Number of times the fallback policy was invoked diff --git a/docs/root/configuration/configuration.rst b/docs/root/configuration/configuration.rst new file mode 100644 index 000000000000..10c309179fb2 --- /dev/null +++ b/docs/root/configuration/configuration.rst @@ -0,0 +1,22 @@ +.. _config: + +Configuration reference +======================= + +.. toctree:: + :maxdepth: 2 + :includehidden: + + overview/v1_overview + overview/v2_overview + listeners/listeners + listener_filters/listener_filters + network_filters/network_filters + http_conn_man/http_conn_man + http_filters/http_filters + cluster_manager/cluster_manager + access_log + rate_limit + runtime + statistics + tools/router_check diff --git a/docs/root/configuration/http_conn_man/header_sanitizing.rst b/docs/root/configuration/http_conn_man/header_sanitizing.rst new file mode 100644 index 000000000000..d57ffa737e06 --- /dev/null +++ b/docs/root/configuration/http_conn_man/header_sanitizing.rst @@ -0,0 +1,35 @@ +.. _config_http_conn_man_header_sanitizing: + +HTTP header sanitizing +====================== + +For security reasons, Envoy will "sanitize" various incoming HTTP headers depending on whether the +request is an internal or external request. The sanitizing action depends on the header and may +result in addition, removal, or modification. Ultimately, whether the request is considered internal +or external is governed by the :ref:`x-forwarded-for ` +header (please read the linked section carefully as how Envoy populates the header is complex and +depends on the :ref:`use_remote_address ` setting). 
+ +Envoy will potentially sanitize the following headers: + +* :ref:`x-envoy-decorator-operation ` +* :ref:`x-envoy-downstream-service-cluster + ` +* :ref:`x-envoy-downstream-service-node ` +* :ref:`x-envoy-expected-rq-timeout-ms ` +* :ref:`x-envoy-external-address ` +* :ref:`x-envoy-force-trace ` +* :ref:`x-envoy-internal ` +* :ref:`x-envoy-max-retries ` +* :ref:`x-envoy-retry-grpc-on ` +* :ref:`x-envoy-retry-on ` +* :ref:`x-envoy-upstream-alt-stat-name ` +* :ref:`x-envoy-upstream-rq-per-try-timeout-ms + ` +* :ref:`x-envoy-upstream-rq-timeout-alt-response + ` +* :ref:`x-envoy-upstream-rq-timeout-ms ` +* :ref:`x-forwarded-client-cert ` +* :ref:`x-forwarded-for ` +* :ref:`x-forwarded-proto ` +* :ref:`x-request-id ` diff --git a/docs/root/configuration/http_conn_man/headers.rst b/docs/root/configuration/http_conn_man/headers.rst new file mode 100644 index 000000000000..282b32023080 --- /dev/null +++ b/docs/root/configuration/http_conn_man/headers.rst @@ -0,0 +1,482 @@ +.. _config_http_conn_man_headers: + +HTTP header manipulation +======================== + +The HTTP connection manager manipulates several HTTP headers both during decoding (when the request +is being received) as well as during encoding (when the response is being sent). + +.. contents:: + :local: + +.. _config_http_conn_man_headers_user-agent: + +user-agent +---------- + +The *user-agent* header may be set by the connection manager during decoding if the +:ref:`add_user_agent ` option is enabled. The header is only +modified if it is not already set. If the connection manager does set the header, the value is +determined by the :option:`--service-cluster` command line option. + +.. _config_http_conn_man_headers_server: + +server +------ + +The *server* header will be set during encoding to the value in the :ref:`server_name +` option. + +.. 
_config_http_conn_man_headers_x-client-trace-id: + +x-client-trace-id +----------------- + +If an external client sets this header, Envoy will join the provided trace ID with the internally +generated :ref:`config_http_conn_man_headers_x-request-id`. x-client-trace-id needs to be globally +unique and generating a uuid4 is recommended. If this header is set, it has similar effect to +:ref:`config_http_conn_man_headers_x-envoy-force-trace`. See the :ref:`tracing.client_enabled +` runtime configuration setting. + +.. _config_http_conn_man_headers_downstream-service-cluster: + +x-envoy-downstream-service-cluster +---------------------------------- + +Internal services often want to know which service is calling them. This header is cleaned from +external requests, but for internal requests will contain the service cluster of the caller. Note +that in the current implementation, this should be considered a hint as it is set by the caller and +could be easily spoofed by any internal entity. In the future Envoy will support a mutual +authentication TLS mesh which will make this header fully secure. Like *user-agent*, the value +is determined by the :option:`--service-cluster` command line option. In order to enable this +feature you need to set the :ref:`user_agent ` option to true. + +.. _config_http_conn_man_headers_downstream-service-node: + +x-envoy-downstream-service-node +------------------------------- + +Internal services may want to know the downstream node request comes from. This header +is quite similar to :ref:`config_http_conn_man_headers_downstream-service-cluster`, except the value is taken from +the :option:`--service-node` option. + +.. _config_http_conn_man_headers_x-envoy-external-address: + +x-envoy-external-address +------------------------ + +It is a common case where a service wants to perform analytics based on the origin client's IP +address. 
Per the lengthy discussion on :ref:`XFF `, +this can get quite complicated, so Envoy simplifies this by setting *x-envoy-external-address* +to the :ref:`trusted client address ` +if the request is from an external client. *x-envoy-external-address* is not set or overwritten +for internal requests. This header can be safely forwarded between internal services for analytics +purposes without having to deal with the complexities of XFF. + +.. _config_http_conn_man_headers_x-envoy-force-trace: + +x-envoy-force-trace +------------------- + +If an internal request sets this header, Envoy will modify the generated +:ref:`config_http_conn_man_headers_x-request-id` such that it forces traces to be collected. +This also forces :ref:`config_http_conn_man_headers_x-request-id` to be returned in the response +headers. If this request ID is then propagated to other hosts, traces will also be collected on +those hosts which will provide a consistent trace for an entire request flow. See the +:ref:`tracing.global_enabled ` and +:ref:`tracing.random_sampling ` runtime +configuration settings. + +.. _config_http_conn_man_headers_x-envoy-internal: + +x-envoy-internal +---------------- + +It is a common case where a service wants to know whether a request is internal origin or not. Envoy +uses :ref:`XFF ` to determine this and then will set +the header value to *true*. + +This is a convenience to avoid having to parse and understand XFF. + +.. _config_http_conn_man_headers_x-forwarded-client-cert: + +x-forwarded-client-cert +----------------------- + +*x-forwarded-client-cert* (XFCC) is a proxy header which indicates certificate information of part +or all of the clients or proxies that a request has flowed through, on its way from the client to the +server. A proxy may choose to sanitize/append/forward the XFCC header before proxying the request. + +The XFCC header value is a comma (",") separated string. 
Each substring is an XFCC element, which +holds information added by a single proxy. A proxy can append the current client certificate +information as an XFCC element, to the end of the request's XFCC header after a comma. + +Each XFCC element is a semicolon ";" separated string. Each substring is a key-value pair, grouped +together by an equals ("=") sign. The keys are case-insensitive, the values are case-sensitive. If +",", ";" or "=" appear in a value, the value should be double-quoted. Double-quotes in the value +should be replaced by backslash-double-quote (\"). + +The following keys are supported: + +1. ``By`` The Subject Alternative Name (URI type) of the current proxy's certificate. +2. ``Hash`` The SHA 256 diguest of the current client certificate. +3. ``Cert`` The entire client certificate in URL encoded PEM format. +4. ``Subject`` The Subject field of the current client certificate. The value is always double-quoted. +5. ``URI`` The URI type Subject Alternative Name field of the current client certificate. +6. ``DNS`` The DNS type Subject Alternative Name field of the current client certificate. A client certificate may contain multiple DNS type Subject Alternative Names, each will be a separate key-value pair. + +A client certificate may contain multiple Subject Alternative Name types. For details on different Subject Alternative Name types, please refer `RFC 2459`_. + +.. _RFC 2459: https://tools.ietf.org/html/rfc2459#section-4.2.1.7 + +Some examples of the XFCC header are: + +1. For one client certificate with only URI type Subject Alternative Name: ``x-forwarded-client-cert: By=http://frontend.lyft.com;Hash=468ed33be74eee6556d90c0149c1309e9ba61d6425303443c0748a02dd8de688;Subject="/C=US/ST=CA/L=San Francisco/OU=Lyft/CN=Test Client";URI=http://testclient.lyft.com`` +2. 
For two client certificates with only URI type Subject Alternative Name: ``x-forwarded-client-cert: By=http://frontend.lyft.com;Hash=468ed33be74eee6556d90c0149c1309e9ba61d6425303443c0748a02dd8de688;URI=http://testclient.lyft.com,By=http://backend.lyft.com;Hash=9ba61d6425303443c0748a02dd8de688468ed33be74eee6556d90c0149c1309e;URI=http://frontend.lyft.com`` +3. For one client certificate with both URI type and DNS type Subject Alternative Name: ``x-forwarded-client-cert: By=http://frontend.lyft.com;Hash=468ed33be74eee6556d90c0149c1309e9ba61d6425303443c0748a02dd8de688;Subject="/C=US/ST=CA/L=San Francisco/OU=Lyft/CN=Test Client";URI=http://testclient.lyft.com;DNS=lyft.com;DNS=www.lyft.com`` + +How Envoy processes XFCC is specified by the +:ref:`forward_client_cert` and the +:ref:`set_current_client_cert_details` HTTP +connection manager options. If *forward_client_cert* is unset, the XFCC header will be sanitized by +default. + +.. _config_http_conn_man_headers_x-forwarded-for: + +x-forwarded-for +--------------- + +*x-forwarded-for* (XFF) is a standard proxy header which indicates the IP addresses that a request has +flowed through on its way from the client to the server. A compliant proxy will *append* the IP +address of the nearest client to the XFF list before proxying the request. Some examples of XFF are: + +1. ``x-forwarded-for: 50.0.0.1`` (single client) +2. ``x-forwarded-for: 50.0.0.1, 40.0.0.1`` (external proxy hop) +3. ``x-forwarded-for: 50.0.0.1, 10.0.0.1`` (internal proxy hop) + +Envoy will only append to XFF if the :ref:`use_remote_address +` HTTP connection manager option is set to true. +This means that if *use_remote_address* is false (which is the default), the connection manager +operates in a transparent mode where it does not modify XFF. + +.. 
attention:: + + In general, *use_remote_address* should be set to true when Envoy is deployed as an edge + node (aka a front proxy), whereas it may need to be set to false when Envoy is used as + an internal service node in a mesh deployment. + +.. _config_http_conn_man_headers_x-forwarded-for_trusted_client_address: + +The value of *use_remote_address* controls how Envoy determines the *trusted client address*. +Given an HTTP request that has traveled through a series of zero or more proxies to reach +Envoy, the trusted client address is the earliest source IP address that is known to be +accurate. The source IP address of the immediate downstream node's connection to Envoy is +trusted. XFF *sometimes* can be trusted. Malicious clients can forge XFF, but the last +address in XFF can be trusted if it was put there by a trusted proxy. + +Envoy's default rules for determining the trusted client address (*before* appending anything +to XFF) are: + +* If *use_remote_address* is false and an XFF containing at least one IP address is + present in the request, the trusted client address is the *last* (rightmost) IP address in XFF. +* Otherwise, the trusted client address is the source IP address of the immediate downstream + node's connection to Envoy. + +In an environment where there are one or more trusted proxies in front of an edge +Envoy instance, the *xff_num_trusted_hops* configuration option can be used to trust +additional addresses from XFF: + +* If *use_remote_address* is false and *xff_num_trusted_hops* is set to a value *N* that is + greater than zero, the trusted client address is the (N+1)th address from the right end + of XFF. (If the XFF contains fewer than N+1 addresses, Envoy falls back to using the + immediate downstream connection's source address as trusted client address.) 
+* If *use_remote_address* is true and *xff_num_trusted_hops* is set to a value *N* that is + greater than zero, the trusted client address is the Nth address from the right end + of XFF. (If the XFF contains fewer than N addresses, Envoy falls back to using the + immediate downstream connection's source address as trusted client address.) + +Envoy uses the trusted client address contents to determine whether a request originated +externally or internally. This influences whether the +:ref:`config_http_conn_man_headers_x-envoy-internal` header is set. + +Example 1: Envoy as edge proxy, without a trusted proxy in front of it + Settings: + | use_remote_address = true + | xff_num_trusted_hops = 0 + + Request details: + | Downstream IP address = 192.0.2.5 + | XFF = "203.0.113.128, 203.0.113.10, 203.0.113.1" + + Result: + | Trusted client address = 192.0.2.5 (XFF is ignored) + | X-Envoy-External-Address is set to 192.0.2.5 + | XFF is changed to "203.0.113.128, 203.0.113.10, 203.0.113.1, 192.0.2.5" + | X-Envoy-Internal is removed (if it was present in the incoming request) + +Example 2: Envoy as internal proxy, with the Envoy edge proxy from Example 1 in front of it + Settings: + | use_remote_address = false + | xff_num_trusted_hops = 0 + + Request details: + | Downstream IP address = 10.11.12.13 (address of the Envoy edge proxy) + | XFF = "203.0.113.128, 203.0.113.10, 203.0.113.1, 192.0.2.5" + + Result: + | Trusted client address = 192.0.2.5 (last address in XFF is trusted) + | X-Envoy-External-Address is not modified + | X-Envoy-Internal is removed (if it was present in the incoming request) + +Example 3: Envoy as edge proxy, with two trusted external proxies in front of it + Settings: + | use_remote_address = true + | xff_num_trusted_hops = 2 + + Request details: + | Downstream IP address = 192.0.2.5 + | XFF = "203.0.113.128, 203.0.113.10, 203.0.113.1" + + Result: + | Trusted client address = 203.0.113.10 (2nd to last address in XFF is trusted) + | 
X-Envoy-External-Address is set to 203.0.113.10 + | XFF is changed to "203.0.113.128, 203.0.113.10, 203.0.113.1, 192.0.2.5" + | X-Envoy-Internal is removed (if it was present in the incoming request) + +Example 4: Envoy as internal proxy, with the edge proxy from Example 3 in front of it + Settings: + | use_remote_address = false + | xff_num_trusted_hops = 2 + + Request details: + | Downstream IP address = 10.11.12.13 (address of the Envoy edge proxy) + | XFF = "203.0.113.128, 203.0.113.10, 203.0.113.1, 192.0.2.5" + + Result: + | Trusted client address = 203.0.113.10 + | X-Envoy-External-Address is not modified + | X-Envoy-Internal is removed (if it was present in the incoming request) + +Example 5: Envoy as an internal proxy, receiving a request from an internal client + Settings: + | use_remote_address = false + | xff_num_trusted_hops = 0 + + Request details: + | Downstream IP address = 10.20.30.40 (address of the internal client) + | XFF is not present + + Result: + | Trusted client address = 10.20.30.40 + | X-Envoy-External-Address remains unset + | X-Envoy-Internal is set to "true" + +Example 6: The internal Envoy from Example 5, receiving a request proxied by another Envoy + Settings: + | use_remote_address = false + | xff_num_trusted_hops = 0 + + Request details: + | Downstream IP address = 10.20.30.50 (address of the Envoy instance proxying to this one) + | XFF = "10.20.30.40" + + Result: + | Trusted client address = 10.20.30.40 + | X-Envoy-External-Address remains unset + | X-Envoy-Internal is set to "true" + +A few very important notes about XFF: + +1. If *use_remote_address* is set to true, Envoy sets the + :ref:`config_http_conn_man_headers_x-envoy-external-address` header to the trusted + client address. + +.. _config_http_conn_man_headers_x-forwarded-for_internal_origin: + +2. XFF is what Envoy uses to determine whether a request is internal origin or external origin. 
+ If *use_remote_address* is set to true, the request is internal if and only if the + request contains no XFF and the immediate downstream node's connection to Envoy has + an internal (RFC1918 or RFC4193) source address. If *use_remote_address* is false, the + request is internal if and only if XFF contains a single RFC1918 or RFC4193 address. + + * **NOTE**: If an internal service proxies an external request to another internal service, and + includes the original XFF header, Envoy will append to it on egress if + :ref:`use_remote_address ` is set. This will cause + the other side to think the request is external. Generally, this is what is intended if XFF is + being forwarded. If it is not intended, do not forward XFF, and forward + :ref:`config_http_conn_man_headers_x-envoy-internal` instead. + * **NOTE**: If an internal service call is forwarded to another internal service (preserving XFF), + Envoy will not consider it internal. This is a known "bug" due to the simplification of how + XFF is parsed to determine if a request is internal. In this scenario, do not forward XFF and + allow Envoy to generate a new one with a single internal origin IP. +3. Testing IPv6 in a large multi-hop system can be difficult from a change management perspective. + For testing IPv6 compatibility of upstream services which parse XFF header values, + :ref:`represent_ipv4_remote_address_as_ipv4_mapped_ipv6 ` + can be enabled in the v2 API. Envoy will append an IPv4 address in mapped IPv6 format, e.g. + ::FFFF:50.0.0.1. This change will also apply to + :ref:`config_http_conn_man_headers_x-envoy-external-address`. + +.. _config_http_conn_man_headers_x-forwarded-proto: + +x-forwarded-proto +----------------- + +It is a common case where a service wants to know what the originating protocol (HTTP or HTTPS) was +of the connection terminated by front/edge Envoy. *x-forwarded-proto* contains this information. It +will be set to either *http* or *https*. + +.. 
_config_http_conn_man_headers_x-request-id: + +x-request-id +------------ + +The *x-request-id* header is used by Envoy to uniquely identify a request as well as perform stable +access logging and tracing. Envoy will generate an *x-request-id* header for all external origin +requests (the header is sanitized). It will also generate an *x-request-id* header for internal +requests that do not already have one. This means that *x-request-id* can and should be propagated +between client applications in order to have stable IDs across the entire mesh. Due to the out of +process architecture of Envoy, the header can not be automatically forwarded by Envoy itself. This +is one of the few areas where a thin client library is needed to perform this duty. How that is done +is out of scope for this documentation. If *x-request-id* is propagated across all hosts, the +following features are available: + +* Stable :ref:`access logging ` via the + :ref:`v1 API runtime filter` or the + :ref:`v2 API runtime filter`. +* Stable tracing when performing random sampling via the :ref:`tracing.random_sampling + ` runtime setting or via forced tracing using the + :ref:`config_http_conn_man_headers_x-envoy-force-trace` and + :ref:`config_http_conn_man_headers_x-client-trace-id` headers. + +.. _config_http_conn_man_headers_x-ot-span-context: + +x-ot-span-context +----------------- + +The *x-ot-span-context* HTTP header is used by Envoy to establish proper parent-child relationships +between tracing spans when used with the LightStep tracer. +For example, an egress span is a child of an ingress +span (if the ingress span was present). Envoy injects the *x-ot-span-context* header on ingress requests and +forwards it to the local service. Envoy relies on the application to propagate *x-ot-span-context* on +the egress call to an upstream. See more on tracing :ref:`here `. + +.. 
_config_http_conn_man_headers_x-b3-traceid: + +x-b3-traceid +------------ + +The *x-b3-traceid* HTTP header is used by the Zipkin tracer in Envoy. +The TraceId is 64-bit in length and indicates the overall ID of the +trace. Every span in a trace shares this ID. See more on zipkin tracing +`here `. + +.. _config_http_conn_man_headers_x-b3-spanid: + +x-b3-spanid +----------- + +The *x-b3-spanid* HTTP header is used by the Zipkin tracer in Envoy. +The SpanId is 64-bit in length and indicates the position of the current +operation in the trace tree. The value should not be interpreted: it may or +may not be derived from the value of the TraceId. See more on zipkin tracing +`here `. + +.. _config_http_conn_man_headers_x-b3-parentspanid: + +x-b3-parentspanid +----------------- + +The *x-b3-parentspanid* HTTP header is used by the Zipkin tracer in Envoy. +The ParentSpanId is 64-bit in length and indicates the position of the +parent operation in the trace tree. When the span is the root of the trace +tree, the ParentSpanId is absent. See more on zipkin tracing +`here `. + +.. _config_http_conn_man_headers_x-b3-sampled: + +x-b3-sampled +------------ + +The *x-b3-sampled* HTTP header is used by the Zipkin tracer in Envoy. +When the Sampled flag is either not specified or set to 1, the span will be reported to the tracing +system. Once Sampled is set to 0 or 1, the same +value should be consistently sent downstream. See more on zipkin tracing +`here `. + +.. _config_http_conn_man_headers_x-b3-flags: + +x-b3-flags +---------- + +The *x-b3-flags* HTTP header is used by the Zipkin tracer in Envoy. +The encode one or more options. For example, Debug is encoded as +``X-B3-Flags: 1``. See more on zipkin tracing +`here `. + +.. 
_config_http_conn_man_headers_custom_request_headers: + +Custom request/response headers +------------------------------- + +Custom request/response headers can be added to a request/response at the weighted cluster, +route, virtual host, and/or global route configuration level. See the relevant :ref:`v1 +` and :ref:`v2 ` API +documentation. + +Headers are appended to requests/responses in the following order: weighted cluster level headers, +route level headers, virtual host level headers and finally global level headers. + +Envoy supports adding dynamic values to request and response headers. The percent symbol (%) is +used to delimit variable names. + +.. attention:: + + If a literal percent symbol (%) is desired in a request/response header, it must be escaped by + doubling it. For example, to emit a header with the value ``100%``, the custom header value in + the Envoy configuration must be ``100%%``. + +Supported variable names are: + +%CLIENT_IP% + The original client IP which is already added by Envoy as a + :ref:`x-forwarded-for ` request header. + + .. attention:: + + This field is deprecated. Use **DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT** instead. + +%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% + Remote address of the downstream connection. If the address is an IP address the output does + *not* include port. + + .. note:: + + This may not be the physical remote address of the peer if the address has been inferred from + :ref:`proxy proto ` or :ref:`x-forwarded-for + `. + +%DOWNSTREAM_LOCAL_ADDRESS% + Local address of the downstream connection. If the address is an IP address it includes both + address and port. + If the original connection was redirected by iptables REDIRECT, this represents + the original destination address restored by the + :ref:`Original Destination Filter ` using SO_ORIGINAL_DST socket option. 
+ If the original connection was redirected by iptables TPROXY, and the listener's transparent + option was set to true, this represents the original destination address and port. + +%DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT% + Same as **%DOWNSTREAM_LOCAL_ADDRESS%** excluding port if the address is an IP address. + +%PROTOCOL% + The original protocol which is already added by Envoy as a + :ref:`x-forwarded-proto ` request header. + +%UPSTREAM_METADATA(["namespace", "key", ...])% + Populates the header with :ref:`EDS endpoint metadata ` from the + upstream host selected by the router. Metadata may be selected from any namespace. In general, + metadata values may be strings, numbers, booleans, lists, nested structures, or null. Upstream + metadata values may be selected from nested structs by specifying multiple keys. Otherwise, + only string, boolean, and numeric values are supported. If the namespace or key(s) are not + found, or if the selected value is not a supported type, then no header is emitted. The + namespace and key(s) are specified as a JSON array of strings. Finally, percent symbols in the + parameters **do not** need to be escaped by doubling them. diff --git a/docs/root/configuration/http_conn_man/http_conn_man.rst b/docs/root/configuration/http_conn_man/http_conn_man.rst new file mode 100644 index 000000000000..0ec3a8bd008b --- /dev/null +++ b/docs/root/configuration/http_conn_man/http_conn_man.rst @@ -0,0 +1,20 @@ +.. _config_http_conn_man: + +HTTP connection manager +======================= + +* HTTP connection manager :ref:`architecture overview ` +* HTTP protocols :ref:`architecture overview ` +* :ref:`v1 API reference ` +* :ref:`v2 API reference ` + +.. 
toctree:: + :hidden: + + route_matching + traffic_splitting + headers + header_sanitizing + stats + runtime + rds diff --git a/docs/root/configuration/http_conn_man/rds.rst b/docs/root/configuration/http_conn_man/rds.rst new file mode 100644 index 000000000000..7e65e1febfda --- /dev/null +++ b/docs/root/configuration/http_conn_man/rds.rst @@ -0,0 +1,30 @@ +.. _config_http_conn_man_rds: + +Route discovery service (RDS) +============================= + +The route discovery service (RDS) API is an optional API that Envoy will call to dynamically fetch +:ref:`route configurations `. A route configuration includes both +HTTP header modifications, virtual hosts, and the individual route entries contained within each +virtual host. Each :ref:`HTTP connection manager filter ` can independently +fetch its own route configuration via the API. + +* :ref:`v1 API reference ` +* :ref:`v2 API reference ` + +Statistics +---------- + +RDS has a statistics tree rooted at *http..rds..*. +Any ``:`` character in the ``route_config_name`` name gets replaced with ``_`` in the +stats tree. The stats tree contains the following statistics: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + config_reload, Counter, Total API fetches that resulted in a config reload due to a different config + update_attempt, Counter, Total API fetches attempted + update_success, Counter, Total API fetches completed successfully + update_failure, Counter, Total API fetches that failed (either network or schema errors) + version, Gauge, Hash of the contents from the last successful API fetch diff --git a/docs/root/configuration/http_conn_man/route_matching.rst b/docs/root/configuration/http_conn_man/route_matching.rst new file mode 100644 index 000000000000..d6db6ae41abb --- /dev/null +++ b/docs/root/configuration/http_conn_man/route_matching.rst @@ -0,0 +1,19 @@ +.. _config_http_conn_man_route_table_route_matching: + +Route matching +============== + +.. 
attention:: + + This section is written for the v1 API but the concepts also apply to the v2 API. It will be + rewritten to target the v2 API in a future release. + +When Envoy matches a route, it uses the following procedure: + +#. The HTTP request's *host* or *:authority* header is matched to a :ref:`virtual host + `. +#. Each :ref:`route entry ` in the virtual host is checked, + *in order*. If there is a match, the route is used and no further route checks are made. +#. Independently, each :ref:`virtual cluster ` in the + virtual host is checked, *in order*. If there is a match, the virtual cluster is used and no + further virtual cluster checks are made. diff --git a/docs/root/configuration/http_conn_man/runtime.rst b/docs/root/configuration/http_conn_man/runtime.rst new file mode 100644 index 000000000000..9b5286bd02b6 --- /dev/null +++ b/docs/root/configuration/http_conn_man/runtime.rst @@ -0,0 +1,36 @@ +.. _config_http_conn_man_runtime: + +Runtime +======= + +The HTTP connection manager supports the following runtime settings: + +.. _config_http_conn_man_runtime_represent_ipv4_remote_address_as_ipv4_mapped_ipv6: + +http_connection_manager.represent_ipv4_remote_address_as_ipv4_mapped_ipv6 + % of requests with a remote address that will have their IPv4 address mapped to IPv6. Defaults to + 0. + :ref:`use_remote_address ` + must also be enabled. See + :ref:`represent_ipv4_remote_address_as_ipv4_mapped_ipv6 + ` + for more details. + +.. _config_http_conn_man_runtime_client_enabled: + +tracing.client_enabled + % of requests that will be force traced if the + :ref:`config_http_conn_man_headers_x-client-trace-id` header is set. Defaults to 100. + +.. _config_http_conn_man_runtime_global_enabled: + +tracing.global_enabled + % of requests that will be traced after all other checks have been applied (force tracing, + sampling, etc.). Defaults to 100. + +.. 
_config_http_conn_man_runtime_random_sampling: + +tracing.random_sampling + % of requests that will be randomly traced. See :ref:`here ` for more + information. This runtime control is specified in the range 0-10000 and defaults to 10000. Thus, + trace sampling can be specified in 0.01% increments. diff --git a/docs/root/configuration/http_conn_man/stats.rst b/docs/root/configuration/http_conn_man/stats.rst new file mode 100644 index 000000000000..380d2f97ee81 --- /dev/null +++ b/docs/root/configuration/http_conn_man/stats.rst @@ -0,0 +1,126 @@ +.. _config_http_conn_man_stats: + +Statistics +========== + +Every connection manager has a statistics tree rooted at *http..* with the following +statistics: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + downstream_cx_total, Counter, Total connections + downstream_cx_ssl_total, Counter, Total TLS connections + downstream_cx_http1_total, Counter, Total HTTP/1.1 connections + downstream_cx_websocket_total, Counter, Total WebSocket connections + downstream_cx_http2_total, Counter, Total HTTP/2 connections + downstream_cx_destroy, Counter, Total connections destroyed + downstream_cx_destroy_remote, Counter, Total connections destroyed due to remote close + downstream_cx_destroy_local, Counter, Total connections destroyed due to local close + downstream_cx_destroy_active_rq, Counter, Total connections destroyed with 1+ active request + downstream_cx_destroy_local_active_rq, Counter, Total connections destroyed locally with 1+ active request + downstream_cx_destroy_remote_active_rq, Counter, Total connections destroyed remotely with 1+ active request + downstream_cx_active, Gauge, Total active connections + downstream_cx_ssl_active, Gauge, Total active TLS connections + downstream_cx_http1_active, Gauge, Total active HTTP/1.1 connections + downstream_cx_websocket_active, Gauge, Total active WebSocket connections + downstream_cx_http2_active, Gauge, Total active HTTP/2 connections + 
downstream_cx_protocol_error, Counter, Total protocol errors + downstream_cx_length_ms, Histogram, Connection length milliseconds + downstream_cx_rx_bytes_total, Counter, Total bytes received + downstream_cx_rx_bytes_buffered, Gauge, Total received bytes currently buffered + downstream_cx_tx_bytes_total, Counter, Total bytes sent + downstream_cx_tx_bytes_buffered, Gauge, Total sent bytes currently buffered + downstream_cx_drain_close, Counter, Total connections closed due to draining + downstream_cx_idle_timeout, Counter, Total connections closed due to idle timeout + downstream_flow_control_paused_reading_total, Counter, Total number of times reads were disabled due to flow control + downstream_flow_control_resumed_reading_total, Counter, Total number of times reads were enabled on the connection due to flow control + downstream_rq_total, Counter, Total requests + downstream_rq_http1_total, Counter, Total HTTP/1.1 requests + downstream_rq_http2_total, Counter, Total HTTP/2 requests + downstream_rq_active, Gauge, Total active requests + downstream_rq_response_before_rq_complete, Counter, Total responses sent before the request was complete + downstream_rq_rx_reset, Counter, Total request resets received + downstream_rq_tx_reset, Counter, Total request resets sent + downstream_rq_non_relative_path, Counter, Total requests with a non-relative HTTP path + downstream_rq_too_large, Counter, Total requests resulting in a 413 due to buffering an overly large body + downstream_rq_1xx, Counter, Total 1xx responses + downstream_rq_2xx, Counter, Total 2xx responses + downstream_rq_3xx, Counter, Total 3xx responses + downstream_rq_4xx, Counter, Total 4xx responses + downstream_rq_5xx, Counter, Total 5xx responses + downstream_rq_ws_on_non_ws_route, Counter, Total WebSocket upgrade requests rejected by non WebSocket routes + downstream_rq_time, Histogram, Request time milliseconds + rs_too_large, Counter, Total response errors due to buffering an overly large body + +Per user 
agent statistics +------------------------- + +Additional per user agent statistics are rooted at *http..user_agent..* +Currently Envoy matches user agent for both iOS (*ios*) and Android (*android*) and produces +the following statistics: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + downstream_cx_total, Counter, Total connections + downstream_cx_destroy_remote_active_rq, Counter, Total connections destroyed remotely with 1+ active requests + downstream_rq_total, Counter, Total requests + +.. _config_http_conn_man_stats_per_listener: + +Per listener statistics +----------------------- + +Additional per listener statistics are rooted at *listener.
.http..* with the +following statistics: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + downstream_rq_1xx, Counter, Total 1xx responses + downstream_rq_2xx, Counter, Total 2xx responses + downstream_rq_3xx, Counter, Total 3xx responses + downstream_rq_4xx, Counter, Total 4xx responses + downstream_rq_5xx, Counter, Total 5xx responses + +.. _config_http_conn_man_stats_per_codec: + +Per codec statistics +----------------------- + +Each codec has the option of adding per-codec statistics. Currently only http2 has codec stats. + +Http2 codec statistics +~~~~~~~~~~~~~~~~~~~~~~ + +All http2 statistics are rooted at *http2.* + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + rx_reset, Counter, Total number of reset stream frames received by Envoy + tx_reset, Counter, Total number of reset stream frames transmitted by Envoy + header_overflow, Counter, Total number of connections reset due to the headers being larger than `Envoy::Http::Http2::ConnectionImpl::StreamImpl::MAX_HEADER_SIZE` (63k) + trailers, Counter, Total number of trailers seen on requests coming from downstream + headers_cb_no_stream, Counter, Total number of errors where a header callback is called without an associated stream. This tracks an unexpected occurrence due to an as yet undiagnosed bug + too_many_header_frames, Counter, Total number of times an HTTP2 connection is reset due to receiving too many headers frames. Envoy currently supports proxying at most one header frame for 100-Continue one non-100 response code header frame and one frame with trailers + +Tracing statistics +------------------ + +Tracing statistics are emitted when tracing decisions are made. All tracing statistics are rooted at *http..tracing.* with the following statistics: + +.. 
csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + random_sampling, Counter, Total number of traceable decisions by random sampling + service_forced, Counter, Total number of traceable decisions by server runtime flag *tracing.global_enabled* + client_enabled, Counter, Total number of traceable decisions by request header *x-envoy-force-trace* + not_traceable, Counter, Total number of non-traceable decisions by request id + health_check, Counter, Total number of non-traceable decisions by health check diff --git a/docs/root/configuration/http_conn_man/traffic_splitting.rst b/docs/root/configuration/http_conn_man/traffic_splitting.rst new file mode 100644 index 000000000000..bfa98aff6953 --- /dev/null +++ b/docs/root/configuration/http_conn_man/traffic_splitting.rst @@ -0,0 +1,145 @@ +.. _config_http_conn_man_route_table_traffic_splitting: + +Traffic Shifting/Splitting +=========================================== + +.. attention:: + + This section is written for the v1 API but the concepts also apply to the v2 API. It will be + rewritten to target the v2 API in a future release. + +.. contents:: + :local: + +Envoy's router can split traffic to a route in a virtual host across +two or more upstream clusters. There are two common use cases. + +1. Version upgrades: traffic to a route is shifted gradually +from one cluster to another. The +:ref:`traffic shifting ` +section describes this scenario in more detail. + +2. A/B testing or multivariate testing: ``two or more versions`` of +the same service are tested simultaneously. The traffic to the route has to +be *split* between clusters running different versions of the same +service. The +:ref:`traffic splitting ` +section describes this scenario in more detail. + +.. 
_config_http_conn_man_route_table_traffic_splitting_shift: + +Traffic shifting between two upstreams +-------------------------------------- + +The :ref:`runtime ` object +in the route configuration determines the probability of selecting a +particular route (and hence its cluster). By using the runtime +configuration, traffic to a particular route in a virtual host can be +gradually shifted from one cluster to another. Consider the following +example configuration, where two versions ``helloworld_v1`` and +``helloworld_v2`` of a service named ``helloworld`` are declared in the +envoy configuration file. + +.. code-block:: json + + { + "route_config": { + "virtual_hosts": [ + { + "name": "helloworld", + "domains": ["*"], + "routes": [ + { + "prefix": "/", + "cluster": "helloworld_v1", + "runtime": { + "key": "routing.traffic_shift.helloworld", + "default": 50 + } + }, + { + "prefix": "/", + "cluster": "helloworld_v2" + } + ] + } + ] + } + } + +Envoy matches routes with a :ref:`first match ` policy. +If the route has a runtime object, the request will be additionally matched based on the runtime +:ref:`value ` +(or the default, if no value is specified). Thus, by placing routes +back-to-back in the above example and specifying a runtime object in the +first route, traffic shifting can be accomplished by changing the runtime +value. The following is the approximate sequence of actions required to +accomplish the task. + +1. In the beginning, set ``routing.traffic_shift.helloworld`` to ``100``, + so that all requests to the ``helloworld`` virtual host would match with + the v1 route and be served by the ``helloworld_v1`` cluster. +2. To start shifting traffic to ``helloworld_v2`` cluster, set + ``routing.traffic_shift.helloworld`` to values ``0 < x < 100``. For + instance at ``90``, 1 out of every 10 requests to the ``helloworld`` + virtual host will not match the v1 route and will fall through to the v2 + route. +3. 
Gradually decrease the value set in ``routing.traffic_shift.helloworld`` + so that a larger percentage of requests match the v2 route. +4. When ``routing.traffic_shift.helloworld`` is set to ``0``, no requests + to the ``helloworld`` virtual host will match to the v1 route. All + traffic would now fall through to the v2 route and be served by the + ``helloworld_v2`` cluster. + + +.. _config_http_conn_man_route_table_traffic_splitting_split: + +Traffic splitting across multiple upstreams +------------------------------------------- + +Consider the ``helloworld`` example again, now with three versions (v1, v2 and +v3) instead of two. To split traffic evenly across the three versions +(i.e., ``33%, 33%, 34%``), the ``weighted_clusters`` option can be used to +specify the weight for each upstream cluster. + +Unlike the previous example, a **single** :ref:`route +` entry is sufficient. The +:ref:`weighted_clusters ` +configuration block in a route can be used to specify multiple upstream clusters +along with weights that indicate the **percentage** of traffic to be sent +to each upstream cluster. + +.. code-block:: json + + { + "route_config": { + "virtual_hosts": [ + { + "name": "helloworld", + "domains": ["*"], + "routes": [ + { + "prefix": "/", + "weighted_clusters": { + "runtime_key_prefix" : "routing.traffic_split.helloworld", + "clusters" : [ + { "name" : "helloworld_v1", "weight" : 33 }, + { "name" : "helloworld_v2", "weight" : 33 }, + { "name" : "helloworld_v3", "weight" : 34 } + ] + } + } + ] + } + ] + } + } + +By default, the weights must sum to exactly 100. In the V2 API, the +:ref:`total weight ` defaults to 100, but can +be modified to allow finer granularity. + +The weights assigned to each cluster can be dynamically adjusted using the +following runtime variables: ``routing.traffic_split.helloworld.helloworld_v1``, +``routing.traffic_split.helloworld.helloworld_v2`` and +``routing.traffic_split.helloworld.helloworld_v3``. 
diff --git a/docs/root/configuration/http_filters/buffer_filter.rst b/docs/root/configuration/http_filters/buffer_filter.rst new file mode 100644 index 000000000000..9fda71b389f4 --- /dev/null +++ b/docs/root/configuration/http_filters/buffer_filter.rst @@ -0,0 +1,23 @@ +.. _config_http_filters_buffer: + +Buffer +====== + +The buffer filter is used to stop filter iteration and wait for a fully buffered complete request. +This is useful in different situations including protecting some applications from having to deal +with partial requests and high network latency. + +* :ref:`v1 API reference ` +* :ref:`v2 API reference ` + +Statistics +---------- + +The buffer filter outputs statistics in the *http..buffer.* namespace. The :ref:`stat +prefix ` comes from the owning HTTP connection manager. + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + rq_timeout, Counter, Total requests that timed out waiting for a full request diff --git a/docs/root/configuration/http_filters/cors_filter.rst b/docs/root/configuration/http_filters/cors_filter.rst new file mode 100644 index 000000000000..436999a1d18d --- /dev/null +++ b/docs/root/configuration/http_filters/cors_filter.rst @@ -0,0 +1,12 @@ +.. _config_http_filters_cors: + +CORS +==== + +This is a filter which handles Cross-Origin Resource Sharing requests based on route or virtual host settings. +For the meaning of the headers please refer to the pages below. + +- https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS +- https://www.w3.org/TR/cors/ +- :ref:`v1 API reference ` +- :ref:`v2 API reference ` diff --git a/docs/root/configuration/http_filters/dynamodb_filter.rst b/docs/root/configuration/http_filters/dynamodb_filter.rst new file mode 100644 index 000000000000..5254afedd240 --- /dev/null +++ b/docs/root/configuration/http_filters/dynamodb_filter.rst @@ -0,0 +1,71 @@ +.. 
_config_http_filters_dynamo: + +DynamoDB +======== + +* DynamoDB :ref:`architecture overview ` +* :ref:`v1 API reference ` +* :ref:`v2 API reference ` + +Statistics +---------- + +The DynamoDB filter outputs statistics in the *http..dynamodb.* namespace. The +:ref:`stat prefix ` comes from the owning HTTP connection manager. + +Per operation stats can be found in the *http..dynamodb.operation..* +namespace. + + .. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + upstream_rq_total, Counter, Total number of requests with + upstream_rq_time, Histogram, Time spent on + upstream_rq_total_xxx, Counter, Total number of requests with per response code (503/2xx/etc) + upstream_rq_time_xxx, Histogram, Time spent on per response code (400/3xx/etc) + +Per table stats can be found in the *http..dynamodb.table..* namespace. +Most of the operations to DynamoDB involve a single table, but BatchGetItem and BatchWriteItem can +include several tables, Envoy tracks per table stats in this case only if it is the same table used +in all operations from the batch. + + .. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + upstream_rq_total, Counter, Total number of requests on table + upstream_rq_time, Histogram, Time spent on table + upstream_rq_total_xxx, Counter, Total number of requests on table per response code (503/2xx/etc) + upstream_rq_time_xxx, Histogram, Time spent on table per response code (400/3xx/etc) + +*Disclaimer: Please note that this is a pre-release Amazon DynamoDB feature that is not yet widely available.* +Per partition and operation stats can be found in the *http..dynamodb.table..* +namespace. For batch operations, Envoy tracks per partition and operation stats only if it is the same +table used in all operations. + + .. 
csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + capacity..__partition_id=, Counter, Total number of capacity for on table for a given + +Additional detailed stats: + +* For 4xx responses and partial batch operation failures, the total number of failures for a given + table and failure are tracked in the *http..dynamodb.error..* namespace. + + .. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + , Counter, Total number of specific for a given + BatchFailureUnprocessedKeys, Counter, Total number of partial batch failures for a given + +Runtime +------- + +The DynamoDB filter supports the following runtime settings: + +dynamodb.filter_enabled + The % of requests for which the filter is enabled. Default is 100%. diff --git a/docs/root/configuration/http_filters/fault_filter.rst b/docs/root/configuration/http_filters/fault_filter.rst new file mode 100644 index 000000000000..5c6b87e470aa --- /dev/null +++ b/docs/root/configuration/http_filters/fault_filter.rst @@ -0,0 +1,92 @@ +.. _config_http_filters_fault_injection: + +Fault Injection +=============== + +The fault injection filter can be used to test the resiliency of +microservices to different forms of failures. The filter can be used to +inject delays and abort requests with user-specified error codes, thereby +providing the ability to stage different failure scenarios such as service +failures, service overloads, high network latency, network partitions, +etc. Faults injection can be limited to a specific set of requests based on +the (destination) upstream cluster of a request and/or a set of pre-defined +request headers. + +The scope of failures is restricted to those that are observable by an +application communicating over the network. CPU and disk failures on the +local host cannot be emulated. + +Currently, the fault injection filter has the following limitations: + +* Abort codes are restricted to HTTP status codes only +* Delays are restricted to fixed duration. 
+ +Future versions will include support for restricting faults to specific +routes, injecting *gRPC* and *HTTP/2* specific error codes and delay +durations based on distributions. + +Configuration +------------- + +.. note:: + + The fault injection filter must be inserted before any other filter, + including the router filter. + +* :ref:`v1 API reference ` +* :ref:`v2 API reference ` + +Runtime +------- + +The HTTP fault injection filter supports the following global runtime settings: + +fault.http.abort.abort_percent + % of requests that will be aborted if the headers match. Defaults to the + *abort_percent* specified in config. If the config does not contain an + *abort* block, then *abort_percent* defaults to 0. + +fault.http.abort.http_status + HTTP status code that will be used as the of requests that will be + aborted if the headers match. Defaults to the HTTP status code specified + in the config. If the config does not contain an *abort* block, then + *http_status* defaults to 0. + +fault.http.delay.fixed_delay_percent + % of requests that will be delayed if the headers match. Defaults to the + *delay_percent* specified in the config or 0 otherwise. + +fault.http.delay.fixed_duration_ms + The delay duration in milliseconds. If not specified, the + *fixed_duration_ms* specified in the config will be used. If this field + is missing from both the runtime and the config, no delays will be + injected. + +*Note*, fault filter runtime settings for the specific downstream cluster +override the default ones if present. The following are downstream specific +runtime keys: + +* fault.http..abort.abort_percent +* fault.http..abort.http_status +* fault.http..delay.fixed_delay_percent +* fault.http..delay.fixed_duration_ms + +Downstream cluster name is taken from +:ref:`the HTTP x-envoy-downstream-service-cluster ` +header. If the following settings are not found in the runtime it defaults to the global runtime settings +which defaults to the config settings. 
+ +Statistics +---------- + +The fault filter outputs statistics in the *http..fault.* namespace. The :ref:`stat +prefix ` comes from the owning HTTP connection manager. + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + delays_injected, Counter, Total requests that were delayed + aborts_injected, Counter, Total requests that were aborted + .delays_injected, Counter, Total delayed requests for the given downstream cluster + .aborts_injected, Counter, Total aborted requests for the given downstream cluster diff --git a/docs/root/configuration/http_filters/grpc_http1_bridge_filter.rst b/docs/root/configuration/http_filters/grpc_http1_bridge_filter.rst new file mode 100644 index 000000000000..5af008dcef9b --- /dev/null +++ b/docs/root/configuration/http_filters/grpc_http1_bridge_filter.rst @@ -0,0 +1,50 @@ +.. _config_http_filters_grpc_bridge: + +gRPC HTTP/1.1 bridge +==================== + +* gRPC :ref:`architecture overview ` +* :ref:`v1 API reference ` +* :ref:`v2 API reference ` + +This is a simple filter which enables the bridging of an HTTP/1.1 client which does not support +response trailers to a compliant gRPC server. It works by doing the following: + +* When a request is sent, the filter sees if the connection is HTTP/1.1 and the request content type + is *application/grpc*. +* If so, when the response is received, the filter buffers it and waits for trailers and then checks the + *grpc-status* code. If it is not zero, the filter switches the HTTP response code to 503. It also copies + the *grpc-status* and *grpc-message* trailers into the response headers so that the client can look + at them if it wishes. +* The client should send HTTP/1.1 requests that translate to the following pseudo headers: + + * *\:method*: POST + * *\:path*: + * *content-type*: application/grpc + +* The body should be the serialized grpc body which is: + + * 1 byte of zero (not compressed). + * network order 4 bytes of proto message length. 
+ * serialized proto message. + +* Because this scheme must buffer the response to look for the *grpc-status* trailer it will only + work with unary gRPC APIs. + +This filter also collects stats for all gRPC requests that transit, even if those requests are +normal gRPC requests over HTTP/2. + +More info: wire format in `gRPC over HTTP/2 `_. + +Statistics +---------- + +The filter emits statistics in the *cluster..grpc.* namespace. + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + ..success, Counter, Total successful service/method calls + ..failure, Counter, Total failed service/method calls + ..total, Counter, Total service/method calls diff --git a/docs/root/configuration/http_filters/grpc_json_transcoder_filter.rst b/docs/root/configuration/http_filters/grpc_json_transcoder_filter.rst new file mode 100644 index 000000000000..3eaa96ae6a2c --- /dev/null +++ b/docs/root/configuration/http_filters/grpc_json_transcoder_filter.rst @@ -0,0 +1,37 @@ +.. _config_http_filters_grpc_json_transcoder: + +gRPC-JSON transcoder +==================== + +* gRPC :ref:`architecture overview ` +* :ref:`v1 API reference ` +* :ref:`v2 API reference ` + +This is a filter which allows a RESTful JSON API client to send requests to Envoy over HTTP +and get proxied to a gRPC service. The HTTP mapping for the gRPC service has to be defined by +`custom options `_. + +.. _config_grpc_json_generate_proto_descriptor_set: + +How to generate proto descriptor set +------------------------------------ + +Envoy has to know the proto descriptor of your gRPC service in order to do the transcoding. + +To generate a protobuf descriptor set for the gRPC service, you'll also need to clone the +googleapis repository from GitHub before running protoc, as you'll need annotations.proto +in your include path, to define the HTTP mapping. + +.. 
code-block:: bash + + git clone https://github.com/googleapis/googleapis + GOOGLEAPIS_DIR= + +Then run protoc to generate the descriptor set from bookstore.proto: + +.. code-block:: bash + + protoc -I$(GOOGLEAPIS_DIR) -I. --include_imports --include_source_info \ + --descriptor_set_out=proto.pb test/proto/bookstore.proto + +If you have more than one proto source files, you can pass all of them in one command. diff --git a/docs/root/configuration/http_filters/grpc_web_filter.rst b/docs/root/configuration/http_filters/grpc_web_filter.rst new file mode 100644 index 000000000000..2fe81100f156 --- /dev/null +++ b/docs/root/configuration/http_filters/grpc_web_filter.rst @@ -0,0 +1,11 @@ +.. _config_http_filters_grpc_web: + +gRPC-Web +======== + +* gRPC :ref:`architecture overview ` +* :ref:`v1 API reference ` +* :ref:`v2 API reference ` + +This is a filter which enables the bridging of a gRPC-Web client to a compliant gRPC server by +following https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-WEB.md. diff --git a/docs/root/configuration/http_filters/gzip_filter.rst b/docs/root/configuration/http_filters/gzip_filter.rst new file mode 100644 index 000000000000..e1daf6a702a5 --- /dev/null +++ b/docs/root/configuration/http_filters/gzip_filter.rst @@ -0,0 +1,51 @@ +.. _config_http_filters_gzip: + +Gzip +==== +Gzip is an HTTP filter which enables Envoy to compress dispatched data +from an upstream service upon client request. Compression is useful in +situations where large payloads need to be transmitted without +compromising the response time. + +Configuration +------------- +* :ref:`v2 API reference ` + +.. attention:: + + The *window bits* is a number that tells the compressor how far ahead in the + text the algorithm should be looking for repeated sequence of characters. + Due to a known bug in the underlying zlib library, *window bits* with value + eight does not work as expected. Therefore any number below that will be + automatically set to 9. 
This issue might be solved in future releases of + the library. + +How it works +------------ +When gzip filter is enabled, request and response headers are inspected to +determine whether or not the content should be compressed. The content is +compressed and then sent to the client with the appropriate headers if both +the response and request allow. + +By *default* compression will be *skipped* when: + +- A request does NOT contain *accept-encoding* header. +- A request includes *accept-encoding* header, but it does not contain "gzip". +- A response contains a *content-encoding* header. +- A response contains a *cache-control* header whose value includes "no-transform". +- A response contains a *transfer-encoding* header whose value includes "gzip". +- A response does not contain a *content-type* value that matches one of the selected + mime-types, which default to *application/javascript*, *application/json*, + *application/xhtml+xml*, *image/svg+xml*, *text/css*, *text/html*, *text/plain*, + *text/xml*. +- Neither *content-length* nor *transfer-encoding* headers are present in + the response. +- Response size is smaller than 30 bytes (only applicable when *transfer-encoding* + is not chunked). + +When compression is *applied*: + +- The *content-length* is removed from response headers. +- Response headers contain "*transfer-encoding: chunked*" and + "*content-encoding: gzip*". +- The "*vary: accept-encoding*" header is inserted on every response. diff --git a/docs/root/configuration/http_filters/health_check_filter.rst b/docs/root/configuration/http_filters/health_check_filter.rst new file mode 100644 index 000000000000..490b869b3a32 --- /dev/null +++ b/docs/root/configuration/http_filters/health_check_filter.rst @@ -0,0 +1,17 @@ +.. _config_http_filters_health_check: + +Health check +============ + +* Health check filter :ref:`architecture overview ` +* :ref:`v1 API reference ` +* :ref:`v2 API reference ` + +.. 
note:: + + Note that the filter will automatically fail health checks and set the + :ref:`x-envoy-immediate-health-check-fail + ` header if the + :ref:`/healthcheck/fail ` admin endpoint has been + called. (The :ref:`/healthcheck/ok ` admin endpoint + reverses this behavior). diff --git a/docs/root/configuration/http_filters/http_filters.rst b/docs/root/configuration/http_filters/http_filters.rst new file mode 100644 index 000000000000..e7a513f7001c --- /dev/null +++ b/docs/root/configuration/http_filters/http_filters.rst @@ -0,0 +1,22 @@ +.. _config_http_filters: + +HTTP filters +============ + +.. toctree:: + :maxdepth: 2 + + buffer_filter + cors_filter + dynamodb_filter + fault_filter + grpc_http1_bridge_filter + grpc_json_transcoder_filter + grpc_web_filter + gzip_filter + health_check_filter + ip_tagging_filter + lua_filter + rate_limit_filter + router_filter + squash_filter diff --git a/docs/root/configuration/http_filters/ip_tagging_filter.rst b/docs/root/configuration/http_filters/ip_tagging_filter.rst new file mode 100644 index 000000000000..95fd84bcaf11 --- /dev/null +++ b/docs/root/configuration/http_filters/ip_tagging_filter.rst @@ -0,0 +1,41 @@ +.. _config_http_filters_ip_tagging: + +IP Tagging +========== + +The HTTP IP Tagging filter sets the header *x-envoy-ip-tags* with the string tags for the trusted address from +:ref:`x-forwarded-for `. If there are no tags for an address, +the header is not set. + +The implementation for IP Tagging provides a scalable way to compare an IP address to a large list of CIDR +ranges efficiently. The underlying algorithm for storing tags and IP address subnets is a Level-Compressed trie +described in the paper `IP-address lookup using +LC-tries `_ by S. Nilsson and +G. Karlsson. + + +Configuration +------------- +* :ref:`v2 API reference ` + +Statistics +---------- + +The IP Tagging filter outputs statistics in the *http..ip_tagging.* namespace. The stat prefix comes from +the owning HTTP connection manager. + +.. 
csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + .hit, Counter, Total number of requests that have the applied to it + no_hit, Counter, Total number of requests with no applicable IP tags + total, Counter, Total number of requests the IP Tagging Filter operated on + +Runtime +------- + +The IP Tagging filter supports the following runtime settings: + +ip_tagging.http_filter_enabled + The % of requests for which the filter is enabled. Default is 100. diff --git a/docs/root/configuration/http_filters/lua_filter.rst b/docs/root/configuration/http_filters/lua_filter.rst new file mode 100644 index 000000000000..be55da393b2b --- /dev/null +++ b/docs/root/configuration/http_filters/lua_filter.rst @@ -0,0 +1,417 @@ +.. _config_http_filters_lua: + +Lua +=== + +.. attention:: + + The Lua scripting HTTP filter is **experimental**. Use in production at your own risk. It is + being released for initial feedback on the exposed API and for further development, testing, + and verification. This warning will be removed when we feel that the filter has received enough + testing and API stability to call it generally production ready. + +.. attention:: + + By default Envoy is built without exporting symbols that you may need when interacting with Lua + modules installed as shared objects. Envoy may need to be built with support for exported symbols. + Please see the :repo:`Bazel docs ` for more information. + +Overview +-------- + +The HTTP Lua filter allows `Lua `_ scripts to be run during both the request +and response flows. `LuaJIT `_ is used as the runtime. Because of this, the +supported Lua version is mostly 5.1 with some 5.2 features. See the `LuaJIT documentation +`_ for more details. + +The filter only supports loading Lua code in-line in the configuration. If local filesystem code +is desired, a trivial in-line script can be used to load the rest of the code from the local +environment. 
+ +The design of the filter and Lua support at a high level is as follows: + +* All Lua environments are :ref:`per worker thread `. This means that + there is no truly global data. Any globals created and populated at load time will be visible + from each worker thread in isolation. True global support may be added via an API in the future. +* All scripts are run as coroutines. This means that they are written in a synchronous style even + though they may perform complex asynchronous tasks. This makes the scripts substantially easier + to write. All network/async processing is performed by Envoy via a set of APIs. Envoy will + yield the script as appropriate and resume it when async tasks are complete. +* **Do not perform blocking operations from scripts.** It is critical for performance that + Envoy APIs are used for all IO. + +Currently supported high level features +--------------------------------------- + +**NOTE:** It is expected that this list will expand over time as the filter is used in production. +The API surface has been kept small on purpose. The goal is to make scripts extremely simple and +safe to write. Very complex or high performance use cases are assumed to use the native C++ filter +API. + +* Inspection of headers, body, and trailers while streaming in either the request flow, response + flow, or both. +* Modification of headers and trailers. +* Blocking and buffering the full request/response body for inspection. +* Performing an outbound async HTTP call to an upstream host. Such a call can be performed while + buffering body data so that when the call completes upstream headers can be modified. +* Performing a direct response and skipping further filter iteration. For example, a script + could make an upstream HTTP call for authentication, and then directly respond with a 403 + response code. 
+ +Configuration +------------- + +* :ref:`v1 API reference ` +* :ref:`v2 API reference ` + +Script examples +--------------- + +This section provides some concrete examples of Lua scripts as a more gentle introduction and quick +start. Please refer to the :ref:`stream handle API ` for +more details on the supported API. + +.. code-block:: lua + + -- Called on the request path. + function envoy_on_request(request_handle) + -- Wait for the entire request body and add a request header with the body size. + request_handle:headers():add("request_body_size", request_handle:body():length()) + end + + -- Called on the response path. + function envoy_on_response(response_handle) + -- Wait for the entire response body and add a response header with the body size. + response_handle:headers():add("response_body_size", response_handle:body():length()) + -- Remove a response header named 'foo' + response_handle:headers():remove("foo") + end + +.. code-block:: lua + + function envoy_on_request(request_handle) + -- Make an HTTP call to an upstream host with the following headers, body, and timeout. + local headers, body = request_handle:httpCall( + "lua_cluster", + { + [":method"] = "POST", + [":path"] = "/", + [":authority"] = "lua_cluster" + }, + "hello world", + 5000) + + -- Add information from the HTTP call into the headers that are about to be sent to the next + -- filter in the filter chain. + request_handle:headers():add("upstream_foo", headers["foo"]) + request_handle:headers():add("upstream_body_size", #body) + end + +.. code-block:: lua + + function envoy_on_request(request_handle) + -- Make an HTTP call. + local headers, body = request_handle:httpCall( + "lua_cluster", + { + [":method"] = "POST", + [":path"] = "/", + [":authority"] = "lua_cluster" + }, + "hello world", + 5000) + + -- Respond directly and set a header from the HTTP call. No further filter iteration + -- occurs. 
+ request_handle:respond( + {[":status"] = "403", + ["upstream_foo"] = headers["foo"]}, + "nope") + end + +.. _config_http_filters_lua_stream_handle_api: + +Stream handle API +----------------- + +When Envoy loads the script in the configuration, it looks for two global functions that the +script defines: + +.. code-block:: lua + + function envoy_on_request(request_handle) + end + + function envoy_on_response(response_handle) + end + +A script can define either or both of these functions. During the request path, Envoy will +run *envoy_on_request* as a coroutine, passing an API handle. During the response path, Envoy will +run *envoy_on_response* as a coroutine, passing an API handle. + +.. attention:: + + It is critical that all interaction with Envoy occur through the passed stream handle. The stream + handle should not be assigned to any global variable and should not be used outside of the + coroutine. Envoy will fail your script if the handle is used incorrectly. + +The following methods on the stream handle are supported: + +headers() +^^^^^^^^^ + +.. code-block:: lua + + headers = handle:headers() + +Returns the stream's headers. The headers can be modified as long as they have not been sent to +the next filter in the header chain. For example, they can be modified after an *httpCall()* or +after a *body()* call returns. The script will fail if the headers are modified in any other +situation. + +Returns a :ref:`header object `. + +body() +^^^^^^ + +.. code-block:: lua + + body = handle:body() + +Returns the stream's body. This call will cause Envoy to yield the script until the entire body +has been buffered. Note that all buffering must adhere to the flow control policies in place. +Envoy will not buffer more data than is allowed by the connection manager. + +Returns a :ref:`buffer object `. + +bodyChunks() +^^^^^^^^^^^^ + +.. 
code-block:: lua + + iterator = handle:bodyChunks() + +Returns an iterator that can be used to iterate through all received body chunks as they arrive. +Envoy will yield the script in between chunks, but *will not buffer* them. This can be used by +a script to inspect data as it is streaming by. + +.. code-block:: lua + + for chunk in request_handle:bodyChunks() do + request_handle:log(0, chunk:length()) + end + +Each chunk the iterator returns is a :ref:`buffer object `. + +trailers() +^^^^^^^^^^ + +.. code-block:: lua + + trailers = handle:trailers() + +Returns the stream's trailers. May return nil if there are no trailers. The trailers may be +modified before they are sent to the next filter. + +Returns a :ref:`header object `. + +log*() +^^^^^^ + +.. code-block:: lua + + handle:logTrace(message) + handle:logDebug(message) + handle:logInfo(message) + handle:logWarn(message) + handle:logErr(message) + handle:logCritical(message) + +Logs a message using Envoy's application logging. *message* is a string to log. + +httpCall() +^^^^^^^^^^ + +.. code-block:: lua + + headers, body = handle:httpCall(cluster, headers, body, timeout) + +Makes an HTTP call to an upstream host. Envoy will yield the script until the call completes or +has an error. *cluster* is a string which maps to a configured cluster manager cluster. *headers* +is a table of key/value pairs to send. Note that the *:method*, *:path*, and *:authority* headers +must be set. *body* is an optional string of body data to send. *timeout* is an integer that +specifies the call timeout in milliseconds. + +Returns *headers* which is a table of response headers. Returns *body* which is the string response +body. May be nil if there is no body. + +respond() +^^^^^^^^^^ + +.. code-block:: lua + + handle:respond(headers, body) + +Respond immediately and do not continue further filter iteration. This call is *only valid in +the request flow*. 
Additionally, a response is only possible if request headers have not yet been +passed to subsequent filters. Meaning, the following Lua code is invalid: + +.. code-block:: lua + + function envoy_on_request(request_handle) + for chunk in request_handle:bodyChunks() do + request_handle:respond( + {[":status"] = "100"}, + "nope") + end + end + +*headers* is a table of key/value pairs to send. Note that the *:status* header +must be set. *body* is a string and supplies the optional response body. May be nil. + +metadata() +^^^^^^^^^^ + +.. code-block:: lua + + metadata = handle:metadata() + +Returns the current route entry metadata. Note that the metadata should be specified +under the filter name i.e. *envoy.lua*. Below is an example of a *metadata* in a +:ref:`route entry `. + +.. code-block:: yaml + + metadata: + filter_metadata: + envoy.lua: + foo: bar + baz: + - bad + - baz + +Returns a :ref:`metadata object `. + +.. _config_http_filters_lua_header_wrapper: + +Header object API +----------------- + +add() +^^^^^ + +.. code-block:: lua + + headers:add(key, value) + +Adds a header. *key* is a string that supplies the header key. *value* is a string that supplies +the header value. + +.. attention:: + + Envoy treats certain headers specially. These are known as the O(1) or *inline* headers. The + list of inline headers can be found `here `_. + If an inline header is already present in the header map, *add()* will have no effect. If + attempting to *add()* a non-inline header, the additional header will be added so that the + resultant headers contains multiple header entries with the same name. Consider using the + *replace* function if want to replace a header with another value. Note also that we + understand this behavior is confusing and we may change it in a future release. + +get() +^^^^^ + +.. code-block:: lua + + headers:get(key) + +Gets a header. *key* is a string that supplies the header key. 
Returns a string that is the header +value or nil if there is no such header. + +__pairs() +^^^^^^^^^ + +.. code-block:: lua + + for key, value in pairs(headers) do + end + +Iterates through every header. *key* is a string that supplies the header key. *value* is a string +that supplies the header value. + +.. attention:: + + In the currently implementation, headers cannot be modified during iteration. Additionally, if + it is desired to modify headers after iteration, the iteration must be completed. Meaning, do + not use `break` or any other mechanism to exit the loop early. This may be relaxed in the future. + +remove() +^^^^^^^^ + +.. code-block:: lua + + headers:remove(key) + +Removes a header. *key* supplies the header key to remove. + +replace() +^^^^^^^^^ + +.. code-block:: lua + + headers:replace(key, value) + +Replaces a header. *key* is a string that supplies the header key. *value* is a string that supplies +the header value. If the header does not exist, it is added as per the *add()* function. + +.. _config_http_filters_lua_buffer_wrapper: + +Buffer API +---------- + +length() +^^^^^^^^^^ + +.. code-block:: lua + + size = buffer:length() + +Gets the size of the buffer in bytes. Returns an integer. + +getBytes() +^^^^^^^^^^ + +.. code-block:: lua + + buffer:getBytes(index, length) + +Get bytes from the buffer. By default Envoy will not copy all buffer bytes to Lua. This will +cause a buffer segment to be copied. *index* is an integer and supplies the buffer start index to +copy. *length* is an integer and supplies the buffer length to copy. *index* + *length* must be +less than the buffer length. + +.. _config_http_filters_lua_metadata_wrapper: + +Metadata object API +------------------- + +get() +^^^^^ + +.. code-block:: lua + + metadata:get(key) + +Gets a metadata. *key* is a string that supplies the metadata key. Returns the corresponding +value of the given metadata key. 
The type of the value can be: *null*, *boolean*, *number*, +*string* and *table*. + +__pairs() +^^^^^^^^^ + +.. code-block:: lua + + for key, value in pairs(metadata) do + end + +Iterates through every *metadata* entry. *key* is a string that supplies a *metadata* +key. *value* is *metadata* entry value. diff --git a/docs/root/configuration/http_filters/rate_limit_filter.rst b/docs/root/configuration/http_filters/rate_limit_filter.rst new file mode 100644 index 000000000000..dcac97e337cb --- /dev/null +++ b/docs/root/configuration/http_filters/rate_limit_filter.rst @@ -0,0 +1,126 @@ +.. _config_http_filters_rate_limit: + +Rate limit +========== + +* Global rate limiting :ref:`architecture overview ` +* :ref:`v1 API reference ` +* :ref:`v2 API reference ` + +The HTTP rate limit filter will call the rate limit service when the request's route or virtual host +has one or more :ref:`rate limit configurations` +that match the filter stage setting. The :ref:`route` +can optionally include the virtual host rate limit configurations. More than one configuration can +apply to a request. Each configuration results in a descriptor being sent to the rate limit service. + +If the rate limit service is called, and the response for any of the descriptors is over limit, a +429 response is returned. + +.. _config_http_filters_rate_limit_composing_actions: + +Composing Actions +----------------- + +.. attention:: + + This section is written for the v1 API but the concepts also apply to the v2 API. It will be + rewritten to target the v2 API in a future release. + +Each :ref:`rate limit action ` on the route or +virtual host populates a descriptor entry. A vector of descriptor entries compose a descriptor. To +create more complex rate limit descriptors, actions can be composed in any order. The descriptor +will be populated in the order the actions are specified in the configuration. + +Example 1 +^^^^^^^^^ + +For example, to generate the following descriptor: + +.. 
code-block:: cpp + + ("generic_key", "some_value") + ("source_cluster", "from_cluster") + +The configuration would be: + +.. code-block:: json + + { + "actions" : [ + { + "type" : "generic_key", + "descriptor_value" : "some_value" + }, + { + "type" : "source_cluster" + } + ] + } + +Example 2 +^^^^^^^^^ + +If an action doesn't append a descriptor entry, no descriptor is generated for +the configuration. + +For the following configuration: + +.. code-block:: json + + { + "actions" : [ + { + "type" : "generic_key", + "descriptor_value" : "some_value" + }, + { + "type" : "remote_address" + }, + { + "type" : "souce_cluster" + } + ] + } + +If a request did not set :ref:`x-forwarded-for`, +no descriptor is generated. + +If a request sets :ref:`x-forwarded-for`, the +the following descriptor is generated: + +.. code-block:: cpp + + ("generic_key", "some_value") + ("remote_address", "") + ("source_cluster", "from_cluster") + +Statistics +---------- + +The buffer filter outputs statistics in the *cluster..ratelimit.* namespace. +429 responses are emitted to the normal cluster :ref:`dynamic HTTP statistics +`. + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + ok, Counter, Total under limit responses from the rate limit service + error, Counter, Total errors contacting the rate limit service + over_limit, Counter, total over limit responses from the rate limit service + +Runtime +------- + +The HTTP rate limit filter supports the following runtime settings: + +ratelimit.http_filter_enabled + % of requests that will call the rate limit service. Defaults to 100. + +ratelimit.http_filter_enforcing + % of requests that will call the rate limit service and enforce the decision. Defaults to 100. + This can be used to test what would happen before fully enforcing the outcome. + +ratelimit..http_filter_enabled + % of requests that will call the rate limit service for a given *route_key* specified in the + :ref:`rate limit configuration `. Defaults to 100. 
diff --git a/docs/root/configuration/http_filters/router_filter.rst b/docs/root/configuration/http_filters/router_filter.rst new file mode 100644 index 000000000000..a6a07cc15bef --- /dev/null +++ b/docs/root/configuration/http_filters/router_filter.rst @@ -0,0 +1,297 @@ +.. _config_http_filters_router: + +Router +====== + +The router filter implements HTTP forwarding. It will be used in almost all HTTP proxy scenarios +that Envoy is deployed for. The filter's main job is to follow the instructions specified in the +configured :ref:`route table `. In addition to forwarding and +redirection, the filter also handles retry, statistics, etc. + +* :ref:`v1 API reference ` +* :ref:`v2 API reference ` + +.. _config_http_filters_router_headers: + +HTTP headers +------------ + +The router consumes and sets various HTTP headers both on the egress/request path as well as on the +ingress/response path. They are documented in this section. + +.. contents:: + :local: + +.. _config_http_filters_router_x-envoy-expected-rq-timeout-ms: + +x-envoy-expected-rq-timeout-ms +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +This is the time in milliseconds the router expects the request to be completed. Envoy sets this +header so that the upstream host receiving the request can make decisions based on the request +timeout, e.g., early exit. This is set on internal requests and is either taken from the +:ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` header or the :ref:`route timeout +`, in that order. + +.. _config_http_filters_router_x-envoy-max-retries: + +x-envoy-max-retries +^^^^^^^^^^^^^^^^^^^ + +If a :ref:`retry policy ` is in place, Envoy will default to retrying one +time unless explicitly specified. The number of retries can be explicitly set in the +:ref:`route retry config ` or by using this header. 
+If a :ref:`retry policy ` is not configured and +:ref:`config_http_filters_router_x-envoy-retry-on` or +:ref:`config_http_filters_router_x-envoy-retry-grpc-on` headers are not specified, Envoy will not retry a failed request. + +A few notes on how Envoy does retries: + +* The route timeout (set via :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` or the + :ref:`route configuration `) **includes** all + retries. Thus if the request timeout is set to 3s, and the first request attempt takes 2.7s, the + retry (including backoff) has .3s to complete. This is by design to avoid an exponential + retry/timeout explosion. +* Envoy uses a fully jittered exponential backoff algorithm for retries with a base time of 25ms. + The first retry will be delayed randomly between 0-24ms, the 2nd between 0-74ms, the 3rd between + 0-174ms and so on. +* If max retries is set both by header as well as in the route configuration, the maximum value is + taken when determining the max retries to use for the request. + +.. _config_http_filters_router_x-envoy-retry-on: + +x-envoy-retry-on +^^^^^^^^^^^^^^^^ + +Setting this header on egress requests will cause Envoy to attempt to retry failed requests (number +of retries defaults to 1 and can be controlled by :ref:`x-envoy-max-retries +` header or the :ref:`route config retry policy +`). The value to which the x-envoy-retry-on header is +set indicates the retry policy. One or more policies can be specified using a ',' delimited list. +The supported policies are: + +5xx + Envoy will attempt a retry if the upstream server responds with any 5xx response code, or does not + respond at all (disconnect/reset/read timeout). (Includes *connect-failure* and *refused-stream*) + + * **NOTE:** Envoy will not retry when a request exceeds + :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` (resulting in a 504 error + code). 
Use :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` if you want + to retry when individual attempts take too long. + :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` is an outer time limit for a + request, including any retries that take place. + +gateway-error + This policy is similar to the *5xx* policy but will only retry requests that result in a 502, 503, + or 504. + +connect-failure + Envoy will attempt a retry if a request is failed because of a connection failure to the upstream + server (connect timeout, etc.). (Included in *5xx*) + + * **NOTE:** A connection failure/timeout is a the TCP level, not the request level. This does not + include upstream request timeouts specified via + :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` or via :ref:`route + configuration `. + +retriable-4xx + Envoy will attempt a retry if the upstream server responds with a retriable 4xx response code. + Currently, the only response code in this category is 409. + + * **NOTE:** Be careful turning on this retry type. There are certain cases where a 409 can indicate + that an optimistic locking revision needs to be updated. Thus, the caller should not retry and + needs to read then attempt another write. If a retry happens in this type of case it will always + fail with another 409. + +refused-stream + Envoy will attempt a retry if the upstream server resets the stream with a REFUSED_STREAM error + code. This reset type indicates that a request is safe to retry. (Included in *5xx*) + +The number of retries can be controlled via the +:ref:`config_http_filters_router_x-envoy-max-retries` header or via the :ref:`route +configuration `. + +Note that retry policies can also be applied at the :ref:`route level +`. + +By default, Envoy will *not* perform retries unless you've configured them per above. + +.. 
_config_http_filters_router_x-envoy-retry-grpc-on: + +x-envoy-retry-grpc-on +^^^^^^^^^^^^^^^^^^^^^ +Setting this header on egress requests will cause Envoy to attempt to retry failed requests (number of +retries defaults to 1, and can be controlled by +:ref:`x-envoy-max-retries ` +header or the :ref:`route config retry policy `). +gRPC retries are currently only supported for gRPC status codes in response headers. gRPC status codes in +trailers will not trigger retry logic. One or more policies can be specified using a ',' delimited +list. The supported policies are: + +cancelled + Envoy will attempt a retry if the gRPC status code in the response headers is "cancelled" (1) + +deadline-exceeded + Envoy will attempt a retry if the gRPC status code in the response headers is "deadline-exceeded" (4) + +resource-exhausted + Envoy will attempt a retry if the gRPC status code in the response headers is "resource-exhausted" (8) + +As with the x-envoy-retry-grpc-on header, the number of retries can be controlled via the +:ref:`config_http_filters_router_x-envoy-max-retries` header + +Note that retry policies can also be applied at the :ref:`route level +`. + +By default, Envoy will *not* perform retries unless you've configured them per above. + +.. _config_http_filters_router_x-envoy-upstream-alt-stat-name: + +x-envoy-upstream-alt-stat-name +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Setting this header on egress requests will cause Envoy to emit upstream response code/timing +statistics to a dual stat tree. This can be useful for application level categories that Envoy +doesn't know about. The output tree is documented :ref:`here `. + +This should not be confused with :ref:`alt_stat_name ` which +is specified while defining the cluster and when provided specifies an alternative name for the +cluster at the root of the statistic tree. 
+ +x-envoy-upstream-canary +^^^^^^^^^^^^^^^^^^^^^^^ + +If an upstream host sets this header, the router will use it to generate canary specific statistics. +The output tree is documented :ref:`here `. + +.. _config_http_filters_router_x-envoy-upstream-rq-timeout-alt-response: + +x-envoy-upstream-rq-timeout-alt-response +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Setting this header on egress requests will cause Envoy to set a 204 response code (instead of 504) +in the event of a request timeout. The actual value of the header is ignored; only its presence +is considered. See also :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`. + +.. _config_http_filters_router_x-envoy-upstream-rq-timeout-ms: + +x-envoy-upstream-rq-timeout-ms +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Setting this header on egress requests will cause Envoy to override the :ref:`route configuration +`. The timeout must be specified in millisecond +units. See also :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`. + +.. _config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms: + +x-envoy-upstream-rq-per-try-timeout-ms +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Setting this header on egress requests will cause Envoy to set a *per try* timeout on routed +requests. This timeout must be <= the global route timeout (see +:ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`) or it is ignored. This allows a +caller to set a tight per try timeout to allow for retries while maintaining a reasonable overall +timeout. + +x-envoy-upstream-service-time +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Contains the time in milliseconds spent by the upstream host processing the request. This is useful +if the client wants to determine service time compared to network latency. This header is set on +responses. + +.. 
_config_http_filters_router_x-envoy-original-path: + +x-envoy-original-path +^^^^^^^^^^^^^^^^^^^^^ + +If the route utilizes :ref:`prefix_rewrite `, +Envoy will put the original path header in this header. This can be useful for logging and +debugging. + +.. _config_http_filters_router_x-envoy-immediate-health-check-fail: + +x-envoy-immediate-health-check-fail +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If the upstream host returns this header (set to any value), Envoy will immediately assume the +upstream host has failed :ref:`active health checking ` (if the +cluster has been :ref:`configured ` for active health checking). +This can be used to fast fail an upstream host via standard data plane processing without waiting +for the next health check interval. The host can become healthy again via standard active health +checks. See the :ref:`health checking overview ` for more +information. + +.. _config_http_filters_router_x-envoy-overloaded: + +x-envoy-overloaded +^^^^^^^^^^^^^^^^^^ + +If this header is set by upstream, Envoy will not retry. Currently the value of the header is not +looked at, only its presence. Additionally, Envoy will set this header on the downstream response +if a request was dropped due to either :ref:`maintenance mode +` or upstream :ref:`circuit breaking +`. + +.. _config_http_filters_router_x-envoy-decorator-operation: + +x-envoy-decorator-operation +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If this header is present on ingress requests, its value will override any locally defined +operation (span) name on the server span generated by the tracing mechanism. Similarly, if +this header is present on an egress response, its value will override any locally defined +operation (span) name on the client span. + +.. _config_http_filters_router_stats: + +Statistics +---------- + +The router outputs many statistics in the cluster namespace (depending on the cluster specified in +the chosen route). See :ref:`here ` for more information. 
+ +The router filter outputs statistics in the *http..* namespace. The :ref:`stat +prefix ` comes from the owning HTTP connection manager. + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + no_route, Counter, Total requests that had no route and resulted in a 404 + no_cluster, Counter, Total requests in which the target cluster did not exist and resulted in a 404 + rq_redirect, Counter, Total requests that resulted in a redirect response + rq_direct_response, Counter, Total requests that resulted in a direct response + rq_total, Counter, Total routed requests + +Virtual cluster statistics are output in the +*vhost..vcluster..* namespace and include the following +statistics: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + upstream_rq_<\*xx>, Counter, "Aggregate HTTP response codes (e.g., 2xx, 3xx, etc.)" + upstream_rq_<\*>, Counter, "Specific HTTP response codes (e.g., 201, 302, etc.)" + upstream_rq_time, Histogram, Request time milliseconds + +Runtime +------- + +The router filter supports the following runtime settings: + +upstream.base_retry_backoff_ms + Base exponential retry back off time. See :ref:`here ` for more + information. Defaults to 25ms. + +.. _config_http_filters_router_runtime_maintenance_mode: + +upstream.maintenance_mode. + % of requests that will result in an immediate 503 response. This overrides any routing behavior + for requests that would have been destined for . This can be used for load + shedding, failure injection, etc. Defaults to disabled. + +upstream.use_retry + % of requests that are eligible for retry. This configuration is checked before any other retry + configuration and can be used to fully disable retries across all Envoys if needed. 
diff --git a/docs/root/configuration/http_filters/squash_filter.rst b/docs/root/configuration/http_filters/squash_filter.rst new file mode 100644 index 000000000000..0e34f7d47d1a --- /dev/null +++ b/docs/root/configuration/http_filters/squash_filter.rst @@ -0,0 +1,40 @@ +.. _config_http_filters_squash: + +Squash +====== + +Squash is an HTTP filter which enables Envoy to integrate with Squash microservices debugger. +Code: https://github.com/solo-io/squash, API Docs: https://squash.solo.io/ + +Overview +-------- + +The main use case for this filter is in a service mesh, where Envoy is deployed as a sidecar. +Once a request marked for debugging enters the mesh, the Squash Envoy filter reports its 'location' +in the cluster to the Squash server - as there is a 1-1 mapping between Envoy sidecars and +application containers, the Squash server can find and attach a debugger to the application container. +The Squash filter also holds the request until a debugger is attached (or a timeout occurs). This +enables developers (via Squash) to attach a native debugger to the container that will handle the +request, before the request arrive to the application code, without any changes to the cluster. + +Configuration +------------- + +* :ref:`v1 API reference ` +* :ref:`v2 API reference ` + +How it works +------------ + +When the Squash filter encounters a request containing the header 'x-squash-debug' it will: + +1. Delay the incoming request. +2. Contact the Squash server and request the creation of a DebugAttachment + + - On the Squash server side, Squash will attempt to attach a debugger to the application Envoy + proxies to. On success, it changes the state of the DebugAttachment + to attached. + +3. Wait until the Squash server updates the DebugAttachment object's state to attached (or + error state) +4. 
Resume the incoming request diff --git a/docs/root/configuration/listener_filters/listener_filters.rst b/docs/root/configuration/listener_filters/listener_filters.rst new file mode 100644 index 000000000000..a61fd3ab29d8 --- /dev/null +++ b/docs/root/configuration/listener_filters/listener_filters.rst @@ -0,0 +1,11 @@ +.. _config_listener_filters: + +Listener filters +================ + +Envoy has the follow builtin listener filters. + +.. toctree:: + :maxdepth: 2 + + original_dst_filter diff --git a/docs/root/configuration/listener_filters/original_dst_filter.rst b/docs/root/configuration/listener_filters/original_dst_filter.rst new file mode 100644 index 000000000000..0ff7e2f66d10 --- /dev/null +++ b/docs/root/configuration/listener_filters/original_dst_filter.rst @@ -0,0 +1,14 @@ +.. _config_listener_filters_original_dst: + +Original Destination +==================== + +Original destination listener filter reads the SO_ORIGINAL_DST socket option set when a connection +has been redirected by an iptables REDIRECT target, or by an iptables TPROXY target in combination +with setting the listener's :ref:`transparent ` option. +Later processing in Envoy sees the restored destination address as the connection's local address, +rather than the address at which the listener is listening at. Furthermore, :ref:`an original +destination cluster ` may be used to +forward HTTP requests or TCP connections to the restored destination address. + +* :ref:`v2 API reference ` diff --git a/docs/root/configuration/listeners/lds.rst b/docs/root/configuration/listeners/lds.rst new file mode 100644 index 000000000000..3376235d0ce8 --- /dev/null +++ b/docs/root/configuration/listeners/lds.rst @@ -0,0 +1,50 @@ +.. _config_listeners_lds: + +Listener discovery service (LDS) +================================ + +The listener discovery service (LDS) is an optional API that Envoy will call to dynamically fetch +listeners. 
Envoy will reconcile the API response and add, modify, or remove known listeners +depending on what is required. + +The semantics of listener updates are as follows: + +* Every listener must have a unique :ref:`name `. If a name is not + provided, Envoy will create a UUID. Listeners that are to be dynamically updated should have a + unique name supplied by the management server. +* When a listener is added, it will be "warmed" before taking traffic. For example, if the listener + references an :ref:`RDS ` configuration, that configuration will be + resolved and fetched before the listener is moved to "active." +* Listeners are effectively constant once created. Thus, when a listener is updated, an entirely + new listener is created (with the same listen socket). This listener goes through the same + warming process described above for a newly added listener. +* When a listener is updated or removed, the old listener will be placed into a "draining" state + much like when the entire server is drained for restart. Connections owned by the listener will + be gracefully closed (if possible) for some period of time before the listener is removed and any + remaining connections are closed. The drain time is set via the :option:`--drain-time-s` option. + + .. note:: + + Any listeners that are statically defined within the Envoy configuration cannot be modified or + removed via the LDS API. + +Configuration +------------- + +* :ref:`v1 LDS API ` +* :ref:`v2 LDS API ` + +Statistics +---------- + +LDS has a statistics tree rooted at *listener_manager.lds.* with the following statistics: + +.. 
csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + config_reload, Counter, Total API fetches that resulted in a config reload due to a different config + update_attempt, Counter, Total API fetches attempted + update_success, Counter, Total API fetches completed successfully + update_failure, Counter, Total API fetches that failed (either network or schema errors) + version, Gauge, Hash of the contents from the last successful API fetch diff --git a/docs/root/configuration/listeners/listeners.rst b/docs/root/configuration/listeners/listeners.rst new file mode 100644 index 000000000000..947c7a8b18d8 --- /dev/null +++ b/docs/root/configuration/listeners/listeners.rst @@ -0,0 +1,17 @@ +.. _config_listeners: + +Listeners +========= + +The top level Envoy configuration contains a list of :ref:`listeners `. +Each individual listener configuration has the following format: + +* :ref:`v1 API reference ` +* :ref:`v2 API reference ` + +.. toctree:: + :hidden: + + stats + runtime + lds diff --git a/docs/root/configuration/listeners/runtime.rst b/docs/root/configuration/listeners/runtime.rst new file mode 100644 index 000000000000..4683d18e5305 --- /dev/null +++ b/docs/root/configuration/listeners/runtime.rst @@ -0,0 +1,8 @@ +Runtime +======= + +Listeners support the following runtime settings: + +ssl.alt_alpn + What % of requests use the configured :ref:`alt_alpn ` + protocol string. Defaults to 0. diff --git a/docs/root/configuration/listeners/stats.rst b/docs/root/configuration/listeners/stats.rst new file mode 100644 index 000000000000..d0c30c5e7696 --- /dev/null +++ b/docs/root/configuration/listeners/stats.rst @@ -0,0 +1,47 @@ +.. _config_listener_stats: + +Statistics +========== + +Listener +-------- + +Every listener has a statistics tree rooted at *listener.
.* with the following statistics: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + downstream_cx_total, Counter, Total connections + downstream_cx_destroy, Counter, Total destroyed connections + downstream_cx_active, Gauge, Total active connections + downstream_cx_length_ms, Histogram, Connection length milliseconds + ssl.connection_error, Counter, Total TLS connection errors not including failed certificate verifications + ssl.handshake, Counter, Total successful TLS connection handshakes + ssl.session_reused, Counter, Total successful TLS session resumptions + ssl.no_certificate, Counter, Total successul TLS connections with no client certificate + ssl.fail_no_sni_match, Counter, Total TLS connections that were rejected because of missing SNI match + ssl.fail_verify_no_cert, Counter, Total TLS connections that failed because of missing client certificate + ssl.fail_verify_error, Counter, Total TLS connections that failed CA verification + ssl.fail_verify_san, Counter, Total TLS connections that failed SAN verification + ssl.fail_verify_cert_hash, Counter, Total TLS connections that failed certificate pinning verification + ssl.cipher., Counter, Total TLS connections that used + +Listener manager +---------------- + +The listener manager has a statistics tree rooted at *listener_manager.* with the following +statistics. Any ``:`` character in the stats name is replaced with ``_``. + +.. 
csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + listener_added, Counter, Total listeners added (either via static config or LDS) + listener_modified, Counter, Total listeners modified (via LDS) + listener_removed, Counter, Total listeners removed (via LDS) + listener_create_success, Counter, Total listener objects successfully added to workers + listener_create_failure, Counter, Total failed listener object additions to workers + total_listeners_warming, Gauge, Number of currently warming listeners + total_listeners_active, Gauge, Number of currently active listeners + total_listeners_draining, Gauge, Number of currently draining listeners diff --git a/docs/root/configuration/network_filters/client_ssl_auth_filter.rst b/docs/root/configuration/network_filters/client_ssl_auth_filter.rst new file mode 100644 index 000000000000..ea166311cab2 --- /dev/null +++ b/docs/root/configuration/network_filters/client_ssl_auth_filter.rst @@ -0,0 +1,59 @@ +.. _config_network_filters_client_ssl_auth: + +Client TLS authentication +========================= + +* Client TLS authentication filter :ref:`architecture overview ` +* :ref:`v1 API reference ` +* :ref:`v2 API reference ` + +.. _config_network_filters_client_ssl_auth_stats: + +Statistics +---------- + +Every configured client TLS authentication filter has statistics rooted at +*auth.clientssl..* with the following statistics: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + update_success, Counter, Total principal update successes + update_failure, Counter, Total principal update failures + auth_no_ssl, Counter, Total connections ignored due to no TLS + auth_ip_white_list, Counter, Total connections allowed due to the IP white list + auth_digest_match, Counter, Total connections allowed due to certificate match + auth_digest_no_match, Counter, Total connections denied due to no certificate match + total_principals, Gauge, Total loaded principals + +.. 
_config_network_filters_client_ssl_auth_rest_api: + +REST API +-------- + +.. http:get:: /v1/certs/list/approved + + The authentication filter will call this API every refresh interval to fetch the current list + of approved certificates/principals. The expected JSON response looks like: + + .. code-block:: json + + { + "certificates": [] + } + + certificates + *(required, array)* list of approved certificates/principals. + + Each certificate object is defined as: + + .. code-block:: json + + { + "fingerprint_sha256": "...", + } + + fingerprint_sha256 + *(required, string)* The SHA256 hash of the approved client certificate. Envoy will match this + hash to the presented client certificate to determine whether there is a digest match. diff --git a/docs/root/configuration/network_filters/echo_filter.rst b/docs/root/configuration/network_filters/echo_filter.rst new file mode 100644 index 000000000000..0073eedaa51d --- /dev/null +++ b/docs/root/configuration/network_filters/echo_filter.rst @@ -0,0 +1,10 @@ +.. _config_network_filters_echo: + +Echo +==== + +The echo is a trivial network filter mainly meant to demonstrate the network filter API. If +installed it will echo (write) all received data back to the connected downstream client. + +* :ref:`v1 API reference ` +* :ref:`v2 API reference ` diff --git a/docs/root/configuration/network_filters/mongo_proxy_filter.rst b/docs/root/configuration/network_filters/mongo_proxy_filter.rst new file mode 100644 index 000000000000..0ee4aa111bae --- /dev/null +++ b/docs/root/configuration/network_filters/mongo_proxy_filter.rst @@ -0,0 +1,176 @@ +.. _config_network_filters_mongo_proxy: + +Mongo proxy +=========== + +- MongoDB :ref:`architecture overview ` +- :ref:`v1 API reference ` +- :ref:`v2 API reference ` + +.. _config_network_filters_mongo_proxy_fault_injection: + +Fault injection +--------------- + +The Mongo proxy filter supports fault injection. See the v1 and v2 API reference for how to +configure. + +.. 
_config_network_filters_mongo_proxy_stats: + +Statistics +---------- + +Every configured MongoDB proxy filter has statistics rooted at *mongo..* with the +following statistics: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + decoding_error, Counter, Number of MongoDB protocol decoding errors + delay_injected, Counter, Number of times the delay is injected + op_get_more, Counter, Number of OP_GET_MORE messages + op_insert, Counter, Number of OP_INSERT messages + op_kill_cursors, Counter, Number of OP_KILL_CURSORS messages + op_query, Counter, Number of OP_QUERY messages + op_query_tailable_cursor, Counter, Number of OP_QUERY with tailable cursor flag set + op_query_no_cursor_timeout, Counter, Number of OP_QUERY with no cursor timeout flag set + op_query_await_data, Counter, Number of OP_QUERY with await data flag set + op_query_exhaust, Counter, Number of OP_QUERY with exhaust flag set + op_query_no_max_time, Counter, Number of queries without maxTimeMS set + op_query_scatter_get, Counter, Number of scatter get queries + op_query_multi_get, Counter, Number of multi get queries + op_query_active, Gauge, Number of active queries + op_reply, Counter, Number of OP_REPLY messages + op_reply_cursor_not_found, Counter, Number of OP_REPLY with cursor not found flag set + op_reply_query_failure, Counter, Number of OP_REPLY with query failure flag set + op_reply_valid_cursor, Counter, Number of OP_REPLY with a valid cursor + cx_destroy_local_with_active_rq, Counter, Connections destroyed locally with an active query + cx_destroy_remote_with_active_rq, Counter, Connections destroyed remotely with an active query + cx_drain_close, Counter, Connections gracefully closed on reply boundaries during server drain + +Scatter gets +^^^^^^^^^^^^ + +Envoy defines a *scatter get* as any query that does not use an *_id* field as a query parameter. +Envoy looks in both the top level document as well as within a *$query* field for *_id*. 
+ +Multi gets +^^^^^^^^^^ + +Envoy defines a *multi get* as any query that does use an *_id* field as a query parameter, but +where *_id* is not a scalar value (i.e., a document or an array). Envoy looks in both the top level +document as well as within a *$query* field for *_id*. + +.. _config_network_filters_mongo_proxy_comment_parsing: + +$comment parsing +^^^^^^^^^^^^^^^^ + +If a query has a top level *$comment* field (typically in addition to a *$query* field), Envoy will +parse it as JSON and look for the following structure: + +.. code-block:: json + + { + "callingFunction": "..." + } + +callingFunction + *(required, string)* the function that made the query. If available, the function will be used + in :ref:`callsite ` query statistics. + +Per command statistics +^^^^^^^^^^^^^^^^^^^^^^ + +The MongoDB filter will gather statistics for commands in the *mongo..cmd..* +namespace. + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + total, Counter, Number of commands + reply_num_docs, Histogram, Number of documents in reply + reply_size, Histogram, Size of the reply in bytes + reply_time_ms, Histogram, Command time in milliseconds + +.. _config_network_filters_mongo_proxy_collection_stats: + +Per collection query statistics +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The MongoDB filter will gather statistics for queries in the +*mongo..collection..query.* namespace. + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + total, Counter, Number of queries + scatter_get, Counter, Number of scatter gets + multi_get, Counter, Number of multi gets + reply_num_docs, Histogram, Number of documents in reply + reply_size, Histogram, Size of the reply in bytes + reply_time_ms, Histogram, Query time in milliseconds + +.. 
_config_network_filters_mongo_proxy_callsite_stats: + +Per collection and callsite query statistics +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If the application provides the :ref:`calling function +` in the *$comment* field, Envoy will generate +per callsite statistics. These statistics match the :ref:`per collection statistics +` but are found in the +*mongo..collection..callsite..query.* namespace. + +.. _config_network_filters_mongo_proxy_runtime: + +Runtime +------- + +The Mongo proxy filter supports the following runtime settings: + +mongo.connection_logging_enabled + % of connections that will have logging enabled. Defaults to 100. This allows only a % of + connections to have logging, but for all messages on those connections to be logged. + +mongo.proxy_enabled + % of connections that will have the proxy enabled at all. Defaults to 100. + +mongo.logging_enabled + % of messages that will be logged. Defaults to 100. If less than 100, queries may be logged + without replies, etc. + +mongo.drain_close_enabled + % of connections that will be drain closed if the server is draining and would otherwise + attempt a drain close. Defaults to 100. + +mongo.fault.fixed_delay.percent + Probability of an eligible MongoDB operation to be affected by + the injected fault when there is no active fault. + Defaults to the *percent* specified in the config. + +mongo.fault.fixed_delay.duration_ms + The delay duration in milliseconds. Defaults to the *duration_ms* specified in the config. + +Access log format +----------------- + +The access log format is not customizable and has the following layout: + +.. code-block:: json + + {"time": "...", "message": "...", "upstream_host": "..."} + +time + System time that complete message was parsed, including milliseconds. + +message + Textual expansion of the message. Whether the message is fully expanded depends on the context. + Sometimes summary data is presented to avoid extremely large log sizes.
+ +upstream_host + The upstream host that the connection is proxying to, if available. This is populated if the + filter is used along with the :ref:`TCP proxy filter `. diff --git a/docs/root/configuration/network_filters/network_filters.rst b/docs/root/configuration/network_filters/network_filters.rst new file mode 100644 index 000000000000..715fe799e1b3 --- /dev/null +++ b/docs/root/configuration/network_filters/network_filters.rst @@ -0,0 +1,18 @@ +.. _config_network_filters: + +Network filters +=============== + +In addition to the :ref:`HTTP connection manager ` which is large +enough to have its own section in the configuration guide, Envoy has the following builtin network +filters. + +.. toctree:: + :maxdepth: 2 + + client_ssl_auth_filter + echo_filter + mongo_proxy_filter + rate_limit_filter + redis_proxy_filter + tcp_proxy_filter diff --git a/docs/root/configuration/network_filters/rate_limit_filter.rst b/docs/root/configuration/network_filters/rate_limit_filter.rst new file mode 100644 index 000000000000..4cefd6813699 --- /dev/null +++ b/docs/root/configuration/network_filters/rate_limit_filter.rst @@ -0,0 +1,39 @@ +.. _config_network_filters_rate_limit: + +Rate limit +========== + +* Global rate limiting :ref:`architecture overview ` +* :ref:`v1 API reference ` +* :ref:`v2 API reference ` + +.. _config_network_filters_rate_limit_stats: + +Statistics +---------- + +Every configured rate limit filter has statistics rooted at *ratelimit..* with the +following statistics: + +..
csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + total, Counter, Total requests to the rate limit service + error, Counter, Total errors contacting the rate limit service + over_limit, Counter, Total over limit responses from the rate limit service + ok, Counter, Total under limit responses from the rate limit service + cx_closed, Counter, Total connections closed due to an over limit response from the rate limit service + active, Gauge, Total active requests to the rate limit service + +Runtime +------- + +The network rate limit filter supports the following runtime settings: + +ratelimit.tcp_filter_enabled + % of connections that will call the rate limit service. Defaults to 100. + +ratelimit.tcp_filter_enforcing + % of connections that will call the rate limit service and enforce the decision. Defaults to 100. + This can be used to test what would happen before fully enforcing the outcome. diff --git a/docs/root/configuration/network_filters/redis_proxy_filter.rst b/docs/root/configuration/network_filters/redis_proxy_filter.rst new file mode 100644 index 000000000000..0ebdbfae8112 --- /dev/null +++ b/docs/root/configuration/network_filters/redis_proxy_filter.rst @@ -0,0 +1,69 @@ +.. _config_network_filters_redis_proxy: + +Redis proxy +=========== + +* Redis :ref:`architecture overview ` +* :ref:`v1 API reference ` +* :ref:`v2 API reference ` + +.. _config_network_filters_redis_proxy_stats: + +Statistics +---------- + +Every configured Redis proxy filter has statistics rooted at *redis..* with the +following statistics: + +.. 
csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + downstream_cx_active, Gauge, Total active connections + downstream_cx_protocol_error, Counter, Total protocol errors + downstream_cx_rx_bytes_buffered, Gauge, Total received bytes currently buffered + downstream_cx_rx_bytes_total, Counter, Total bytes received + downstream_cx_total, Counter, Total connections + downstream_cx_tx_bytes_buffered, Gauge, Total sent bytes currently buffered + downstream_cx_tx_bytes_total, Counter, Total bytes sent + downstream_cx_drain_close, Counter, Number of connections closed due to draining + downstream_rq_active, Gauge, Total active requests + downstream_rq_total, Counter, Total requests + + +Splitter statistics +------------------- + +The Redis filter will gather statistics for the command splitter in the +*redis..splitter.* with the following statistics: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + invalid_request, Counter, "Number of requests with an incorrect number of arguments" + unsupported_command, Counter, "Number of commands issued which are not recognized by the + command splitter" + +Per command statistics +---------------------- + +The Redis filter will gather statistics for commands in the +*redis..command..* namespace. + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + total, Counter, Number of commands + +.. _config_network_filters_redis_proxy_per_command_stats: + +Runtime +------- + +The Redis proxy filter supports the following runtime settings: + +redis.drain_close_enabled + % of connections that will be drain closed if the server is draining and would otherwise + attempt a drain close. Defaults to 100. 
diff --git a/docs/root/configuration/network_filters/tcp_proxy_filter.rst b/docs/root/configuration/network_filters/tcp_proxy_filter.rst new file mode 100644 index 000000000000..61feb086794c --- /dev/null +++ b/docs/root/configuration/network_filters/tcp_proxy_filter.rst @@ -0,0 +1,28 @@ +.. _config_network_filters_tcp_proxy: + +TCP proxy +========= + +* TCP proxy :ref:`architecture overview ` +* :ref:`v1 API reference ` +* :ref:`v2 API reference ` + +.. _config_network_filters_tcp_proxy_stats: + +Statistics +---------- + +The TCP proxy filter emits both its own downstream statistics as well as many of the :ref:`cluster +upstream statistics ` where applicable. The downstream +statistics are rooted at *tcp..* with the following statistics: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + downstream_cx_total, Counter, Total number of connections handled by the filter + downstream_cx_no_route, Counter, Number of connections for which no matching route was found + downstream_cx_tx_bytes_total, Counter, Total bytes written to the downstream connection + downstream_cx_tx_bytes_buffered, Gauge, Total bytes currently buffered to the downstream connection + downstream_flow_control_paused_reading_total, Counter, Total number of times flow control paused reading from downstream + downstream_flow_control_resumed_reading_total, Counter, Total number of times flow control resumed reading from downstream diff --git a/docs/root/configuration/overview/v1_overview.rst b/docs/root/configuration/overview/v1_overview.rst new file mode 100644 index 000000000000..e1e0a305a4a7 --- /dev/null +++ b/docs/root/configuration/overview/v1_overview.rst @@ -0,0 +1,117 @@ +.. _config_overview_v1: + +Overview (v1 API) +================= + +.. attention:: + + The v1 configuration/API is now considered legacy and the `deprecation schedule `_ + has been announced. Please upgrade and use the :ref:`v2 configuration/API `. 
+ +The Envoy configuration format is written in JSON and is validated against a JSON schema. The +schema can be found in :repo:`source/common/json/config_schemas.cc`. The main configuration for the +server is contained within the listeners and cluster manager sections. The other top level elements +specify miscellaneous configuration. + +YAML support is also provided as a syntactic convenience for hand-written configurations. Envoy will +internally convert YAML to JSON if a file path ends with .yaml. In the rest of the configuration +documentation, we refer exclusively to JSON. Envoy expects unambiguous YAML scalars, so if a cluster +name (which should be a string) is called *true*, it should be written in the configuration YAML as +*"true"*. The same applies to integer and floating point values (e.g. *1* vs. *1.0* vs. *"1.0"*). + + +.. code-block:: json + + { + "listeners": [], + "lds": "{...}", + "admin": "{...}", + "cluster_manager": "{...}", + "flags_path": "...", + "statsd_udp_ip_address": "...", + "statsd_tcp_cluster_name": "...", + "stats_flush_interval_ms": "...", + "watchdog_miss_timeout_ms": "...", + "watchdog_megamiss_timeout_ms": "...", + "watchdog_kill_timeout_ms": "...", + "watchdog_multikill_timeout_ms": "...", + "tracing": "{...}", + "rate_limit_service": "{...}", + "runtime": "{...}" + } + +:ref:`listeners ` + *(required, array)* An array of :ref:`listeners ` that will be + instantiated by the server. A single Envoy process can contain any number of listeners. + +.. _config_overview_lds: + +:ref:`lds ` + *(optional, object)* Configuration for the Listener Discovery Service (LDS). If not specified + only static listeners are loaded. + +:ref:`admin ` + *(required, object)* Configuration for the :ref:`local administration HTTP server + `. + +:ref:`cluster_manager ` + *(required, object)* Configuration for the :ref:`cluster manager ` + which owns all upstream clusters within the server. + +..
_config_overview_flags_path: + +flags_path + *(optional, string)* The file system path to search for :ref:`startup flag files + `. + +.. _config_overview_statsd_udp_ip_address: + +statsd_udp_ip_address + *(optional, string)* The UDP address of a running statsd compliant listener. If specified, + :ref:`statistics ` will be flushed to this address. IPv4 addresses should + have format host:port (ex: 127.0.0.1:855). IPv6 addresses should have URL format [host]:port + (ex: [::1]:855). + +statsd_tcp_cluster_name + *(optional, string)* The name of a cluster manager cluster that is running a TCP statsd compliant + listener. If specified, Envoy will connect to this cluster to flush :ref:`statistics + `. + +.. _config_overview_stats_flush_interval_ms: + +stats_flush_interval_ms + *(optional, integer)* The time in milliseconds between flushes to configured stats sinks. For + performance reasons Envoy latches counters and only flushes counters and gauges at a periodic + interval. If not specified the default is 5000ms (5 seconds). + +watchdog_miss_timeout_ms + *(optional, integer)* The time in milliseconds after which Envoy counts a nonresponsive thread in the + "server.watchdog_miss" statistic. If not specified the default is 200ms. + +watchdog_megamiss_timeout_ms + *(optional, integer)* The time in milliseconds after which Envoy counts a nonresponsive thread in the + "server.watchdog_mega_miss" statistic. If not specified the default is 1000ms. + +watchdog_kill_timeout_ms + *(optional, integer)* If a watched thread has been nonresponsive for this many milliseconds assume + a programming error and kill the entire Envoy process. Set to 0 to disable kill behavior. If not + specified the default is 0 (disabled). + +watchdog_multikill_timeout_ms + *(optional, integer)* If at least two watched threads have been nonresponsive for at least this many + milliseconds assume a true deadlock and kill the entire Envoy process. Set to 0 to disable this + behavior. 
If not specified the default is 0 (disabled). + +:ref:`tracing ` + *(optional, object)* Configuration for an external :ref:`tracing ` + provider. If not specified, no tracing will be performed. + +:ref:`rate_limit_service ` + *(optional, object)* Configuration for an external :ref:`rate limit service + ` provider. If not specified, any calls to the rate limit service will + immediately return success. + +:ref:`runtime ` + *(optional, object)* Configuration for the :ref:`runtime configuration ` + provider. If not specified, a "null" provider will be used which will result in all defaults being + used. diff --git a/docs/root/configuration/overview/v2_overview.rst b/docs/root/configuration/overview/v2_overview.rst new file mode 100644 index 000000000000..fef93e38eae8 --- /dev/null +++ b/docs/root/configuration/overview/v2_overview.rst @@ -0,0 +1,544 @@ +.. _config_overview_v2: + +Overview (v2 API) +================= + +The Envoy v2 APIs are defined as `proto3 +`_ `Protocol Buffers +`_ in the `data plane API +repository `_. They evolve the +existing :ref:`v1 APIs and concepts ` to support: + +* Streaming delivery of `xDS `_ + API updates via gRPC. This reduces resource requirements and can lower the update latency. +* A new REST-JSON API in which the JSON/YAML formats are derived mechanically via the `proto3 + canonical JSON mapping + `_. +* Delivery of updates via the filesystem, REST-JSON or gRPC endpoints. +* Advanced load balancing through an extended endpoint assignment API and load + and resource utilization reporting to management servers. +* `Stronger consistency and ordering properties + `_ + when needed. The v2 APIs still maintain a baseline eventual consistency model. + +See the `xDS protocol description `_ for +further details on aspects of v2 message exchange between Envoy and the management server. + +.. 
_config_overview_v2_bootstrap: + +Bootstrap configuration +----------------------- + +To use the v2 API, it's necessary to supply a bootstrap configuration file. This +provides static server configuration and configures Envoy to access :ref:`dynamic +configuration if needed `. As with the v1 +JSON/YAML configuration, this is supplied on the command-line via the :option:`-c` +flag, i.e.: + +.. code-block:: console + + ./envoy -c .{json,yaml,pb,pb_text} --v2-config-only + +where the extension reflects the underlying v2 config representation. The +:option:`--v2-config-only` flag is not strictly required as Envoy will attempt +to autodetect the config file version, but this option provides an enhanced +debug experience when configuration parsing fails. + +The :ref:`Bootstrap ` message is the root of the +configuration. A key concept in the :ref:`Bootstrap ` +message is the distinction between static and dynamic resources. Resources such +as a :ref:`Listener ` or :ref:`Cluster +` may be supplied either statically in +:ref:`static_resources ` or have +an xDS service such as :ref:`LDS +` or :ref:`CDS ` configured in +:ref:`dynamic_resources `. + +Example +------- + +Below we will use YAML representation of the config protos and a running example +of a service proxying HTTP from 127.0.0.1:10000 to 127.0.0.2:1234. + +Static +^^^^^^ + +A minimal fully static bootstrap config is provided below: + +..
code-block:: yaml + + admin: + access_log_path: /tmp/admin_access.log + address: + socket_address: { address: 127.0.0.1, port_value: 9901 } + + static_resources: + listeners: + - name: listener_0 + address: + socket_address: { address: 127.0.0.1, port_value: 10000 } + filter_chains: + - filters: + - name: envoy.http_connection_manager + config: + stat_prefix: ingress_http + codec_type: AUTO + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: ["*"] + routes: + - match: { prefix: "/" } + route: { cluster: some_service } + http_filters: + - name: envoy.router + clusters: + - name: some_service + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + hosts: [{ socket_address: { address: 127.0.0.2, port_value: 1234 }}] + +Mostly static with dynamic EDS +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +A bootstrap config that continues from the above example with :ref:`dynamic endpoint +discovery ` via an +:ref:`EDS` gRPC management server listening +on 127.0.0.3:5678 is provided below: + +.. 
code-block:: yaml + + admin: + access_log_path: /tmp/admin_access.log + address: + socket_address: { address: 127.0.0.1, port_value: 9901 } + + static_resources: + listeners: + - name: listener_0 + address: + socket_address: { address: 127.0.0.1, port_value: 10000 } + filter_chains: + - filters: + - name: envoy.http_connection_manager + config: + stat_prefix: ingress_http + codec_type: AUTO + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: ["*"] + routes: + - match: { prefix: "/" } + route: { cluster: some_service } + http_filters: + - name: envoy.router + clusters: + - name: some_service + connect_timeout: 0.25s + lb_policy: ROUND_ROBIN + type: EDS + eds_cluster_config: + eds_config: + api_config_source: + api_type: GRPC + cluster_names: [xds_cluster] + - name: xds_cluster + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + http2_protocol_options: {} + hosts: [{ socket_address: { address: 127.0.0.3, port_value: 5678 }}] + +Notice above that *xds_cluster* is defined to point Envoy at the management server. Even in +an otherwise completely dynamic configuration, some static resources need to +be defined to point Envoy at its xDS management server(s). + +In the above example, the EDS management server could then return a proto encoding of a +:ref:`DiscoveryResponse `: + +.. code-block:: yaml + + version_info: "0" + resources: + - "@type": type.googleapis.com/envoy.api.v2.ClusterLoadAssignment + cluster_name: some_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.2 + port_value: 1234 + + +The versioning and type URL scheme that appear above are explained in more +detail in the `streaming gRPC subscription protocol +`_ +documentation. + +Dynamic +^^^^^^^ + +A fully dynamic bootstrap configuration, in which all resources other than +those belonging to the management server are discovered via xDS is provided +below: + +..
code-block:: yaml + + admin: + access_log_path: /tmp/admin_access.log + address: + socket_address: { address: 127.0.0.1, port_value: 9901 } + + dynamic_resources: + lds_config: + api_config_source: + api_type: GRPC + cluster_names: [xds_cluster] + cds_config: + api_config_source: + api_type: GRPC + cluster_names: [xds_cluster] + + static_resources: + clusters: + - name: xds_cluster + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + http2_protocol_options: {} + hosts: [{ socket_address: { address: 127.0.0.3, port_value: 5678 }}] + +The management server could respond to LDS requests with: + +.. code-block:: yaml + + version_info: "0" + resources: + - "@type": type.googleapis.com/envoy.api.v2.Listener + name: listener_0 + address: + socket_address: + address: 127.0.0.1 + port_value: 10000 + filter_chains: + - filters: + - name: envoy.http_connection_manager + config: + stat_prefix: ingress_http + codec_type: AUTO + rds: + route_config_name: local_route + config_source: + api_config_source: + api_type: GRPC + cluster_names: [xds_cluster] + http_filters: + - name: envoy.router + +The management server could respond to RDS requests with: + +.. code-block:: yaml + + version_info: "0" + resources: + - "@type": type.googleapis.com/envoy.api.v2.RouteConfiguration + name: local_route + virtual_hosts: + - name: local_service + domains: ["*"] + routes: + - match: { prefix: "/" } + route: { cluster: some_service } + +The management server could respond to CDS requests with: + +.. code-block:: yaml + + version_info: "0" + resources: + - "@type": type.googleapis.com/envoy.api.v2.Cluster + name: some_service + connect_timeout: 0.25s + lb_policy: ROUND_ROBIN + type: EDS + eds_cluster_config: + eds_config: + api_config_source: + api_type: GRPC + cluster_names: [xds_cluster] + +The management server could respond to EDS requests with: + +.. 
code-block:: yaml + + version_info: "0" + resources: + - "@type": type.googleapis.com/envoy.api.v2.ClusterLoadAssignment + cluster_name: some_service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.2 + port_value: 1234 + +Management server +----------------- + +A v2 xDS management server will implement the below endpoints as required for +gRPC and/or REST serving. In both streaming gRPC and +REST-JSON cases, a :ref:`DiscoveryRequest ` is sent and a +:ref:`DiscoveryResponse ` received following the +`xDS protocol `_. + +.. _v2_grpc_streaming_endpoints: + +gRPC streaming endpoints +^^^^^^^^^^^^^^^^^^^^^^^^ + +.. http:post:: /envoy.api.v2.ClusterDiscoveryService/StreamClusters + +See `cds.proto +`_ +for the service definition. This is used by Envoy as a client when + +.. code-block:: yaml + + cds_config: + api_config_source: + api_type: GRPC + cluster_names: [some_xds_cluster] + +is set in the :ref:`dynamic_resources +` of the :ref:`Bootstrap +` config. + +.. http:post:: /envoy.api.v2.EndpointDiscoveryService/StreamEndpoints + +See `eds.proto +`_ +for the service definition. This is used by Envoy as a client when + +.. code-block:: yaml + + eds_config: + api_config_source: + api_type: GRPC + cluster_names: [some_xds_cluster] + +is set in the :ref:`eds_cluster_config +` field of the :ref:`Cluster +` config. + +.. http:post:: /envoy.api.v2.ListenerDiscoveryService/StreamListeners + +See `lds.proto +`_ +for the service definition. This is used by Envoy as a client when + +.. code-block:: yaml + + lds_config: + api_config_source: + api_type: GRPC + cluster_names: [some_xds_cluster] + +is set in the :ref:`dynamic_resources +` of the :ref:`Bootstrap +` config. + +.. http:post:: /envoy.api.v2.RouteDiscoveryService/StreamRoutes + +See `rds.proto +`_ +for the service definition. This is used by Envoy as a client when + +.. 
code-block:: yaml + + route_config_name: some_route_name + config_source: + api_config_source: + api_type: GRPC + cluster_names: [some_xds_cluster] + +is set in the :ref:`rds +` field of the :ref:`HttpConnectionManager +` config. + +REST endpoints +^^^^^^^^^^^^^^ + +.. http:post:: /v2/discovery:clusters + +See `cds.proto +`_ +for the service definition. This is used by Envoy as a client when + +.. code-block:: yaml + + cds_config: + api_config_source: + api_type: REST + cluster_names: [some_xds_cluster] + +is set in the :ref:`dynamic_resources +` of the :ref:`Bootstrap +` config. + +.. http:post:: /v2/discovery:endpoints + +See `eds.proto +`_ +for the service definition. This is used by Envoy as a client when + +.. code-block:: yaml + + eds_config: + api_config_source: + api_type: REST + cluster_names: [some_xds_cluster] + +is set in the :ref:`eds_cluster_config +` field of the :ref:`Cluster +` config. + +.. http:post:: /v2/discovery:listeners + +See `lds.proto +`_ +for the service definition. This is used by Envoy as a client when + +.. code-block:: yaml + + lds_config: + api_config_source: + api_type: REST + cluster_names: [some_xds_cluster] + +is set in the :ref:`dynamic_resources +` of the :ref:`Bootstrap +` config. + +.. http:post:: /v2/discovery:routes + +See `rds.proto +`_ +for the service definition. This is used by Envoy as a client when + +.. code-block:: yaml + + route_config_name: some_route_name + config_source: + api_config_source: + api_type: REST + cluster_names: [some_xds_cluster] + +is set in the :ref:`rds +` field of the :ref:`HttpConnectionManager +` config. + +.. _config_overview_v2_ads: + +Aggregated Discovery Service +---------------------------- + +While Envoy fundamentally employs an eventual consistency model, ADS provides an +opportunity to sequence API update pushes and ensure affinity of a single +management server for an Envoy node for API updates. 
ADS allows one or more APIs +and their resources to be delivered on a single, bidirectional gRPC stream by +the management server. Without this, some APIs such as RDS and EDS may require +the management of multiple streams and connections to distinct management +servers. + +ADS will allow for hitless updates of configuration by appropriate sequencing. +For example, suppose *foo.com* was mapped to cluster *X*. We wish to change the +mapping in the route table to point *foo.com* at cluster *Y*. In order to do +this, a CDS/EDS update must first be delivered containing both clusters *X* and +*Y*. + +Without ADS, the CDS/EDS/RDS streams may point at distinct management servers, +or when on the same management server at distinct gRPC streams/connections that +require coordination. The EDS resource requests may be split across two distinct +streams, one for *X* and one for *Y*. ADS allows these to be coalesced to a +single stream to a single management server, avoiding the need for distributed +synchronization to correctly sequence the update. With ADS, the management +server would deliver the CDS, EDS and then RDS updates on a single stream. + +ADS is only available for gRPC streaming (not REST) and is described more fully +in `this +`_ +document. The gRPC endpoint is: + +.. http:post:: /envoy.api.v2.AggregatedDiscoveryService/StreamAggregatedResources + +See `discovery.proto +`_ +for the service definition. This is used by Envoy as a client when + +.. code-block:: yaml + + ads_config: + api_type: GRPC + cluster_names: [some_ads_cluster] + +is set in the :ref:`dynamic_resources +` of the :ref:`Bootstrap +` config. + +When this is set, any of the configuration sources :ref:`above ` can +be set to use the ADS channel. For example, a LDS config could be changed from + +.. code-block:: yaml + + lds_config: + api_config_source: + api_type: REST + cluster_names: [some_xds_cluster] + +to + +..
code-block:: yaml + + lds_config: {ads: {}} + +with the effect that the LDS stream will be directed to *some_ads_cluster* over +the shared ADS channel. + +.. _config_overview_v2_status: + +Status +------ + +All features described in the :ref:`v2 API reference ` are +implemented unless otherwise noted. In the v2 API reference and the +`v2 API repository +`_, all protos are +*frozen* unless they are tagged as *draft* or *experimental*. Here, *frozen* +means that we will not break wire format compatibility. + +*Frozen* protos may be further extended, e.g. by adding new fields, in a +manner that does not break `backwards compatibility +`_. +Fields in the above protos may be later deprecated, subject to the +`breaking change policy +`_, +when their related functionality is no longer required. While frozen APIs +have their wire format compatibility preserved, we reserve the right to change +proto namespaces, file locations and nesting relationships, which may cause +breaking code changes. We will aim to minimize the churn here. + +Protos tagged *draft*, meaning that they are near finalized, are +likely to be at least partially implemented in Envoy but may have wire format +breaking changes made prior to freezing. + +Protos tagged *experimental* have the same caveats as draft protos +and may have major changes made prior to Envoy implementation and freezing. + +The current open v2 API issues are tracked `here +`_. diff --git a/docs/root/configuration/rate_limit.rst b/docs/root/configuration/rate_limit.rst new file mode 100644 index 000000000000..8dc5638a2402 --- /dev/null +++ b/docs/root/configuration/rate_limit.rst @@ -0,0 +1,18 @@ +.. _config_rate_limit_service: + +Rate limit service +================== + +The :ref:`rate limit service ` configuration specifies the global rate +limit service Envoy should talk to when it needs to make global rate limit decisions. 
If no rate +limit service is configured, a "null" service will be used which will always return OK if called. + +* :ref:`v1 API reference ` +* :ref:`v2 API reference ` + +gRPC service IDL +---------------- + +Envoy expects the rate limit service to support the gRPC IDL specified in +:repo:`/source/common/ratelimit/ratelimit.proto`. See the IDL documentation for more information +on how the API works. See Lyft's reference implementation `here `_. diff --git a/docs/root/configuration/runtime.rst b/docs/root/configuration/runtime.rst new file mode 100644 index 000000000000..a13bff7a883e --- /dev/null +++ b/docs/root/configuration/runtime.rst @@ -0,0 +1,96 @@ +.. _config_runtime: + +Runtime +======= + +The :ref:`runtime configuration ` specifies the location of the local file +system tree that contains re-loadable configuration elements. Values can be viewed at the +:ref:`/runtime admin endpoint `. Values can be modified and +added at the :ref:`/runtime_modify admin endpoint `. If +runtime is not configured, an empty provider is used which has the effect of using all defaults +built into the code, except for any values added via `/runtime_modify`. + +.. attention:: + + Use the :ref:`/runtime_modify` endpoint with care. + Changes are effective immediately. It is **critical** that the admin interface is :ref:`properly + secured `. + + +* :ref:`v1 API reference ` +* :ref:`v2 API reference ` + +File system layout +------------------ + +Various sections of the configuration guide describe the runtime settings that are available. +For example, :ref:`here ` are the runtime settings for +upstream clusters. + +Assume that the folder ``/srv/runtime/v1`` points to the actual file system path where global +runtime configurations are stored. 
The following would be a typical configuration setting for +runtime: + +* *symlink_root*: ``/srv/runtime/current`` +* *subdirectory*: ``envoy`` +* *override_subdirectory*: ``envoy_override`` + +Where ``/srv/runtime/current`` is a symbolic link to ``/srv/runtime/v1``. + +Each '.' in a runtime key indicates a new directory in the hierarchy, rooted at *symlink_root* + +*subdirectory*. For example, the *health_check.min_interval* key would have the following full +file system path (using the symbolic link): + +``/srv/runtime/current/envoy/health_check/min_interval`` + +The terminal portion of a path is the file. The contents of the file constitute the runtime value. +When reading numeric values from a file, spaces and new lines will be ignored. + +The *override_subdirectory* is used along with the :option:`--service-cluster` CLI option. Assume +that :option:`--service-cluster` has been set to ``my-cluster``. Envoy will first look for the +*health_check.min_interval* key in the following full file system path: + +``/srv/runtime/current/envoy_override/my-cluster/health_check/min_interval`` + +If found, the value will override any value found in the primary lookup path. This allows the user +to customize the runtime values for individual clusters on top of global defaults. + +.. _config_runtime_comments: + +Comments +-------- + +Lines starting with ``#`` as the first character are treated as comments. + +Comments can be used to provide context on an existing value. Comments are also useful in an +otherwise empty file to keep a placeholder for deployment in a time of need. + +.. _config_runtime_symbolic_link_swap: + +Updating runtime values via symbolic link swap +---------------------------------------------- + +There are two steps to update any runtime value. First, create a hard copy of the entire runtime +tree and update the desired runtime values. 
Second, atomically swap the symbolic link root from the +old tree to the new runtime tree, using the equivalent of the following command: + +.. code-block:: console + + /srv/runtime:~$ ln -s /srv/runtime/v2 new && mv -Tf new current + +It's beyond the scope of this document how the file system data is deployed, garbage collected, etc. + +Statistics +---------- + +The file system runtime provider emits some statistics in the *runtime.* namespace. + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + load_error, Counter, Total number of load attempts that resulted in an error + override_dir_not_exists, Counter, Total number of loads that did not use an override directory + override_dir_exists, Counter, Total number of loads that did use an override directory + load_success, Counter, Total number of load attempts that were successful + num_keys, Gauge, Number of keys currently loaded diff --git a/docs/root/configuration/statistics.rst b/docs/root/configuration/statistics.rst new file mode 100644 index 000000000000..6ac411f59276 --- /dev/null +++ b/docs/root/configuration/statistics.rst @@ -0,0 +1,45 @@ +.. _statistics: + +Statistics +========== + +A few statistics are emitted to report statistics system behavior: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + stats.overflow, Counter, Total number of times Envoy cannot allocate a statistic due to a shortage of shared memory + +Server +------ + +Server related statistics are rooted at *server.* with following statistics: + +.. 
csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + uptime, Gauge, Current server uptime in seconds + memory_allocated, Gauge, Current amount of allocated memory in bytes + memory_heap_size, Gauge, Current reserved heap size in bytes + live, Gauge, "1 if the server is not currently draining, 0 otherwise" + parent_connections, Gauge, Total connections of the old Envoy process on hot restart + total_connections, Gauge, Total connections of both new and old Envoy processes + version, Gauge, Integer represented version number based on SCM revision + days_until_first_cert_expiring, Gauge, Number of days until the next certificate being managed will expire + +File system +----------- + +Statistics related to file system are emitted in the *filesystem.* namespace. + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + write_buffered, Counter, Total number of times file data is moved to Envoy's internal flush buffer + write_completed, Counter, Total number of times a file was written + flushed_by_timer, Counter, Total number of times internal flush buffers are written to a file due to flush timeout + reopen_failed, Counter, Total number of times a file failed to be opened + write_total_buffered, Gauge, Current total size of internal flush buffer in bytes diff --git a/docs/root/configuration/tools/router_check.rst b/docs/root/configuration/tools/router_check.rst new file mode 100644 index 000000000000..3e12c6660aef --- /dev/null +++ b/docs/root/configuration/tools/router_check.rst @@ -0,0 +1,170 @@ +.. _config_tools_router_check_tool: + +Route table check tool +====================== + +**NOTE: The following configuration is for the route table check tool only and is not part of the Envoy binary. +The route table check tool is a standalone binary that can be used to verify Envoy's routing for a given configuration +file.** + +The following specifies input to the route table check tool. 
The route table check tool checks if +the route returned by a :ref:`router ` matches what is expected. +The tool can be used to check cluster name, virtual cluster name, +virtual host name, manual path rewrite, manual host rewrite, path redirect, and +header field matches. Extensions for other test cases can be added. Details about installing the tool +and sample tool input/output can be found at :ref:`installation `. + +The route table check tool config is composed of an array of json test objects. Each test object is composed of +three parts. + +Test name + This field specifies the name of each test object. + +Input values + The input value fields specify the parameters to be passed to the router. Example input fields include + the :authority, :path, and :method header fields. The :authority and :path fields specify the url + sent to the router and are required. All other input fields are optional. + +Validate + The validate fields specify the expected values and test cases to check. At least one test + case is required. + +A simple tool configuration json has one test case and is written as follows. The test +expects a cluster name match of "instant-server".:: + + [ + { + "test_name": "Cluster_name_test", + "input": + { + ":authority":"api.lyft.com", + ":path": "/api/locations" + } + "validate": + { + "cluster_name": "instant-server" + } + } + ] + +.. code-block:: json + + [ + { + "test_name": "...", + "input": + { + ":authority": "...", + ":path": "...", + ":method": "...", + "internal" : "...", + "random_value" : "...", + "ssl" : "...", + "additional_headers": [ + { + "field": "...", + "value": "..." + }, + { + "..." + } + ] + } + "validate": { + "cluster_name": "...", + "virtual_cluster_name": "...", + "virtual_host_name": "...", + "host_rewrite": "...", + "path_rewrite": "...", + "path_redirect": "...", + "header_fields" : [ + { + "field": "...", + "value": "..." + }, + { + "..." + } + ] + } + }, + { + "..." 
 + } + ] + +test_name + *(required, string)* The name of a test object. + +input + *(required, object)* Input values sent to the router that determine the returned route. + + :authority + *(required, string)* The url authority. This value along with the path parameter define + the url to be matched. An example authority value is "api.lyft.com". + + :path + *(required, string)* The url path. An example path value is "/foo". + + :method + *(optional, string)* The request method. If not specified, the default method is GET. The options + are GET, PUT, or POST. + + internal + *(optional, boolean)* A flag that determines whether to set x-envoy-internal to "true". + If not specified, or if internal is equal to false, x-envoy-internal is not set. + + random_value + *(optional, integer)* An integer used to identify the target for weighted cluster selection. + The default value of random_value is 0. + + ssl + *(optional, boolean)* A flag that determines whether to set x-forwarded-proto to https or http. + By setting x-forwarded-proto to a given protocol, the tool is able to simulate the behavior of + a client issuing a request via http or https. By default ssl is false which corresponds to + x-forwarded-proto set to http. + + additional_headers + *(optional, array)* Additional headers to be added as input for route determination. The ":authority", + ":path", ":method", "x-forwarded-proto", and "x-envoy-internal" fields are specified by the other config + options and should not be set here. + + field + *(required, string)* The name of the header field to add. + + value + *(required, string)* The value of the header field to add. + +validate + *(required, object)* The validate object specifies the returned route parameters to match. At least one + test parameter must be specified. Use "" (empty string) to indicate that no return value is expected. + For example, to test that no cluster match is expected use {"cluster_name": ""}. 
 + + cluster_name + *(optional, string)* Match the cluster name. + + virtual_cluster_name + *(optional, string)* Match the virtual cluster name. + + virtual_host_name + *(optional, string)* Match the virtual host name. + + host_rewrite + *(optional, string)* Match the host header field after rewrite. + + path_rewrite + *(optional, string)* Match the path header field after rewrite. + + path_redirect + *(optional, string)* Match the returned redirect path. + + header_fields + *(optional, array)* Match the listed header fields. Example header fields include the ":path", "cookie", + and "date" fields. The header fields are checked after all other test cases. Thus, the header fields checked + will be those of the redirected or rewritten routes when applicable. + + field + *(required, string)* The name of the header field to match. + + value + *(required, string)* The value of the header field to match. diff --git a/docs/root/extending/extending.rst b/docs/root/extending/extending.rst new file mode 100644 index 000000000000..8a6f4e22ced3 --- /dev/null +++ b/docs/root/extending/extending.rst @@ -0,0 +1,10 @@ +.. _extending: + +Extending Envoy for custom use cases +==================================== + +The Envoy architecture makes it fairly easily extensible via both :ref:`network filters +` and :ref:`HTTP filters `. + +An example of how to add a network filter and structure the repository and build dependencies can +be found at `envoy-filter-example `_. diff --git a/docs/root/faq/binaries.rst b/docs/root/faq/binaries.rst new file mode 100644 index 000000000000..f6eb60483c4c --- /dev/null +++ b/docs/root/faq/binaries.rst @@ -0,0 +1,4 @@ +Where do I get binaries? +======================== + +Please see :ref:`here `. diff --git a/docs/root/faq/how_fast_is_envoy.rst b/docs/root/faq/how_fast_is_envoy.rst new file mode 100644 index 000000000000..78b1dd4d20bc --- /dev/null +++ b/docs/root/faq/how_fast_is_envoy.rst @@ -0,0 +1,12 @@ +How fast is Envoy? 
+================== + +We are frequently asked *how fast is Envoy?* or *how much latency will Envoy add to my requests?* +The answer is: *it depends*. Performance depends a great deal on which Envoy features are being +used and the environment in which Envoy is run. In addition, doing accurate performance testing +is an incredibly difficult task that the project does not currently have resources for. + +Although we have done quite a bit of performance tuning of Envoy in the critical path and we +believe it performs extremely well, because of the previous points we do not currently publish +any official benchmarks. We encourage users to benchmark Envoy in their own environments with a +configuration similar to what they plan on using in production. diff --git a/docs/root/faq/lb_panic_threshold.rst b/docs/root/faq/lb_panic_threshold.rst new file mode 100644 index 000000000000..00d678254774 --- /dev/null +++ b/docs/root/faq/lb_panic_threshold.rst @@ -0,0 +1,6 @@ +I setup health checking. When I fail some hosts, Envoy starts routing to all of them again. Why? +================================================================================================ + +This feature is known as the load balancer :ref:`panic threshold +`. It is used to prevent cascading failure when +upstream hosts start failing health checks in large numbers. diff --git a/docs/root/faq/overview.rst b/docs/root/faq/overview.rst new file mode 100644 index 000000000000..e0b1023eece1 --- /dev/null +++ b/docs/root/faq/overview.rst @@ -0,0 +1,14 @@ +.. _faq_overview: + +FAQ +=== + +.. toctree:: + :maxdepth: 1 + + how_fast_is_envoy + binaries + sni + zone_aware_routing + zipkin_tracing + lb_panic_threshold diff --git a/docs/root/faq/sni.rst b/docs/root/faq/sni.rst new file mode 100644 index 000000000000..21e1e500aa17 --- /dev/null +++ b/docs/root/faq/sni.rst @@ -0,0 +1,52 @@ +.. _faq_how_to_setup_sni: + +How do I setup SNI? 
+=================== + +`SNI `_ is only supported in the :ref:`v2 +configuration/API `. + +The current implementation has the requirement that the :ref:`filters +` in every :ref:`FilterChain ` must +be identical. In a future release, this requirement will be relaxed so that SNI can be used to +choose between completely different filter chains. :ref:`Domain name matching +` can still be used within the HTTP connection manager to +choose different routes. This is by far the most common use case for SNI. + +The following is a YAML example of the above requirement. + +.. code-block:: yaml + + address: + socket_address: { address: 127.0.0.1, port_value: 1234 } + filter_chains: + - filter_chain_match: + sni_domains: "example.com" + tls_context: + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "example_com_cert.pem" } + private_key: { filename: "example_com_key.pem" } + filters: + - name: envoy.http_connection_manager + config: + route_config: + virtual_hosts: + - routes: + - match: { prefix: "/" } + route: { cluster: service_foo } + - filter_chain_match: + sni_domains: "www.example.com" + tls_context: + common_tls_context: + tls_certificates: + - certificate_chain: { filename: "www_example_com_cert.pem" } + private_key: { filename: "www_example_com_key.pem" } + filters: + - name: envoy.http_connection_manager + config: + route_config: + virtual_hosts: + - routes: + - match: { prefix: "/" } + route: { cluster: service_foo } diff --git a/docs/root/faq/zipkin_tracing.rst b/docs/root/faq/zipkin_tracing.rst new file mode 100644 index 000000000000..de06ef5d17a8 --- /dev/null +++ b/docs/root/faq/zipkin_tracing.rst @@ -0,0 +1,7 @@ +.. _common_configuration_zipkin_tracing: + +How do I setup Zipkin tracing? +============================== + +Refer to the :ref:`zipkin sandbox setup ` +for an example of zipkin tracing configuration. 
diff --git a/docs/root/faq/zone_aware_routing.rst b/docs/root/faq/zone_aware_routing.rst new file mode 100644 index 000000000000..3b9b0d2099ec --- /dev/null +++ b/docs/root/faq/zone_aware_routing.rst @@ -0,0 +1,61 @@ +.. _common_configuration_zone_aware_routing: + +How do I setup zone aware routing? +================================== + +There are several steps required for enabling :ref:`zone aware routing ` +between source service ("cluster_a") and destination service ("cluster_b"). + +Envoy configuration on the source service +----------------------------------------- +This section describes the specific configuration for the Envoy running side by side with the source service. +These are the requirements: + +* Envoy must be launched with :option:`--service-zone` option which defines the zone for the current host. +* Both definitions of the source and the destination clusters must have :ref:`sds ` type. +* :ref:`local_cluster_name ` must be set to the source cluster. + + Only essential parts are listed in the configuration below for the cluster manager. + +.. code-block:: json + + { + "sds": "{...}", + "local_cluster_name": "cluster_a", + "clusters": [ + { + "name": "cluster_a", + "type": "sds", + }, + { + "name": "cluster_b", + "type": "sds" + } + ] + } + +Envoy configuration on the destination service +---------------------------------------------- +It's not necessary to run Envoy side by side with the destination service, but it's important that each host +in the destination cluster registers with the discovery service +:ref:`queried by the source service Envoy `. +:ref:`Zone ` information must be available as part of that response. + +Only zone related data is listed in the response below. + +.. code-block:: json + + { + "tags": { + "az": "us-east-1d" + } + } + +Infrastructure setup +-------------------- +The above configuration is necessary for zone aware routing, but there are certain conditions +when zone aware routing is :ref:`not performed `. 
+ +Verification steps +------------------ +* Use :ref:`per zone ` Envoy stats to monitor cross zone traffic. diff --git a/docs/root/favicon.ico b/docs/root/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..7cd45f110377fb45a3fce4ac12af2a4125fbaa34 GIT binary patch literal 67646 zcmeI537i~dnTOv5IYc1Hy4;X}ASe*PgGB*}uqpzJ+zb5tv1=q;>Y}?M2!siWvIig$ zL_yY7SpvlMfQ5u`I4(kt$uPMlb7bzLr|FL_%?ds|)Gn1K2_hgdk=%25ur@Okk zp7;9Rs_qh*#s8Kp5&q4SllPk?hq81sT7vi!|DUHz{F`kKGxWdpV$7+N!0`G1XsdrE9->@y$WUZkEVt znVKzi*=?hEN(iFia(e8+Z)4;a|)6ll2n6alM33t&`X>YsEOIO^ki3 zv0(a}h_c@x!4qM9;bw_`eWQe**eH>gSZ_dm8zmOsB#Gf-{&BShY##J;ylA}_<= zy-A`GV!$vlz$g^4z)(+{Iey#Wq)_s^6M|*mFO6Kr;Hxh%lNaDkr zBz)l(iQL5a5aWw*uSZT4DP^zglj9$cz}`VDsH>Ow^TdV+sSy{l{JBkH99JvG0lII~ z<0$R9d$CCzAE+1i5!7^Fg!2#4O4JGec%#IUD20a0&7V3z=Lo56UWj1}ZLog@Jy1uj zSxXLjnYrekd*pY`C##{(S}w+3+P@)}f4oVemp4i5jz)<-u~njPY?a71*aulR3N;#k zjRA;SFg|FBm_NiabAr-(FhOl-%E|);qn|pUg&44^UIv%0m&7;gBym}t44zRZ#^Jip z)!Z<7o8cI~uZuZdlLXI({q5+PCW$mOOVow}*Z_VX#plP*pV|-Ju^4l~sA@xIP8g>r zFcJRAA#y+*d%zA7wt7jdXMBpsZlRa>Nu7t*O6~h5e*|XU7PM!pbzLjH`c?^@+d^I6 zB;os;CGtGudPXno2hlM6jggeMhZu1omL?vk0;mV}b#VV9<1*ET%ser^p2vEzfgt?> z<_d|lWe+-9aljh$aUeNN92ms^-Q-qB~)N#MA@NZ^CoOP+ZockUCn zN!z|{68>0=guV>#A2v()b+oZXqWvur3BWz6?1=^Gk=W~FBk^ZW*u^~X4Qj`K$LFun zzK&|6B(H=0qvU|MV2yp~Troag{yJC026VnS=&zS#A2zUoc(6iigLM+UbhU&|rar93 zf>F#tbLA@PI_mgCP40mUV1Fz6F~0vMHsFMR1iv3fhKd}JRg`i27(k6cY=}jX5C3n) z@86|<_zgC2ALj$EfY%B1j1E!Z>nG9syBhZQqyNSh)?gQHjP!~%PvmVO-zzGv3)r4v zDw%?Pn7Uwq7(i{1c(_iYH#28kutvfkeOtnRt#zWtxKR$rjC;6?9!{Hhk8YOGXSPZ3 zpSFqrm#q?fvqgff@OPj%%!g2Vy!^8@frD`?`W4*2u~CATzQ??Mqj)lXJnb2peM9$w zoFhD1`v}yTmk}Gj1$XQq+Qj)n7yL)94Xr#-z(1||0N;=cwmqdT&QV1=NE|y|E%+b4a^hPO6=%&#C;&gV@Aytl!2aK zZxY|B+r)Q$y9B=v`xmxJcr9ZG@>BQ4Tj6g>*^kAa95BROFpPQ{CH%@(i9dwCx{>;k zp72SqJ!G{Q@2?r7Zx4sr?A!kJ68aFlPJT~9pQYb(6RfpfSc&@KA7(u@LH==KfuEev zPCQscJird(OQ;X8)tsO6g77*NZc;W6y&hIPyr*GZgtg7Lvx zj?rZHc{;upzsg*P(<+YRJ0$S8ZQ}bn+#e^;zk;tfz`q~v;R*8BT5t$^urt1k9sCMA 
zxT{%WS8A`ILyTk5oXH&zW7Y;S4(58Gx%7-aO-{I)df*;%#Ix{UgB`eFPd>=>1}DHj z8G?Hs^M!hH!pqnK{iDHKv4IQN=3K1}^_Wd=-*^41v1NKUdVVf=h}L(l;{8&a1n$`; zf#0DnmgD686#R`sjphHXFee_w;ZBZ-=zQ>9V*eB5?1l6JPuE^iCHgM9O}!pSdyr;5 z5&Ide*1_L~{4B@S7E0qlaV#+70_?RWPz@0aVs(;iXZgi-lDKQFBtE}dqDO5ePHK&) z$8gt^ocJDKzq7mOiFbvH|B;g)I&FGKRhxas!@7yioJgjRMw4 z)fRMKPya~!2LB8HR>T}3kwjV}>N%raUa)Eccw7EoG{S$7T;L`TY-Voo1a|QE#DX); zn$+04CR4jkPg_vG>yVB)U1FctE#V7#B($hYLjT$+!QY}fY{7-1`2TPz|IzJ0Yr#+$ z+2OysO(M_2{sH3tjja+tuU(8|IQRO2+FcMM^*!~y;XQA&44uDO2LBN|_yO0BEh9Fp zg*)ep6I?$sA^s_9!YDT2Kx?sw7sw4iqCWiEdKp?^UW@2GNc&|IkI0pMh2~w&@txv3 zuSwh@`x9N_zoJKi-zMKb!Sd@S`=Ae7Fjy`UF@X4AC^H{m7sJGYBsn5LAFvU< zhJHrQ|85i8-wJ!p@zZgxtW5NpFf^YY&sAJIdNE+3R|ae~RTW>cI|P=#cQY=m%c-uCyPjKCkvR zo%`k*m(>ompYt}?s8HK~0p{O<{r_-Y*oGpkr^d@4TS(*o**Gv5g1v`au!cFp(~LJ^ z6Q|U2-4U9SIRZXE3%&>UiR*+;aecO%+OC_rzE}Lq=;_pTOTdnXQChi=rUDz#T%h#; z%!A$32kRLBhkXAC+!qn=&ubR{Jl=P=J9o-HYkhEB1GRx(6F^LO4DP?D_ovqh^zc{| zThRKDo{+|X(c^%|1neLU|6yc<{aVIn*O~K#>;59KbKm89IO5_QWmHZ-x3;C*Ro`nD z*NO1Hyj#5Ahy4oF4DSK>hgeQBrpW_4o$7hE%=e1 z3%2%QcN_KkV66+ZZiAnGm*gvOUJCC&!QIKI*OO)Vn>H}=yui!}gT#VCwSfeFA7Xph zL9%HL^A6^U_fsSM-Crbre7So^4$jMJj_;*c-%l*=A=bnHu^#cihPqJ<=0nIR6gFX$ zsvsW3;P0jv*b4Kv+a&lbb>Th4`zxvMKGv6+TTM+&n+ltHid?DZeBpQ+x&|%A|DQvA z=Pa=edAPPbt~O98le=jX@YWcB1~bAAhOh(X3WFWQgICwe&?77_TF2b$9qObvsYO;X zKbh?3>!+sg67SjYzr9cV|J5trT9#Xo6ZX+w38ean`GxFtxs<)GTl_mE0Q=xZ^c=o_ zN3(dp$b8{6?H7?h52%^O^W2>~o(6ppWA;0o^WytNN9cD8;mmhUWB-B;yhwepnRRa= zdtDxjKX#zGV2BtHK@M!7iQds$^bVd`EAgA(k=WU_#L%7F-~6j=;yb5LeBVXSp=RX9 z{|B)JL&b7NWAUdR97IuUz{a?RIl>B^C&2u+ZP=FfaJ09Yf7d;GNxzd?m+2ja&e0ws zcJL&;Uqkx+f&mnz_Se2(kqzj&6$kKrqgaDU?7)Q$Y{nLTMLzg0{iAaxdnEDgMg7$E zed6tacL4qg#^H(Zr!EM>y`S3gHO7Y+|FM_a4&OhqUqXLHZL#lk>}gE)*p2pj{j9b0 zEUDK%M6c*0oC{vSHKPmR{_o@hoi8$1Na#7DiSSPm5909m!G9a7TSGn=Z6C(_ct7>M zP5eVB#c~>3FjN+QYqa73V;YeU{Da7c&-bEraDNHzHC*rUCF1@^^g90W?!%t`Jk5Eq zKDl0oE~O6mKKbBf#5^J1iyi1Sg`6i!>bU~?L#6eA858ocU@#5)6rw&DT0GffSI8e5 z7!QBg`b@2U6;{F`$e@f=8ll}Zt@p|oNaqZc`FrQ1` z=x^v7T}k}sJaghV@UEi=q_H459{xH} 
zI+6D>f43w`|4`?PiA$*IZ!`M_$yLM!t|v`IcRhcz4#?L7J{Rm);QveT{Vz01AJ^N> zoSJ?jd?)f;?HjyFjm&k!7gG;Bi1eCruf^XQt$Bgv|I`E8ADDFh&-=eGRqX%8{IP>} z`a?hJ6ZZo0{qbGYx78d!5z-?+SC1juk2>%K*mHeY@?l~?uf^RO^ZYY?V&(#s;GfU` zRvnNZ{Ts;tH|{yR0|LTsQJ8`y4cn6-t>Iiqs(w~H@@a``{c-K+Zpv7RR?BY*E=yLg8z{>%jp)%O1H#sA&a_&*Mo zJM)x0|0wZ4PW&%ESETvCROb32t|QCzjLJ~|Rr&7>uX4X;{QNUL!m{&sWb=R3|I1Bw zeA_GKPj4`DU2#4RSpHu*{@6ge%71*k%KfbI@@HuvPPaxv}TASID1h&iOv_$lpJ%6n`iDQI&s=$kZ;3pMRnv{$I-9iUS2>)&DDK z62<)W_Xu*oRh#*3BC8%?tPp=M@!y;C{{rrre875@{~pTUN&ZI}{@8#~r~*5v@}ENf zeBUtp+tu6E0s0#QoF7i;H6xRr|HFE*#h)=<$X=JL{3{5(VAG}Xe{BAVvhjDp-zA0QY2`a)L8>sRxH`sA*Z~E}} z;QyYg|BsVfxt_LT{<-;p{uYsb$B^G5nSB1Y;=fxwDbxQ={-t@K8vn~hdgSd)5B?tF zf0h5pgev>!c=_{vg0lNRzCWb?kJ8kFMxm@Yp#JZu_J1p81f~4>J-QL^U84M{0rh*O zWsCpH-&5sZF}CB`a3Ozwv%Gn9{+Zt+!9SM|s{TJ7kURODQvTWdSY%>= z+JZF?WYl}v!9R}w$0yzYsq)`RU}b*Uc==-sW#;d~|J@n?Ck~|D;x<&SoUD4Fn*Ym; zyl8Wy@h^=7RsKbXZI4-*Zv69c!18}jHUC%8+ARL&@7%_kEx*r?=6pc?pBjMQp3MAC zeHrq<8UMZ1|0oy#tNbendxTAom%rZUw37U(1&l&5H?;ZzRsSD>Te%)8Gp<#?!&Klik1CI;wz?TTVRHYQ~G$I9aWRsQ9mH~DRhpMU1wb;SR2 z@^|I@pB!MQ@_wIN#{V5v|DQbS5x0=%A6m$Lu9~g;9_oD#nG@vVKqK`Z{o#u6CkLdf z{71x7@dwA^uQp)i1I9G9U!J;a&PJ(~W=L4$S!g zb-@N-_54pMwz91k^2hI+3w>Xg^YuV9&0HXb@0Zp8L*h&k2Xg*j~uG!8UNG)*0lA_fdGPCEY=^2Y|!!~jE8_5X^Y9&y7Kf9{36 zko%uEPc45(#{ZQ+GE}8@fIU?6|A^$u^I$RmaC57K4OL4{W;|%g`G2I0{%`URSNWF* z=1y#7{QM)?_+MWBuuoO_?*zCqy<}qiBjo=|@plma9oxl!zc|_h;`otU>V(0vB*k^oOYddad|xjRkoiO-)!K{tohgF@LKL@QdBW{C|a4 z>`T02`vUiSJ4EmKHp!1a<6o}#id`rDf7K+ui(AD1AnaGc+-q^SMvFhSA^f8$`T=Fj z|HS>FeEc{0TXvAI2OZ&%xSEIqD}Ca8)GLl#d}6;Cn>fwIy>4rzR%1a$?oGdk8Obc} zp|nqnxR2}<-~4UjyR?P-B)3R#DXdp*lW+?$Aj)W{EdKd1lEwy7#DF1c!LrQ%lz$|b z|3~JJ9nc?gMVSY9Q77EjV+TK@FZi{fc+Rq6$HcPPdl;*-e;mDk7VHmflhCK(eDgL5 zJl!h3hE@r7vF<~$HVLOj=N~RB|0w;Rh&VF*v4LFt&-?#&`CynBkmOvkpL0ZO(XXio z?)HoAa*x>08j#Mpe(5^6S{vj|NALf-S$aO$EdF_VKlE+lxvEV9_qI#u*=-VBk3!od zl*A^|$SBkZc97*C#{a{U_WwM8M+!R_4}a|u55s&A`B?5n>(EQs!V+@AB_6Sz?5N)V zIcEUs?{NHRoA@rH&iet}Uxx8|#$M!Sc?b=o9rE8s{x{=)+4wJ_2e=+}z&{WYdxGWR 
zQvWaP2^IDS;jZV2g8^~(lM7Z67ygynV4+|7&UZ`yM+|BkV!^%!aW8j3_V5Kh?_OQh z^fvDQPCa*0yZArXCjOh+CGar!Mt`$YLbeVGg^*zhJ1|t(K#?r}ZzJck97q0U3AMA` z_2i04-~T-%wsRt4zY9GVmVxGw47kH$AKVdtYhGZE8Vj7%1}+=gf*q_tPf#D+hAo`! z6z5?xX|A9(eV23{-Y&j#x+VAx?vwl^daYA}jqo2J21MaLXtCFENBpV(lJIs>CpNmo_uLLjR-st~AjlaA@{99n} zMMj~p1^nL>Hh~SOEL#YZ55hy#1rdDS!EzHd;#=6jGl;q|cy^n3kDe@lgFH>$cNnq% zw1C(?5BFPG{xQr~!oM5ESmyjd@w{Mc{?0h;J*XA_Z^Qptt|PkFBhIVnADjaF_oF?g zcVynr+qQoPHqO1Vuk8~5{b(6>@D6_8)+qrmW4w^PE?ew%Eak5?0gRak20NG|yow%a zmB1qH5p9+5DO(w81ZW5Hf6R8QmOb&P;DE1YIoM0b7A(X@p^c+#{ zJmFY&KrUde;7T!9v||HrqDT1+f$RCrxl_>5EHe*k+t=EM-EGu;=W`rn6Ynv-5v$M+w~1Jewp_{_b=!Y->p63e+>42Am-Pz-pg_n_QT`nA5{Ls#D5#?*P>@(e=oXD z^E~76#FshBXe#`~kovvd(sH0*`cH@VE$A8gMC*_TdkB*=lG-yYofj0w1DzXW=LgpH zq+%TvH5Me{9zc!ch1ax) zh`#HQfh)Yya|(TfLwWDLcZN?TUjv7II;oS;{BHU;J>t6?Jq7bOP!kHFB>abo3x=xH z21@y}K1@#Vz`hOkD?22-l-PdDJPXZG_lS73f{UQldH(4ScD z&=`;vc_DNC$acBGvIA>`J@Fu;Vd8)TZ9$JyA7TUcQ^*TCN0^d0Hy)eOeWBJqsE_)t zN4(ecis$Ej;%)2|pA)|~@(Qq=*Y-9Lq!!3%m@(Bw&yU&=_I`Q-fqQumdcvNgl=*n} zYbu{p%lVxS>6l}eu6g+W1;haD8$E=U!(a0P^F?Qbb3}R_Y3}@SVH_BNKi@ZXhGB0< ztEmm1rABxFJNO6IKg*o)*sLu~`CLJ3`bO*!A3n}5;RSZ_e|13oKfnf-^^0#UqXX{Y zJbSf+JbPWHNfg5dTF}cLaoq>|^R$n$D|$!T!_gei zJ?}r)C;lZce-&**F_g54Kh-b(;fe7NCgJSDrZ&O+@$6ht1h-9#)>c4 z!0ZU;eW?TIGgrKZcyJ&3Pwe0=)J(15#}3pMhAn%r{NEZ2@<8GDjlKAOGdA%m_V6g@ zl)jD~TtY0EueE{3q3m%QYxgGnH{GUgyNl1%FRnBC#r=1E;`t^%xfI^7!Mg?CK9&s? zHnE+GZ6T8vv=$gdUYKts#=pV%i*E7V1OF?@AE#ysY*hY~uV3_qx0q-@n%; zp8tmTTKKyLU=H_TWE6^6KpZe-#e#l%Ju22iaPMV4P|y7EWyZ&-8*kz91)VZ5w~{@a z34Z>vK3!u$fa8hZAMcUgv-~n}Be?)Ou>T)*fejnbxxsk(^Idee&JPmwi~R8KMGeG( zm(YE7u05hIIB_x`0K1({9dnXh+?S#ycJaKb_c=y=c6=Ts&@eWTE@ZFERva*Mf#w5! 
z%;b;1_c1s86aM~V9>0NF@RM3cs?Tfun#?}$4zK07?rl&XQWu&6SALi-;Y=C=IIK$M1J#KNn%J^^8i3_oXbCqFGf|=_P$EppK zw_c}&K7@~-Jir`-`;OEcb?=?DC zv`Oe>Jujr!fb0(J%CV1{577IxH_o-9$It`*I7~0+8qzz71HSYqs9q$?s2(>dmOeQKg)6W z=k9xMaks`1_%rYKhLMdq0`o%Of53lHm$(&S{mK`)J!U&zssm z#$53->VgM}2d@(gHU_zdj9d|*KG5q+xyCRb4@&(X-r3(M%=|u~+5-13aCO1?P4pk= z8sa>CJNp9q2GdxJrS0(pf{YK|R#DTvdC*0VP;NHY6sK-ncu6&|8pb! zxu2~ohR;W{it9a2t$Udi-rv?*wqgPG0QWxExZoQi2E_4ydOsc;%FZKn&Ul7`=~E_$X8m|b8hzVGM6v_X$>#8~DCa z=9)sic0hlpmb{QVhn$-ixCTk*nt!FTu)YB{?r8#_&Z=<#~kr_ z^q`IYlAiyghB!{=lVkN3cJ#Tbf1mgfATP|)I+1vAhFb={0{>rP2V1cLt}ibye|WoG z;@yJ3KW(FD$@|VH$LsglW^CVP;>61T3hf=~d1h*ZGwB^%3;%n_3)G0tH>m|%u!Sfi zdBM%KB*i@i1+x?2e*t#zb@T*r0XuLu5EJ^y3w-y)rRNO`?q!p?uhlU9 zp_ujuP(9}m{=jyY!v0otJ~{q)?G;!46I9<}GDkq;f!ctNYY}~7I~M;xjX3aC?BGf2 zgSW8(AN=(il0yE>2lf1+%gvmy)+LUga2>&oFh7-ZRLAIfDz?jaRI9muGHj;$wK`X@ zN&WtOZ|&pQ!40tgKI3nR1*=gf@n5ei)pLdIi1P!^=kfa*{Qg=e-)o@8dw;d2pK5C5 zdq4G8?Ir7VK%DbAO0P#H4qOWJ?{VGG9~j?ZM6Ta5-pTkSVgqxD-Xn?m2l9HYE2{N< z`H<|=EvX$ir000%2iM{MPvh@gCz8=m8LuQy%=1dyLA!J(r})hc^Iz)x?7}GP-E)fn z{^z}l*F}of(;_v64|40LgsYkh)_Hl+22#3ye*OsoT`$<9Ze1_fpvYc- zF4!YBH;0$hSQ}t(&S#Tzt#$pIKF3;LqC1oMkNHSu+q_5i@8(N&Bjy9jR&RiqBNeA z#v3avS@9?vme_}Epel`LrSYyb9$K$kqMt_pmBv$ZkMi->T*~jK73XpXCbOkn*wnaI z9Pjm|`ORsZE6x{euqdDC9_6CD?jak@MPXfMW4Y|0>uj(f{qVpNu|AysIr9;7RCJv^ fnyaJrj}6pV%b78kjIxiG=Hu=hbn^+L=J)>x5u&*7 literal 0 HcmV?d00001 diff --git a/docs/root/index.rst b/docs/root/index.rst new file mode 100644 index 000000000000..35777d84f25f --- /dev/null +++ b/docs/root/index.rst @@ -0,0 +1,25 @@ +Envoy documentation +================================= + +.. ifconfig:: release_level in ('pre-release') + + .. attention:: + + This is pre-release documentation generated directly from + `data-plane-api `_. There is risk of it not + being consistent with what is currently implemented in Envoy, though we try to make things + consistent as quickly as possible. + +.. 
toctree:: + :maxdepth: 2 + + about_docs + intro/intro + start/start + install/install + configuration/configuration + operations/operations + extending/extending + api-v1/api + api-v2/api + faq/overview diff --git a/docs/root/install/building.rst b/docs/root/install/building.rst new file mode 100644 index 000000000000..c0c0e0084886 --- /dev/null +++ b/docs/root/install/building.rst @@ -0,0 +1,62 @@ +.. _building: + + +Building +======== + +The Envoy build system uses Bazel. In order to ease initial building and for a quick start, we +provide an Ubuntu 16 based docker container that has everything needed inside of it to build +and *statically link* envoy, see :repo:`ci/README.md`. + +In order to build manually, follow the instructions at :repo:`bazel/README.md`. + +.. _install_requirements: + +Requirements +------------ + +Envoy was initially developed and deployed on Ubuntu 14 LTS. It should work on any reasonably +recent Linux including Ubuntu 16 LTS. + +Building Envoy has the following requirements: + +* GCC 5+ (for C++14 support). +* These :repo:`pre-built ` third party dependencies. +* These :repo:`Bazel native ` dependencies. + +Please see the linked :repo:`CI ` and :repo:`Bazel ` documentation +for more information on performing manual builds. + +.. _install_binaries: + +Pre-built binaries +------------------ + +On every master commit we create a set of lightweight Docker images that contain the Envoy +binary. We also tag the docker images with release versions when we do official releases. + +* `envoyproxy/envoy `_: Release binary with + symbols stripped on top of an Ubuntu Xenial base. +* `envoyproxy/envoy-alpine `_: Release + binary with symbols stripped on top of a **glibc** alpine base. +* `envoyproxy/envoy-alpine-debug `_: + Release binary with debug symbols on top of a **glibc** alpine base. + +We will consider producing additional binary types depending on community interest in helping with +CI, packaging, etc. 
Please open an `issue `_ in GitHub +if desired. + +Modifying Envoy +--------------- + +If you're interested in modifying Envoy and testing your changes, one approach +is to use Docker. This guide will walk through the process of building your own +Envoy binary, and putting the binary in an Ubuntu container. + +.. toctree:: + :maxdepth: 1 + + sandboxes/local_docker_build + + + diff --git a/docs/root/install/install.rst b/docs/root/install/install.rst new file mode 100644 index 000000000000..c53acab15dc6 --- /dev/null +++ b/docs/root/install/install.rst @@ -0,0 +1,11 @@ +.. _install: + +Building and installation +========================= + +.. toctree:: + :maxdepth: 2 + + building + ref_configs + tools/tools diff --git a/docs/root/install/ref_configs.rst b/docs/root/install/ref_configs.rst new file mode 100644 index 000000000000..80380eeaa405 --- /dev/null +++ b/docs/root/install/ref_configs.rst @@ -0,0 +1,58 @@ +.. _install_ref_configs: + +Reference configurations +======================== + +The source distribution includes a set of example configuration templates for each of the three +major Envoy deployment types: + +* :ref:`Service to service ` +* :ref:`Front proxy ` +* :ref:`Double proxy ` + +The goal of this set of example configurations is to demonstrate the full capabilities of Envoy in +a complex deployment. All features will not be applicable to all use cases. For full documentation +see the :ref:`configuration reference `. + +Configuration generator +----------------------- + +Envoy configurations can become relatively complicated. At Lyft we use `jinja +`_ templating to make the configurations easier to create and manage. The +source distribution includes a version of the configuration generator that loosely approximates what +we use at Lyft. We have also included three example configuration templates for each of the above +three scenarios. 
+ +* Generator script: :repo:`configs/configgen.py` +* Service to service template: :repo:`configs/envoy_service_to_service.template.json` +* Front proxy template: :repo:`configs/envoy_front_proxy.template.json` +* Double proxy template: :repo:`configs/envoy_double_proxy.template.json` + +To generate the example configurations run the following from the root of the repo: + +.. code-block:: console + + mkdir -p generated/configs + bazel build //configs:example_configs + tar xvf $PWD/bazel-genfiles/configs/example_configs.tar -C generated/configs + +The previous command will produce three fully expanded configurations using some variables +defined inside of `configgen.py`. See the comments inside of `configgen.py` for detailed +information on how the different expansions work. + +A few notes about the example configurations: + +* An instance of :ref:`service discovery service ` is assumed + to be running at `discovery.yourcompany.net`. +* DNS for `yourcompany.net` is assumed to be setup for various things. Search the configuration + templates for different instances of this. +* Tracing is configured for `LightStep `_. To + disable this or enable `Zipkin ` tracing, delete or + change the :ref:`tracing configuration ` accordingly. +* The configuration demonstrates the use of a :ref:`global rate limiting service + `. To disable this delete the :ref:`rate limit configuration + `. +* :ref:`Route discovery service ` is configured for the service to service + reference configuration and it is assumed to be running at `rds.yourcompany.net`. +* :ref:`Cluster discovery service ` is configured for the service to + service reference configuration and it is assumed to be running at `cds.yourcompany.net`. diff --git a/docs/root/install/sandboxes/local_docker_build.rst b/docs/root/install/sandboxes/local_docker_build.rst new file mode 100644 index 000000000000..578c275a29dc --- /dev/null +++ b/docs/root/install/sandboxes/local_docker_build.rst @@ -0,0 +1,35 @@ +.. 
_install_sandboxes_local_docker_build: + +Building an Envoy Docker image +============================== + +The following steps guide you through building your own Envoy binary, and +putting that in a clean Ubuntu container. + +**Step 1: Build Envoy** + +Using ``envoyproxy/envoy-build`` you will compile Envoy. +This image has all software needed to build Envoy. From your Envoy directory:: + + $ pwd + src/envoy + $ ./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.release' + +That command will take some time to run because it is compiling an Envoy binary and running tests. + +For more information on building and different build targets, please refer to :repo:`ci/README.md`. + +**Step 2: Build image with only envoy binary** + +In this step we'll build an image that only has the Envoy binary, and none +of the software used to build it.:: + + $ pwd + src/envoy/ + $ docker build -f ci/Dockerfile-envoy-image -t envoy . + +Now you can use this ``envoy`` image to build any of the sandboxes if you change +the ``FROM`` line in any Dockerfile. + +This will be particularly useful if you are interested in modifying Envoy, and testing +your changes. diff --git a/docs/root/install/tools/config_load_check_tool.rst b/docs/root/install/tools/config_load_check_tool.rst new file mode 100644 index 000000000000..29e9701bdc9d --- /dev/null +++ b/docs/root/install/tools/config_load_check_tool.rst @@ -0,0 +1,30 @@ +.. _install_tools_config_load_check_tool: + +Config load check tool +====================== + +The config load check tool checks that a configuration file in JSON format is written using valid JSON +and conforms to the Envoy JSON schema. This tool leverages the configuration test in +``test/config_test/config_test.cc``. The test loads the JSON configuration file and runs server configuration +initialization with it. + +Input + The tool expects a PATH to the root of a directory that holds JSON Envoy configuration files. 
The tool + will recursively go through the file system tree and run a configuration test for each file found. Keep in mind that + the tool will try to load all files found in the path. + +Output + The tool will output Envoy logs as it initializes the server configuration with the config it is currently testing. + If there are configuration files where the JSON file is malformed or does not conform to the Envoy JSON schema, the + tool will exit with status EXIT_FAILURE. If the tool successfully loads all configuration files found it will + exit with status EXIT_SUCCESS. + +Building + The tool can be built locally using Bazel. :: + + bazel build //test/tools/config_load_check:config_load_check_tool + +Running + The tool takes a path as described above. :: + + bazel-bin/test/tools/config_load_check/config_load_check_tool PATH diff --git a/docs/root/install/tools/route_table_check_tool.rst b/docs/root/install/tools/route_table_check_tool.rst new file mode 100644 index 000000000000..f6b9ed2f87b6 --- /dev/null +++ b/docs/root/install/tools/route_table_check_tool.rst @@ -0,0 +1,65 @@ +.. _install_tools_route_table_check_tool: + +Route table check tool +======================= + +The route table check tool checks whether the route parameters returned by a router match what is expected. +The tool can also be used to check whether a path redirect, path rewrite, or host rewrite +match what is expected. + +Input + The tool expects two input JSON files: + + 1. A router config JSON file. The router config JSON file schema is found in + :ref:`config `. + + 2. A tool config JSON file. The tool config JSON file schema is found in + :ref:`config `. + The tool config input file specifies urls (composed of authorities and paths) + and expected route parameter values. Additional parameters such as additional headers are optional. + +Output + The program exits with status EXIT_FAILURE if any test case does not match the expected route parameter + value. 
+ + The ``--details`` option prints out details for each test. The first line indicates the test name. + + If a test fails, details of the failed test cases are printed. The first field is the expected + route parameter value. The second field is the actual route parameter value. The third field indicates + the parameter that is compared. In the following example, Test_2 and Test_5 failed while the other tests + passed. In the failed test cases, conflict details are printed. :: + + Test_1 + Test_2 + default other virtual_host_name + Test_3 + Test_4 + Test_5 + locations ats cluster_name + Test_6 + + Testing with valid :ref:`runtime values ` is not currently supported, + this may be added in future work. + +Building + The tool can be built locally using Bazel. :: + + bazel build //test/tools/router_check:router_check_tool + +Running + The tool takes two input json files and an optional command line parameter ``--details``. The + expected order of command line arguments is: + 1. The router configuration json file. + 2. The tool configuration json file. + 3. The optional details flag. :: + + bazel-bin/test/tools/router_check/router_check_tool router_config.json tool_config.json + + bazel-bin/test/tools/router_check/router_check_tool router_config.json tool_config.json --details + +Testing + A bash shell script test can be run with bazel. The test compares routes using different router and + tool configuration json files. The configuration json files can be found in + test/tools/router_check/test/config/... . :: + + bazel test //test/tools/router_check/... diff --git a/docs/root/install/tools/schema_validator_check_tool.rst b/docs/root/install/tools/schema_validator_check_tool.rst new file mode 100644 index 000000000000..7ba3dca22956 --- /dev/null +++ b/docs/root/install/tools/schema_validator_check_tool.rst @@ -0,0 +1,33 @@ +.. 
_install_tools_schema_validator_check_tool: + +Schema Validator check tool +=========================== + +The schema validator tool validates that the passed in JSON conforms to a schema in +the configuration. To validate the entire config, please refer to the +:ref:`config load check tool`. Currently, only +:ref:`route config` schema validation is supported. + +Input + The tool expects two inputs: + + 1. The schema type to check the passed in JSON against. The supported type is: + + * `route` - for :ref:`route configuration` validation. + + 2. The path to the JSON. + +Output + If the JSON conforms to the schema, the tool will exit with status EXIT_SUCCESS. If the JSON does + not conform to the schema, an error message is outputted detailing what doesn't conform to the + schema. The tool will exit with status EXIT_FAILURE. + +Building + The tool can be built locally using Bazel. :: + + bazel build //test/tools/schema_validator:schema_validator_tool + +Running + The tool takes a path as described above. :: + + bazel-bin/test/tools/schema_validator/schema_validator_tool --schema-type SCHEMA_TYPE --json-path PATH diff --git a/docs/root/install/tools/tools.rst b/docs/root/install/tools/tools.rst new file mode 100644 index 000000000000..40ccce85ea37 --- /dev/null +++ b/docs/root/install/tools/tools.rst @@ -0,0 +1,9 @@ +Tools +===== + +.. toctree:: + :maxdepth: 2 + + config_load_check_tool + route_table_check_tool + schema_validator_check_tool diff --git a/docs/root/intro/arch_overview/access_logging.rst b/docs/root/intro/arch_overview/access_logging.rst new file mode 100644 index 000000000000..19cd82f83a26 --- /dev/null +++ b/docs/root/intro/arch_overview/access_logging.rst @@ -0,0 +1,19 @@ +.. _arch_overview_access_logs: + +Access logging +=================== + +The :ref:`HTTP connection manager ` and +:ref:`tcp proxy ` support extensible access logging with the following +features: + +* Any number of access logs per connection manager or tcp proxy. 
+* Asynchronous IO flushing architecture. Access logging will never block the main network processing + threads. +* Customizable access log formats using predefined fields as well as arbitrary HTTP request and + response headers. +* Customizable access log filters that allow different types of requests and responses to be written + to different access logs. + +Access log :ref:`configuration `. + diff --git a/docs/root/intro/arch_overview/arch_overview.rst b/docs/root/intro/arch_overview/arch_overview.rst new file mode 100644 index 000000000000..0b5e7d9b73ab --- /dev/null +++ b/docs/root/intro/arch_overview/arch_overview.rst @@ -0,0 +1,38 @@ +Architecture overview +===================== + +.. toctree:: + :maxdepth: 2 + + terminology + threading_model + listeners + listener_filters + network_filters + http_connection_management + http_filters + http_routing + grpc + websocket + cluster_manager + service_discovery + health_checking + connection_pooling + load_balancing + outlier + circuit_breaking + global_rate_limiting + ssl + statistics + runtime + tracing + tcp_proxy + access_logging + mongo + dynamo + redis + hot_restart + dynamic_configuration + init + draining + scripting diff --git a/docs/root/intro/arch_overview/circuit_breaking.rst b/docs/root/intro/arch_overview/circuit_breaking.rst new file mode 100644 index 000000000000..98f3e747a934 --- /dev/null +++ b/docs/root/intro/arch_overview/circuit_breaking.rst @@ -0,0 +1,38 @@ +.. _arch_overview_circuit_break: + +Circuit breaking +================ + +Circuit breaking is a critical component of distributed systems. It’s nearly always better to fail +quickly and apply back pressure downstream as soon as possible. One of the main benefits of an Envoy +mesh is that Envoy enforces circuit breaking limits at the network level as opposed to having to +configure and code each application independently. 
Envoy supports various types of fully distributed +(not coordinated) circuit breaking: + +* **Cluster maximum connections**: The maximum number of connections that Envoy will establish to + all hosts in an upstream cluster. In practice this is only applicable to HTTP/1.1 clusters since + HTTP/2 uses a single connection to each host. +* **Cluster maximum pending requests**: The maximum number of requests that will be queued while + waiting for a ready connection pool connection. In practice this is only applicable to HTTP/1.1 + clusters since HTTP/2 connection pools never queue requests. HTTP/2 requests are multiplexed + immediately. If this circuit breaker overflows the :ref:`upstream_rq_pending_overflow + ` counter for the cluster will increment. +* **Cluster maximum requests**: The maximum number of requests that can be outstanding to all hosts + in a cluster at any given time. In practice this is applicable to HTTP/2 clusters since HTTP/1.1 + clusters are governed by the maximum connections circuit breaker. If this circuit breaker + overflows the :ref:`upstream_rq_pending_overflow ` counter + for the cluster will increment. +* **Cluster maximum active retries**: The maximum number of retries that can be outstanding to all + hosts in a cluster at any given time. In general we recommend aggressively circuit breaking + retries so that retries for sporadic failures are allowed but the overall retry volume cannot + explode and cause large scale cascading failure. If this circuit breaker overflows the + :ref:`upstream_rq_retry_overflow ` counter for the cluster + will increment. + +Each circuit breaking limit is :ref:`configurable ` +and tracked on a per upstream cluster and per priority basis. This allows different components of +the distributed system to be tuned independently and have different limits. + +Note that circuit breaking will cause the :ref:`x-envoy-overloaded +` header to be set by the router filter in the +case of HTTP requests. 
diff --git a/docs/root/intro/arch_overview/cluster_manager.rst b/docs/root/intro/arch_overview/cluster_manager.rst new file mode 100644 index 000000000000..71739a4a302c --- /dev/null +++ b/docs/root/intro/arch_overview/cluster_manager.rst @@ -0,0 +1,49 @@ +.. _arch_overview_cluster_manager: + +Cluster manager +=============== + +Envoy’s cluster manager manages all configured upstream clusters. Just as the Envoy configuration +can contain any number of listeners, the configuration can also contain any number of independently +configured upstream clusters. + +Upstream clusters and hosts are abstracted from the network/HTTP filter stack given that upstream +clusters and hosts may be used for any number of different proxy tasks. The cluster manager exposes +APIs to the filter stack that allow filters to obtain a L3/L4 connection to an upstream cluster, or +a handle to an abstract HTTP connection pool to an upstream cluster (whether the upstream host +supports HTTP/1.1 or HTTP/2 is hidden). A filter stage determines whether it needs an L3/L4 +connection or a new HTTP stream and the cluster manager handles all of the complexity of knowing +which hosts are available and healthy, load balancing, thread local storage of upstream connection +data (since most Envoy code is written to be single threaded), upstream connection type (TCP/IP, +UDS), upstream protocol where applicable (HTTP/1.1, HTTP/2), etc. + +Clusters known to the cluster manager can be configured either statically, or fetched dynamically +via the cluster discovery service (CDS) API. Dynamic cluster fetches allow more configuration to +be stored in a central configuration server and thus requires fewer Envoy restarts and configuration +distribution. + +* Cluster manager :ref:`configuration `. +* CDS :ref:`configuration `. + +Cluster warming +--------------- + +When clusters are initialized both at server boot as well as via CDS, they are "warmed." 
This means +that clusters do not become available until the following operations have taken place. + +* Initial service discovery load (e.g., DNS resolution, EDS update, etc.). +* Initial active :ref:`health check ` pass if active health checking + is configured. Envoy will send a health check request to each discovered host to determine its + initial health status. + +The previous items ensure that Envoy has an accurate view of a cluster before it begins using it +for traffic serving. + +When discussing cluster warming, the cluster "becoming available" means: + +* For newly added clusters, the cluster will not appear to exist to the rest of Envoy until it has + been warmed. I.e., HTTP routes that reference the cluster will result in either a 404 or 503 + (depending on configuration). +* For updated clusters, the old cluster will continue to exist and serve traffic. When the new + cluster has been warmed, it will be atomically swapped with the old cluster such that no + traffic interruptions take place. diff --git a/docs/root/intro/arch_overview/connection_pooling.rst b/docs/root/intro/arch_overview/connection_pooling.rst new file mode 100644 index 000000000000..d39815abbf6e --- /dev/null +++ b/docs/root/intro/arch_overview/connection_pooling.rst @@ -0,0 +1,37 @@ +.. _arch_overview_conn_pool: + +Connection pooling +================== + +For HTTP traffic, Envoy supports abstract connection pools that are layered on top of the underlying +wire protocol (HTTP/1.1 or HTTP/2). The utilizing filter code does not need to be aware of whether +the underlying protocol supports true multiplexing or not. In practice the underlying +implementations have the following high level properties: + +HTTP/1.1 +-------- + +The HTTP/1.1 connection pool acquires connections as needed to an upstream host (up to the circuit +breaking limit). 
Requests are bound to connections as they become available, either because a +connection is done processing a previous request or because a new connection is ready to receive its +first request. The HTTP/1.1 connection pool does not make use of pipelining so that only a single +downstream request must be reset if the upstream connection is severed. + +HTTP/2 +------ + +The HTTP/2 connection pool acquires a single connection to an upstream host. All requests are +multiplexed over this connection. If a GOAWAY frame is received or if the connection reaches the +maximum stream limit, the connection pool will create a new connection and drain the existing one. +HTTP/2 is the preferred communication protocol as connections rarely if ever get severed. + +.. _arch_overview_conn_pool_health_checking: + +Health checking interactions +---------------------------- + +If Envoy is configured for either active or passive :ref:`health checking +`, all connection pool connections will be closed on behalf of a host +that transitions from a healthy state to an unhealthy state. If the host reenters the load +balancing rotation it will create fresh connections which will maximize the chance of working +around a bad flow (due to ECMP route or something else). diff --git a/docs/root/intro/arch_overview/draining.rst b/docs/root/intro/arch_overview/draining.rst new file mode 100644 index 000000000000..a7ac2aa812fa --- /dev/null +++ b/docs/root/intro/arch_overview/draining.rst @@ -0,0 +1,35 @@ +.. _arch_overview_draining: + +Draining +======== + +Draining is the process by which Envoy attempts to gracefully shed connections in response to +various events. Draining occurs at the following times: + +* The server has been manually health check failed via the :ref:`healthcheck/fail + ` admin endpoint. See the :ref:`health check filter + ` architecture overview for more information. +* The server is being :ref:`hot restarted `. 
+* Individual listeners are being modified or removed via :ref:`LDS + `. + +Each :ref:`configured listener ` has a :ref:`drain_type +` setting which controls when draining takes place. The currently +supported values are: + +default + Envoy will drain listeners in response to all three cases above (admin drain, hot restart, and + LDS update/remove). This is the default setting. + +modify_only + Envoy will drain listeners only in response to the 2nd and 3rd cases above (hot restart and + LDS update/remove). This setting is useful if Envoy is hosting both ingress and egress listeners. + It may be desirable to set *modify_only* on egress listeners so they only drain during + modifications while relying on ingress listener draining to perform full server draining when + attempting to do a controlled shutdown. + +Note that although draining is a per-listener concept, it must be supported at the network filter +level. Currently the only filters that support graceful draining are +:ref:`HTTP connection manager `, +:ref:`Redis `, and +:ref:`Mongo `. diff --git a/docs/root/intro/arch_overview/dynamic_configuration.rst b/docs/root/intro/arch_overview/dynamic_configuration.rst new file mode 100644 index 000000000000..9ff98d255fa8 --- /dev/null +++ b/docs/root/intro/arch_overview/dynamic_configuration.rst @@ -0,0 +1,84 @@ +.. _arch_overview_dynamic_config: + +Dynamic configuration +===================== + +Envoy is architected such that different types of configuration management approaches are possible. +The approach taken in a deployment will be dependent on the needs of the implementor. Simple +deployments are possible with a fully static configuration. More complicated deployments can +incrementally add more complex dynamic configuration, the downside being that the implementor must +provide one or more external REST based configuration provider APIs. This document gives an overview +of the options currently available. + +* Top level configuration :ref:`reference `. 
+* :ref:`Reference configurations `. +* Envoy :ref:`v2 API overview `. + +Fully static +------------ + +In a fully static configuration, the implementor provides a set of :ref:`listeners +` (and :ref:`filter chains `), :ref:`clusters +`, and optionally :ref:`HTTP route configurations +`. Dynamic host discovery is only possible via DNS based +:ref:`service discovery `. Configuration reloads must take place +via the built in :ref:`hot restart ` mechanism. + +Though simplistic, fairly complicated deployments can be created using static configurations and +graceful hot restarts. + +.. _arch_overview_dynamic_config_sds: + +SDS/EDS only +------------ + +The :ref:`service discovery service (SDS) API ` provides a more advanced +mechanism by which Envoy can discover members of an upstream cluster. SDS has been renamed to :ref:`Endpoint +Discovery Service (EDS)` in the +:ref:`v2 API `. Layered on top of a static +configuration, SDS allows an Envoy deployment to circumvent the limitations of DNS (maximum records +in a response, etc.) as well as consume more information used in load balancing and routing (e.g., +canary status, zone, etc.). + +.. _arch_overview_dynamic_config_cds: + +SDS/EDS and CDS +--------------- + +The :ref:`cluster discovery service (CDS) API ` layers on a mechanism by +which Envoy can discover upstream clusters used during routing. Envoy will gracefully add, update, +and remove clusters as specified by the API. This API allows implementors to build a topology in +which Envoy does not need to be aware of all upstream clusters at initial configuration time. +Typically, when doing HTTP routing along with CDS (but without route discovery service), +implementors will make use of the router's ability to forward requests to a cluster specified in an +:ref:`HTTP request header `. + +Although it is possible to use CDS without SDS/EDS by specifying fully static clusters, we recommend +still using the SDS/EDS API for clusters specified via CDS. 
Internally, when a cluster definition is +updated, the operation is graceful. However, all existing connection pools will be drained and +reconnected. SDS/EDS does not suffer from this limitation. When hosts are added and removed via SDS/EDS, +the existing hosts in the cluster are unaffected. + +.. _arch_overview_dynamic_config_rds: + +SDS/EDS, CDS, and RDS +--------------------- + +The :ref:`route discovery service (RDS) API ` layers on a mechanism by which +Envoy can discover the entire route configuration for an HTTP connection manager filter at runtime. +The route configuration will be gracefully swapped in without affecting existing requests. This API, +when used alongside SDS/EDS and CDS, allows implementors to build a complex routing topology +(:ref:`traffic shifting `, blue/green +deployment, etc.) that will not require any Envoy restarts other than to obtain a new Envoy binary. + +.. _arch_overview_dynamic_config_lds: + +SDS/EDS, CDS, RDS, and LDS +-------------------------- + +The :ref:`listener discovery service (LDS) ` layers on a mechanism by which +Envoy can discover entire listeners at runtime. This includes all filter stacks, up to and including +HTTP filters with embedded references to :ref:`RDS `. Adding LDS into +the mix allows almost every aspect of Envoy to be dynamically configured. Hot restart should +only be required for very rare configuration changes (admin, tracing driver, etc.) or binary +updates. diff --git a/docs/root/intro/arch_overview/dynamo.rst b/docs/root/intro/arch_overview/dynamo.rst new file mode 100644 index 000000000000..d757fe5aa42d --- /dev/null +++ b/docs/root/intro/arch_overview/dynamo.rst @@ -0,0 +1,18 @@ +.. _arch_overview_dynamo: + +DynamoDB +======== + +Envoy supports an HTTP level DynamoDB sniffing filter with the following features: + +* DynamoDB API request/response parser. +* DynamoDB per operation/per table/per partition and operation statistics. 
+* Failure type statistics for 4xx responses, parsed from response JSON, + e.g., ProvisionedThroughputExceededException. +* Batch operation partial failure statistics. + +The DynamoDB filter is a good example of Envoy’s extensibility and core abstractions at the HTTP +layer. At Lyft we use this filter for all application communication with DynamoDB. It provides an +invaluable source of data agnostic to the application platform and specific AWS SDK in use. + +DynamoDB filter :ref:`configuration `. diff --git a/docs/root/intro/arch_overview/global_rate_limiting.rst b/docs/root/intro/arch_overview/global_rate_limiting.rst new file mode 100644 index 000000000000..b15ef05c1414 --- /dev/null +++ b/docs/root/intro/arch_overview/global_rate_limiting.rst @@ -0,0 +1,31 @@ +.. _arch_overview_rate_limit: + +Global rate limiting +==================== + +Although distributed :ref:`circuit breaking ` is generally extremely +effective in controlling throughput in distributed systems, there are times when it is not very +effective and global rate limiting is desired. The most common case is when a large number of hosts +are forwarding to a small number of hosts and the average request latency is low (e.g., +connections/requests to a database server). If the target hosts become backed up, the downstream +hosts will overwhelm the upstream cluster. In this scenario it is extremely difficult to configure a +tight enough circuit breaking limit on each downstream host such that the system will operate +normally during typical request patterns but still prevent cascading failure when the system starts +to fail. Global rate limiting is a good solution for this case. + +Envoy integrates directly with a global gRPC rate limiting service. Although any service that +implements the defined RPC/IDL protocol can be used, Lyft provides a `reference implementation `_ +written in Go which uses a Redis backend. 
Envoy’s rate limit integration has the following features: + +* **Network level rate limit filter**: Envoy will call the rate limit service for every new + connection on the listener where the filter is installed. The configuration specifies a specific + domain and descriptor set to rate limit on. This has the ultimate effect of rate limiting the + connections per second that transit the listener. :ref:`Configuration reference + `. +* **HTTP level rate limit filter**: Envoy will call the rate limit service for every new request on + the listener where the filter is installed and where the route table specifies that the global + rate limit service should be called. All requests to the target upstream cluster as well as all + requests from the originating cluster to the target cluster can be rate limited. + :ref:`Configuration reference ` + +Rate limit service :ref:`configuration `. diff --git a/docs/root/intro/arch_overview/grpc.rst b/docs/root/intro/arch_overview/grpc.rst new file mode 100644 index 000000000000..57deaf043e51 --- /dev/null +++ b/docs/root/intro/arch_overview/grpc.rst @@ -0,0 +1,68 @@ +.. _arch_overview_grpc: + +gRPC +==== + +`gRPC `_ is an RPC framework from Google. It uses protocol buffers as the +underlying serialization/IDL format. At the transport layer it uses HTTP/2 for request/response +multiplexing. Envoy has first class support for gRPC both at the transport layer as well as at the +application layer: + +* gRPC makes use of HTTP/2 trailers to convey request status. Envoy is one of very few HTTP proxies + that correctly supports HTTP/2 trailers and is thus one of the few proxies that can transport + gRPC requests and responses. +* The gRPC runtime for some languages is relatively immature. Envoy supports a gRPC :ref:`bridge + filter ` that allows gRPC requests to be sent to Envoy over + HTTP/1.1. Envoy then translates the requests to HTTP/2 for transport to the target server. + The response is translated back to HTTP/1.1. 
+* When installed, the bridge filter gathers per RPC statistics in addition to the standard array + of global HTTP statistics. +* gRPC-Web is supported by a :ref:`filter ` that allows a gRPC-Web + client to send requests to Envoy over HTTP/1.1 and get proxied to a gRPC server. It's under + active development and is expected to be the successor to the gRPC :ref:`bridge filter + `. +* gRPC-JSON transcoder is supported by a :ref:`filter ` + that allows a RESTful JSON API client to send requests to Envoy over HTTP and get proxied to a + gRPC service. + +.. _arch_overview_grpc_services: + +gRPC services +------------- + +In addition to proxying gRPC on the data plane, Envoy makes use of gRPC for its +control plane, where it :ref:`fetches configuration from management server(s) +` and also in filters, for example for :ref:`rate limiting +` or authorization checks. We refer to these as +*gRPC services*. + +When specifying gRPC services, it's necessary to specify the use of either the +:ref:`Envoy gRPC client ` or the +:ref:`Google C++ gRPC client `. We +discuss the tradeoffs in this choice below. + +The Envoy gRPC client is a minimal custom implementation of gRPC that makes use +of Envoy's HTTP/2 upstream connection management. Services are specified as +regular Envoy :ref:`clusters `, with regular +treatment of :ref:`timeouts, retries `, endpoint +:ref:`discovery `/:ref:`load +balancing/failover `/load reporting, :ref:`circuit +breaking `, :ref:`health checks +`, :ref:`outlier detection +`. They share the same :ref:`connection pooling +` mechanism as the Envoy data plane. Similarly, cluster +:ref:`statistics ` are available for gRPC services. +Since the client is minimal, it does not include advanced gRPC features such as +`OAuth2 `_ or `gRPC-LB +`_ lookaside. + +The Google C++ gRPC client is based on the reference implementation of gRPC +provided by Google at https://github.com/grpc/grpc. It provides advanced gRPC +features that are missing in the Envoy gRPC client. 
The Google C++ gRPC client +performs its own load balancing, retries, timeouts, endpoint management, etc, +independent of Envoy's cluster management. + +It is recommended to use the Envoy gRPC client in most cases, where the advanced +features in the Google C++ gRPC client are not required. This provides +configuration and monitoring simplicity. Where necessary features are missing +in the Envoy gRPC client, the Google C++ gRPC client should be used instead. diff --git a/docs/root/intro/arch_overview/health_checking.rst b/docs/root/intro/arch_overview/health_checking.rst new file mode 100644 index 000000000000..2f44702a06bc --- /dev/null +++ b/docs/root/intro/arch_overview/health_checking.rst @@ -0,0 +1,106 @@ +.. _arch_overview_health_checking: + +Health checking +=============== + +Active health checking can be :ref:`configured ` on a per +upstream cluster basis. As described in the :ref:`service discovery +` section, active health checking and the SDS service discovery +type go hand in hand. However, there are other scenarios where active health checking is desired +even when using the other service discovery types. Envoy supports three different types of health +checking along with various settings (check interval, failures required before marking a host +unhealthy, successes required before marking a host healthy, etc.): + +* **HTTP**: During HTTP health checking Envoy will send an HTTP request to the upstream host. It + expects a 200 response if the host is healthy. The upstream host can return 503 if it wants to + immediately notify downstream hosts to no longer forward traffic to it. +* **L3/L4**: During L3/L4 health checking, Envoy will send a configurable byte buffer to the + upstream host. It expects the byte buffer to be echoed in the response if the host is to be + considered healthy. Envoy also supports connect only L3/L4 health checking. +* **Redis**: Envoy will send a Redis PING command and expect a PONG response. 
The upstream Redis + server can respond with anything other than PONG to cause an immediate active health check + failure. Optionally, Envoy can perform EXISTS on a user-specified key. If the key does not exist + it is considered a passing healthcheck. This allows the user to mark a Redis instance for + maintenance by setting the specified key to any value and waiting for traffic to drain. See + :ref:`redis_key `. + +Passive health checking +----------------------- + +Envoy also supports passive health checking via :ref:`outlier detection +`. + +Connection pool interactions +---------------------------- + +See :ref:`here ` for more information. + +.. _arch_overview_health_checking_filter: + +HTTP health checking filter +--------------------------- + +When an Envoy mesh is deployed with active health checking between clusters, a large amount of +health checking traffic can be generated. Envoy includes an HTTP health checking filter that can be +installed in a configured HTTP listener. This filter is capable of a few different modes of +operation: + +* **No pass through**: In this mode, the health check request is never passed to the local service. + Envoy will respond with a 200 or a 503 depending on the current draining state of the server. +* **No pass through, computed from upstream cluster health**: In this mode, the health checking + filter will return a 200 or a 503 depending on whether at least a :ref:`specified percentage + ` of the + servers are healthy in one or more upstream clusters. (If the Envoy server is in a draining + state, though, it will respond with a 503 regardless of the upstream cluster health.) +* **Pass through**: In this mode, Envoy will pass every health check request to the local service. + The service is expected to return a 200 or a 503 depending on its health state. +* **Pass through with caching**: In this mode, Envoy will pass health check requests to the local + service, but then cache the result for some period of time. 
Subsequent health check requests will + return the cached value up to the cache time. When the cache time is reached, the next health + check request will be passed to the local service. This is the recommended mode of operation when + operating a large mesh. Envoy uses persistent connections for health checking traffic and health + check requests have very little cost to Envoy itself. Thus, this mode of operation yields an + eventually consistent view of the health state of each upstream host without overwhelming the + local service with a large number of health check requests. + +Further reading: + +* Health check filter :ref:`configuration `. +* :ref:`/healthcheck/fail ` admin endpoint. +* :ref:`/healthcheck/ok ` admin endpoint. + +Active health checking fast failure +----------------------------------- + +When using active health checking along with passive health checking (:ref:`outlier detection +`), it is common to use a long health checking interval to avoid a +large amount of active health checking traffic. In this case, it is still useful to be able to +quickly drain an upstream host when using the :ref:`/healthcheck/fail +` admin endpoint. To support this, the :ref:`router +filter ` will respond to the :ref:`x-envoy-immediate-health-check-fail +` header. If this header is set by +an upstream host, Envoy will immediately mark the host as being failed for active health check. Note +that this only occurs if the host's cluster has active health checking :ref:`configured +`. The :ref:`health checking filter +` will automatically set this header if Envoy has been marked as +failed via the :ref:`/healthcheck/fail ` admin +endpoint. + +.. _arch_overview_health_checking_identity: + +Health check identity +--------------------- + +Just verifying that an upstream host responds to a particular health check URL does not necessarily +mean that the upstream host is valid. 
For example, when using eventually consistent service +discovery in a cloud auto scaling or container environment, it's possible for a host to go away and +then come back with the same IP address, but as a different host type. One solution to this problem +is having a different HTTP health checking URL for every service type. The downside of that approach +is that overall configuration becomes more complicated as every health check URL is fully custom. + +The Envoy HTTP health checker supports the :ref:`service_name +` option. If this option is set, the health checker +additionally compares the value of the *x-envoy-upstream-healthchecked-cluster* response header to +*service_name*. If the values do not match, the health check does not pass. The upstream health +check filter appends *x-envoy-upstream-healthchecked-cluster* to the response headers. The appended +value is determined by the :option:`--service-cluster` command line option. diff --git a/docs/root/intro/arch_overview/hot_restart.rst b/docs/root/intro/arch_overview/hot_restart.rst new file mode 100644 index 000000000000..0add1f3fb2f1 --- /dev/null +++ b/docs/root/intro/arch_overview/hot_restart.rst @@ -0,0 +1,28 @@ +.. _arch_overview_hot_restart: + +Hot restart +=========== + +Ease of operation is one of the primary goals of Envoy. In addition to robust statistics and a local +administration interface, Envoy has the ability to “hot” or “live” restart itself. This means that +Envoy can fully reload itself (both code and configuration) without dropping any connections. The +hot restart functionality has the following general architecture: + +* Statistics and some locks are kept in a shared memory region. This means that gauges will be + consistent across both processes as restart is taking place. +* The two active processes communicate with each other over unix domain sockets using a basic RPC + protocol. 
+* The new process fully initializes itself (loads the configuration, does an initial service + discovery and health checking phase, etc.) before it asks for copies of the listen sockets from + the old process. The new process starts listening and then tells the old process to start + draining. +* During the draining phase, the old process attempts to gracefully close existing connections. How + this is done depends on the configured filters. The drain time is configurable via the + :option:`--drain-time-s` option and as more time passes draining becomes more aggressive. +* After drain sequence, the new Envoy process tells the old Envoy process to shut itself down. + This time is configurable via the :option:`--parent-shutdown-time-s` option. +* Envoy’s hot restart support was designed so that it will work correctly even if the new Envoy + process and the old Envoy process are running inside different containers. Communication between + the processes takes place only using unix domain sockets. +* An example restarter/parent process written in Python is included in the source distribution. This + parent process is usable with standard process control utilities such as monit/runit/etc. diff --git a/docs/root/intro/arch_overview/http_connection_management.rst b/docs/root/intro/arch_overview/http_connection_management.rst new file mode 100644 index 000000000000..4f1d415b48e3 --- /dev/null +++ b/docs/root/intro/arch_overview/http_connection_management.rst @@ -0,0 +1,44 @@ +.. _arch_overview_http_conn_man: + +HTTP connection management +========================== + +HTTP is such a critical component of modern service oriented architectures that Envoy implements a +large amount of HTTP specific functionality. Envoy has a built in network level filter called the +:ref:`HTTP connection manager `. This filter translates raw bytes into HTTP +level messages and events (e.g., headers received, body data received, trailers received, etc.). 
It +also handles functionality common to all HTTP connections and requests such as :ref:`access logging +`, :ref:`request ID generation and tracing `, +:ref:`request/response header manipulation `, :ref:`route table +` management, and :ref:`statistics `. + +HTTP connection manager :ref:`configuration `. + +.. _arch_overview_http_protocols: + +HTTP protocols +-------------- + +Envoy’s HTTP connection manager has native support for HTTP/1.1, WebSockets, and HTTP/2. It does not support +SPDY. Envoy’s HTTP support was designed to first and foremost be an HTTP/2 multiplexing proxy. +Internally, HTTP/2 terminology is used to describe system components. For example, an HTTP request +and response take place on a *stream*. A codec API is used to translate from different wire +protocols into a protocol agnostic form for streams, requests, responses, etc. In the case of +HTTP/1.1, the codec translates the serial/pipelining capabilities of the protocol into something +that looks like HTTP/2 to higher layers. This means that the majority of the code does not need to +understand whether a stream originated on an HTTP/1.1 or HTTP/2 connection. + +HTTP header sanitizing +---------------------- + +The HTTP connection manager performs various :ref:`header sanitizing +` actions for security reasons. + +Route table configuration +------------------------- + +Each :ref:`HTTP connection manager filter ` has an associated :ref:`route +table `. The route table can be specified in one of two ways: + +* Statically. +* Dynamically via the :ref:`RDS API `. diff --git a/docs/root/intro/arch_overview/http_filters.rst b/docs/root/intro/arch_overview/http_filters.rst new file mode 100644 index 000000000000..c672f97cf5b3 --- /dev/null +++ b/docs/root/intro/arch_overview/http_filters.rst @@ -0,0 +1,24 @@ +.. _arch_overview_http_filters: + +HTTP filters +============ + +Much like the :ref:`network level filter ` stack, Envoy supports an +HTTP level filter stack within the connection manager. 
Filters can be written that operate on HTTP +level messages without knowledge of the underlying physical protocol (HTTP/1.1, HTTP/2, etc.) or +multiplexing capabilities. There are three types of HTTP level filters: + +* **Decoder**: Decoder filters are invoked when the connection manager is decoding parts of the + request stream (headers, body, and trailers). +* **Encoder**: Encoder filters are invoked when the connection manager is about to encode parts of + the response stream (headers, body, and trailers). +* **Decoder/Encoder**: Decoder/Encoder filters are invoked both when the connection manager is + decoding parts of the request stream and when the connection manager is about to encode parts of + the response stream. + +The API for HTTP level filters allows the filters to operate without knowledge of the underlying +protocol. Like network level filters, HTTP filters can stop and continue iteration to subsequent +filters. This allows for more complex scenarios such as health check handling, calling a rate +limiting service, buffering, routing, generating statistics for application traffic such as +DynamoDB, etc. Envoy already includes several HTTP level filters that are documented in this +architecture overview as well as the :ref:`configuration reference `. diff --git a/docs/root/intro/arch_overview/http_routing.rst b/docs/root/intro/arch_overview/http_routing.rst new file mode 100644 index 000000000000..2d7924da330b --- /dev/null +++ b/docs/root/intro/arch_overview/http_routing.rst @@ -0,0 +1,126 @@ +.. _arch_overview_http_routing: + +HTTP routing +============ + +Envoy includes an HTTP :ref:`router filter ` which can be installed to +perform advanced routing tasks. This is useful both for handling edge traffic (traditional reverse +proxy request handling) as well as for building a service to service Envoy mesh (typically via +routing on the host/authority HTTP header to reach a particular upstream service cluster). 
Envoy +also has the ability to be configured as forward proxy. In the forward proxy configuration, mesh +clients can participate by appropriately configuring their http proxy to be an Envoy. At a high +level the router takes an incoming HTTP request, matches it to an upstream cluster, acquires a +:ref:`connection pool ` to a host in the upstream cluster, and forwards the +request. The router filter supports the following features: + +* Virtual hosts that map domains/authorities to a set of routing rules. +* Prefix and exact path matching rules (both :ref:`case sensitive + ` and case insensitive). Regex/slug + matching is not currently supported, mainly because it makes it difficult/impossible to + programmatically determine whether routing rules conflict with each other. For this reason we + don’t recommend regex/slug routing at the reverse proxy level, however we may add support in the + future depending on demand. +* :ref:`TLS redirection ` at the virtual host + level. +* :ref:`Path `/:ref:`host + ` redirection at the route level. +* :ref:`Direct (non-proxied) HTTP responses ` + at the route level. +* :ref:`Explicit host rewriting `. +* :ref:`Automatic host rewriting ` based on + the DNS name of the selected upstream host. +* :ref:`Prefix rewriting `. +* :ref:`Websocket upgrades ` at route level. +* :ref:`Request retries ` specified either via HTTP header or via + route configuration. +* Request timeout specified either via :ref:`HTTP + header ` or via :ref:`route configuration + `. +* Traffic shifting from one upstream cluster to another via :ref:`runtime values + ` (see :ref:`traffic shifting/splitting + `). +* Traffic splitting across multiple upstream clusters using :ref:`weight/percentage-based routing + ` (see :ref:`traffic shifting/splitting + `). +* Arbitrary header matching :ref:`routing rules `. +* Virtual cluster specifications. 
A virtual cluster is specified at the virtual host level and is + used by Envoy to generate additional statistics on top of the standard cluster level ones. Virtual + clusters can use regex matching. +* :ref:`Priority ` based routing. +* :ref:`Hash policy ` based routing. +* :ref:`Absolute urls ` are supported for non-tls forward proxies. + +Route table +----------- + +The :ref:`configuration ` for the HTTP connection manager owns the :ref:`route +table ` that is used by all configured HTTP filters. Although the +router filter is the primary consumer of the route table, other filters also have access in case +they want to make decisions based on the ultimate destination of the request. For example, the built +in rate limit filter consults the route table to determine whether the global rate limit service +should be called based on the route. The connection manager makes sure that all calls to acquire a +route are stable for a particular request, even if the decision involves randomness (e.g. in the +case of a runtime configuration route rule). + +.. _arch_overview_http_routing_retry: + +Retry semantics +--------------- + +Envoy allows retries to be configured both in the :ref:`route configuration +` as well as for specific requests via :ref:`request +headers `. The following configurations are possible: + +* **Maximum number of retries**: Envoy will continue to retry any number of times. An exponential + backoff algorithm is used between each retry. Additionally, *all retries are contained within the + overall request timeout*. This avoids long request times due to a large number of retries. +* **Retry conditions**: Envoy can retry on different types of conditions depending on application + requirements. For example, network failure, all 5xx response codes, idempotent 4xx response codes, + etc. + +Note that retries may be disabled depending on the contents of the :ref:`x-envoy-overloaded +`. + +.. 
_arch_overview_http_routing_priority: + +Priority routing +---------------- + +Envoy supports priority routing at the :ref:`route ` level. +The current priority implementation uses different :ref:`connection pool ` +and :ref:`circuit breaking ` settings for each +priority level. This means that even for HTTP/2 requests, two physical connections will be used to +an upstream host. In the future Envoy will likely support true HTTP/2 priority over a single +connection. + +The currently supported priorities are *default* and *high*. + +.. _arch_overview_http_routing_direct_response: + +Direct responses +---------------- + +Envoy supports the sending of "direct" responses. These are preconfigured HTTP responses +that do not require proxying to an upstream server. + +There are two ways to specify a direct response in a Route: + +* Set the :ref:`direct_response ` field. + This works for all HTTP response statuses. +* Set the :ref:`redirect ` field. This works for + redirect response statuses only, but it simplifies the setting of the *Location* header. + +A direct response has an HTTP status code and an optional body. The Route configuration +can specify the response body inline or specify the pathname of a file containing the +body. If the Route configuration specifies a file pathname, Envoy will read the file +upon configuration load and cache the contents. + +.. attention:: + + If a response body is specified, it must be no more than 4KB in size, regardless of + whether it is provided inline or in a file. Envoy currently holds the entirety of the + body in memory, so the 4KB limit is intended to keep the proxy's memory footprint + from growing too large. + +If **response_headers_to_add** has been set for the Route or the enclosing Virtual Host, +Envoy will include the specified headers in the direct HTTP response. 
diff --git a/docs/root/intro/arch_overview/init.rst b/docs/root/intro/arch_overview/init.rst new file mode 100644 index 000000000000..0e32aa3961c8 --- /dev/null +++ b/docs/root/intro/arch_overview/init.rst @@ -0,0 +1,24 @@ +Initialization +============== + +How Envoy initializes itself when it starts up is complex. This section explains at a high level +how the process works. All of the following happens before any listeners start listening and +accepting new connections. + +* During startup, the :ref:`cluster manager ` goes through a + multi-phase initialization where it first initializes static/DNS clusters, then predefined + :ref:`SDS ` clusters. Then it initializes + :ref:`CDS ` if applicable, waits for one response (or failure), + and does the same primary/secondary initialization of CDS provided clusters. +* If clusters use :ref:`active health checking `, Envoy also does a + single active HC round. +* Once cluster manager initialization is done, :ref:`RDS ` and + :ref:`LDS ` initialize (if applicable). The server + doesn't start accepting connections until there has been at least one response (or failure) for + LDS/RDS requests. +* If LDS itself returns a listener that needs an RDS response, Envoy further waits until an RDS + response (or failure) is received. Note that this process takes place on every future listener + addition via LDS and is known as :ref:`listener warming `. +* After all of the previous steps have taken place, the listeners start accepting new connections. + This flow ensures that during hot restart the new process is fully capable of accepting and + processing new connections before the draining of the old process begins. diff --git a/docs/root/intro/arch_overview/listener_filters.rst b/docs/root/intro/arch_overview/listener_filters.rst new file mode 100644 index 000000000000..74635afa3eeb --- /dev/null +++ b/docs/root/intro/arch_overview/listener_filters.rst @@ -0,0 +1,16 @@ +.. 
_arch_overview_listener_filters: + +Listener filters +================ + +As discussed in the :ref:`listener ` section, listener filters may be +used to manipulate connection metadata. The main purpose of listener filters is to make adding +further system integration functions easier by not requiring changes to Envoy core functionality, +and also make interaction between multiple such features more explicit. + +The API for listener filters is relatively simple since ultimately these filters operate on newly +accepted sockets. Filters in the chain can stop and subsequently continue iteration to +further filters. This allows for more complex scenarios such as calling a :ref:`rate limiting +service `, etc. Envoy already includes several listener filters that +are documented in this architecture overview as well as the :ref:`configuration reference +`. diff --git a/docs/root/intro/arch_overview/listeners.rst b/docs/root/intro/arch_overview/listeners.rst new file mode 100644 index 000000000000..30717b38041f --- /dev/null +++ b/docs/root/intro/arch_overview/listeners.rst @@ -0,0 +1,28 @@ +.. _arch_overview_listeners: + +Listeners +========= + +The Envoy configuration supports any number of listeners within a single process. Generally we +recommend running a single Envoy per machine regardless of the number of configured listeners. This +allows for easier operation and a single source of statistics. Currently Envoy only supports TCP +listeners. + +Each listener is independently configured with some number of network level (L3/L4) :ref:`filters +`. When a new connection is received on a listener, the configured +connection local filter stack is instantiated and begins processing subsequent events. The generic +listener architecture is used to perform the vast majority of different proxy tasks that Envoy is +used for (e.g., :ref:`rate limiting `, :ref:`TLS client authentication +`, :ref:`HTTP connection management `, +MongoDB :ref:`sniffing `, raw :ref:`TCP proxy `, +etc.). 
+ +Listeners are optionally also configured with some number of :ref:`listener filters +`. These filters are processed before the network level filters, +and have the opportunity to manipulate the connection metadata, usually to influence how the +connection is processed by later filters or clusters. + +Listeners can also be fetched dynamically via the :ref:`listener discovery service (LDS) +`. + +Listener :ref:`configuration `. diff --git a/docs/root/intro/arch_overview/load_balancing.rst b/docs/root/intro/arch_overview/load_balancing.rst new file mode 100644 index 000000000000..d5bb5fcd20da --- /dev/null +++ b/docs/root/intro/arch_overview/load_balancing.rst @@ -0,0 +1,477 @@ +.. _arch_overview_load_balancing: + +Load balancing +============== + +When a filter needs to acquire a connection to a host in an upstream cluster, the cluster manager +uses a load balancing policy to determine which host is selected. The load balancing policies are +pluggable and are specified on a per upstream cluster basis in the :ref:`configuration +`. Note that if no active health checking policy is :ref:`configured +` for a cluster, all upstream cluster members are considered +healthy. + +.. _arch_overview_load_balancing_types: + +Supported load balancers +------------------------ + +.. _arch_overview_load_balancing_types_round_robin: + +Round robin +^^^^^^^^^^^ + +This is a simple policy in which each healthy upstream host is selected in round +robin order. If :ref:`weights +` are assigned to +endpoints in a locality, then a weighted round robin schedule is used, where +higher weighted endpoints will appear more often in the rotation to achieve the +effective weighting. + +.. 
_arch_overview_load_balancing_types_least_request: + +Weighted least request +^^^^^^^^^^^^^^^^^^^^^^ + +The least request load balancer uses an O(1) algorithm which selects two random healthy hosts and +picks the host which has fewer active requests +(`Research `_ has shown that this +approach is nearly as good as an O(N) full scan). If any host in the cluster has a load balancing +weight greater than 1, the load balancer shifts into a mode where it randomly picks a host and then +uses that host *weight* times. This algorithm is simple and sufficient for load testing. It should +not be used where true weighted least request behavior is desired (generally if request durations +are variable and long in length). We may add a true full scan weighted least request variant in the +future to cover this use case. + +.. _arch_overview_load_balancing_types_ring_hash: + +Ring hash +^^^^^^^^^ + +The ring/modulo hash load balancer implements consistent hashing to upstream hosts. The algorithm is +based on mapping all hosts onto a circle such that the addition or removal of a host from the host +set only affects 1/N requests. This technique is also commonly known as `"ketama" +`_ hashing. A consistent hashing load balancer is only effective +when protocol routing is used that specifies a value to hash on. The minimum ring size governs the +replication factor for each host in the ring. For example, if the minimum ring size is 1024 and +there are 16 hosts, each host will be replicated 64 times. The ring hash load balancer does not +currently support weighting. + +When priority based load balancing is in use, the priority level is also chosen by hash, so the +endpoint selected will still be consistent when the set of backends is stable. + +.. note:: + + The ring hash load balancer does not support :ref:`locality weighted load + balancing `. + +.. _arch_overview_load_balancing_types_maglev: + +Maglev +^^^^^^ + +The Maglev load balancer implements consistent hashing to upstream hosts. 
It uses the algorithm +described in section 3.4 of `this paper `_ +with a fixed table size of 65537 (see section 5.3 of the same paper). Maglev can be used as a drop +in replacement for the :ref:`ring hash load balancer ` +any place in which consistent hashing is desired. Like the ring hash load balancer, a consistent +hashing load balancer is only effective when protocol routing is used that specifies a value to +hash on. + +In general, when compared to the ring hash ("ketama") algorithm, Maglev has substantially faster +table lookup build times as well as host selection times (approximately 10x and 5x respectively +when using a large ring size of 256K entries). The downside of Maglev is that it is not as stable +as ring hash. More keys will move position when hosts are removed (simulations show approximately +double the keys will move). With that said, for many applications including Redis, Maglev is very +likely a superior drop in replacement for ring hash. The advanced reader can use +:repo:`this benchmark ` to compare ring hash +versus Maglev with different parameters. + + +.. _arch_overview_load_balancing_types_random: + +Random +^^^^^^ + +The random load balancer selects a random healthy host. The random load balancer generally performs +better than round robin if no health checking policy is configured. Random selection avoids bias +towards the host in the set that comes after a failed host. + +.. _arch_overview_load_balancing_types_original_destination: + +Original destination +^^^^^^^^^^^^^^^^^^^^ + +This is a special purpose load balancer that can only be used with :ref:`an original destination +cluster `. Upstream host is selected +based on the downstream connection metadata, i.e., connections are opened to the same address as the +destination address of the incoming connection was before the connection was redirected to +Envoy. 
New destinations are added to the cluster by the load balancer on-demand, and the cluster +:ref:`periodically ` cleans out unused hosts +from the cluster. No other :ref:`load balancing type ` can +be used with original destination clusters. + +.. _arch_overview_load_balancing_panic_threshold: + +Panic threshold +--------------- + +During load balancing, Envoy will generally only consider healthy hosts in an upstream cluster. +However, if the percentage of healthy hosts in the cluster becomes too low, Envoy will disregard +health status and balance amongst all hosts. This is known as the *panic threshold*. The default +panic threshold is 50%. This is :ref:`configurable ` via +runtime as well as in the :ref:`cluster configuration +`. The panic threshold +is used to avoid a situation in which host failures cascade throughout the cluster as load +increases. + +.. _arch_overview_load_balancing_priority_levels: + +Priority levels +------------------ + +During load balancing, Envoy will generally only consider hosts configured at the highest priority +level. For each EDS :ref:`LocalityLbEndpoints` an optional +priority may also be specified. When endpoints at the highest priority level (P=0) are healthy, all +traffic will land on endpoints in that priority level. As endpoints for the highest priority level +become unhealthy, traffic will begin to trickle to lower priority levels. + +Currently, it is assumed that each priority level is over-provisioned by a (hard-coded) factor of +1.4. So if 80% of the endpoints are healthy, the priority level is still considered healthy because +80*1.4 > 100. As the number of healthy endpoints dips below 72%, the health of the priority level +goes below 100. At that point the percent of traffic equivalent to the health of P=0 will go to P=0 +and remaining traffic will flow to P=1. + +Assume a simple set-up with 2 priority levels, P=1 100% healthy. 
+ ++----------------------------+---------------------------+----------------------------+ +| P=0 healthy endpoints | Percent of traffic to P=0 | Percent of traffic to P=1 | ++============================+===========================+============================+ +| 100% | 100% | 0% | ++----------------------------+---------------------------+----------------------------+ +| 72% | 100% | 0% | ++----------------------------+---------------------------+----------------------------+ +| 71% | 99% | 1% | ++----------------------------+---------------------------+----------------------------+ +| 50% | 70% | 30% | ++----------------------------+---------------------------+----------------------------+ +| 25% | 35% | 65% | ++----------------------------+---------------------------+----------------------------+ +| 0% | 0% | 100% | ++----------------------------+---------------------------+----------------------------+ + +If P=1 becomes unhealthy, it will continue to take spilled load from P=0 until the sum of the health +P=0 + P=1 goes below 100. At this point the healths will be scaled up to an "effective" health of +100%. 
+ ++------------------------+-------------------------+-----------------+-----------------+ +| P=0 healthy endpoints | P=1 healthy endpoints | Traffic to P=0 | Traffic to P=1 | ++========================+=========================+=================+=================+ +| 100% | 100% | 100% | 0% | ++------------------------+-------------------------+-----------------+-----------------+ +| 72% | 72% | 100% | 0% | ++------------------------+-------------------------+-----------------+-----------------+ +| 71% | 71% | 99% | 1% | ++------------------------+-------------------------+-----------------+-----------------+ +| 50% | 50% | 70% | 30% | ++------------------------+-------------------------+-----------------+-----------------+ +| 25% | 100% | 35% | 65% | ++------------------------+-------------------------+-----------------+-----------------+ +| 25% | 25% | 50% | 50% | ++------------------------+-------------------------+-----------------+-----------------+ + +As more priorities are added, each level consumes load equal to its "scaled" effective health, so +P=2 would only receive traffic if the combined health of P=0 + P=1 was less than 100. 
+ ++-----------------------+-----------------------+-----------------------+----------------+----------------+----------------+ +| P=0 healthy endpoints | P=1 healthy endpoints | P=2 healthy endpoints | Traffic to P=0 | Traffic to P=1 | Traffic to P=2 | ++=======================+=======================+=======================+================+================+================+ +| 100% | 100% | 100% | 100% | 0% | 0% | ++-----------------------+-----------------------+-----------------------+----------------+----------------+----------------+ +| 72% | 72% | 100% | 100% | 0% | 0% | ++-----------------------+-----------------------+-----------------------+----------------+----------------+----------------+ +| 71% | 71% | 100% | 99% | 1% | 0% | ++-----------------------+-----------------------+-----------------------+----------------+----------------+----------------+ +| 50% | 50% | 100% | 70% | 30% | 0% | ++-----------------------+-----------------------+-----------------------+----------------+----------------+----------------+ +| 25% | 100% | 100% | 35% | 65% | 0% | ++-----------------------+-----------------------+-----------------------+----------------+----------------+----------------+ +| 25% | 25% | 100% | 25% | 25% | 50% | ++-----------------------+-----------------------+-----------------------+----------------+----------------+----------------+ + +To sum this up in pseudo algorithms: + +:: + + load to P_0 = min(100, health(P_0) * 100 / total_health) + health(P_X) = 140 * healthy_P_X_backends / total_P_X_backends + total_health = min(100, Σ(health(P_0)...health(P_X))) + load to P_X = 100 - Σ(percent_load(P_0)..percent_load(P_X-1)) + +.. _arch_overview_load_balancing_zone_aware_routing: + +Zone aware routing +------------------ + +We use the following terminology: + +* **Originating/Upstream cluster**: Envoy routes requests from an originating cluster to an upstream + cluster.
+* **Local zone**: The same zone that contains a subset of hosts in both the originating and + upstream clusters. +* **Zone aware routing**: Best effort routing of requests to an upstream cluster host in the local + zone. + +In deployments where hosts in originating and upstream clusters belong to different zones +Envoy performs zone aware routing. There are several preconditions before zone aware routing can be +performed: + +.. _arch_overview_load_balancing_zone_aware_routing_preconditions: + +* Both originating and upstream cluster are not in + :ref:`panic mode `. +* Zone aware :ref:`routing is enabled `. +* The originating cluster has the same number of zones as the upstream cluster. +* The upstream cluster has enough hosts. See + :ref:`here ` for more information. + +The purpose of zone aware routing is to send as much traffic to the local zone in the upstream +cluster as possible while roughly maintaining the same number of requests per second across all +upstream hosts (depending on load balancing policy). + +Envoy tries to push as much traffic as possible to the local upstream zone as long as +roughly the same number of requests per host in the upstream cluster are maintained. The decision of +whether Envoy routes to the local zone or performs cross zone routing depends on the percentage of +healthy hosts in the originating cluster and upstream cluster in the local zone. There are two cases +with regard to percentage relations in the local zone between originating and upstream clusters: + +* The originating cluster local zone percentage is greater than the one in the upstream cluster. + In this case we cannot route all requests from the local zone of the originating cluster to the + local zone of the upstream cluster because that will lead to request imbalance across all upstream + hosts. Instead, Envoy calculates the percentage of requests that can be routed directly to the + local zone of the upstream cluster. 
The rest of the requests are routed cross zone. The specific + zone is selected based on the residual capacity of the zone (that zone will get some local zone + traffic and may have additional capacity Envoy can use for cross zone traffic). +* The originating cluster local zone percentage is smaller than the one in upstream cluster. + In this case the local zone of the upstream cluster can get all of the requests from the + local zone of the originating cluster and also have some space to allow traffic from other zones + in the originating cluster (if needed). + +Note that when using multiple priorities, zone aware routing is currently only supported for P=0. + +.. _arch_overview_load_balancing_locality_weighted_lb: + +Locality weighted load balancing +-------------------------------- + +Another approach to determining how to weight assignments across different zones +and geographical locations is by using explicit weights supplied via EDS in the +:ref:`LocalityLbEndpoints ` message. +This approach is mutually exclusive with the above zone aware routing, since in +the case of locality aware LB, we rely on the management server to provide the +locality weighting, rather than the Envoy-side heuristics used in zone aware +routing. + +When all endpoints are healthy, the locality is picked using a weighted +round-robin schedule, where the locality weight is used for weighting. When some +endpoints in a locality are unhealthy, we adjust the locality weight to reflect +this. As with :ref:`priority levels +`, we assume an over-provision +factor (currently hardcoded at 1.4), which means we do not perform any weight +adjustment when only a small number of endpoints in a locality are unhealthy. + +Assume a simple set-up with 2 localities X and Y, where X has a locality weight +of 1 and Y has a locality weight of 2, L=Y 100% healthy. 
+ ++----------------------------+---------------------------+----------------------------+ +| L=X healthy endpoints | Percent of traffic to L=X | Percent of traffic to L=Y | ++============================+===========================+============================+ +| 100% | 33% | 67% | ++----------------------------+---------------------------+----------------------------+ +| 70% | 33% | 67% | ++----------------------------+---------------------------+----------------------------+ +| 69% | 32% | 68% | ++----------------------------+---------------------------+----------------------------+ +| 50% | 26% | 74% | ++----------------------------+---------------------------+----------------------------+ +| 25% | 15% | 85% | ++----------------------------+---------------------------+----------------------------+ +| 0% | 0% | 100% | ++----------------------------+---------------------------+----------------------------+ + + +To sum this up in pseudo algorithms: + +:: + + health(L_X) = 140 * healthy_X_backends / total_X_backends + effective_weight(L_X) = locality_weight_X * min(100, health(L_X)) + load to L_X = effective_weight(L_X) / Σ_c(effective_weight(L_c)) + +Note that the locality weighted pick takes place after the priority level is +picked. The load balancer follows these steps: + +1. Pick :ref:`priority level `. +2. Pick locality (as described in this section) within priority level from (1). +3. Pick endpoint using cluster specified load balancer within locality from (2). + +Locality weighted load balancing is configured by setting +:ref:`locality_weighted_lb_config +` in the +cluster configuration and providing weights in :ref:`LocalityLbEndpoints +` via :ref:`load_balancing_weight +`. + +This feature is not compatible with :ref:`load balancer subsetting +`, since it is not straightforward to +reconcile locality level weighting with sensible weights for individual subsets. + +.. 
_arch_overview_load_balancer_subsets: + +Load Balancer Subsets +--------------------- + +Envoy may be configured to divide hosts within an upstream cluster into subsets based on metadata +attached to the hosts. Routes may then specify the metadata that a host must match in order to be +selected by the load balancer, with the option of falling back to a predefined set of hosts, +including any host. + +Subsets use the load balancer policy specified by the cluster. The original destination policy may +not be used with subsets because the upstream hosts are not known in advance. Subsets are compatible +with zone aware routing, but be aware that the use of subsets may easily violate the minimum hosts +condition described above. + +If subsets are :ref:`configured ` and a route +specifies no metadata or no subset matching the metadata exists, the subset load balancer initiates +its fallback policy. The default policy is ``NO_ENDPOINT``, in which case the request fails as if +the cluster had no hosts. Conversely, the ``ANY_ENDPOINT`` fallback policy load balances across all +hosts in the cluster, without regard to host metadata. Finally, the ``DEFAULT_SUBSET`` causes +fallback to load balance among hosts that match a specific set of metadata. + +Subsets must be predefined to allow the subset load balancer to efficiently select the correct +subset of hosts. Each definition is a set of keys, which translates to zero or more +subsets. Conceptually, each host that has a metadata value for all of the keys in a definition is +added to a subset specific to its key-value pairs. If no host has all the keys, no subsets result +from the definition. Multiple definitions may be provided, and a single host may appear in multiple +subsets if it matches multiple definitions. + +During routing, the route's metadata match configuration is used to find a specific subset. If there +is a subset with the exact keys and values specified by the route, the subset is used for load +balancing. 
Otherwise, the fallback policy is used. The cluster's subset configuration must, +therefore, contain a definition that has the same keys as a given route in order for subset load +balancing to occur. + +This feature can only be enabled using the V2 configuration API. Furthermore, host metadata is only +supported when using the EDS discovery type for clusters. Host metadata for subset load balancing +must be placed under the filter name ``"envoy.lb"``. Similarly, route metadata match criteria use +the ``"envoy.lb"`` filter name. Host metadata may be hierarchical (e.g., the value for a top-level +key may be a structured value or list), but the subset load balancer only compares top-level keys +and values. Therefore when using structured values, a route's match criteria will only match if an +identical structured value appears in the host's metadata. + +Examples +^^^^^^^^ + +We'll use simple metadata where all values are strings. Assume the following hosts are defined and +associated with a cluster: + +====== ====================== +Host Metadata +====== ====================== +host1 v: 1.0, stage: prod +host2 v: 1.0, stage: prod +host3 v: 1.1, stage: canary +host4 v: 1.2-pre, stage: dev +====== ====================== + +The cluster may enable subset load balancing like this: + +:: + + --- + name: cluster-name + type: EDS + eds_cluster_config: + eds_config: + path: '.../eds.conf' + connect_timeout: + seconds: 10 + lb_policy: LEAST_REQUEST + lb_subset_config: + fallback_policy: DEFAULT_SUBSET + default_subset: + stage: prod + subset_selectors: + - keys: + - v + - stage + - keys: + - stage + +The following table describes some routes and the result of their application to the +cluster. Typically the match criteria would be used with routes matching specific aspects of the +request, such as the path or header information. 
+ +====================== ============= ========================================== +Match Criteria Balances Over Reason +====================== ============= ========================================== +stage: canary host3 Subset of hosts selected +v: 1.2-pre, stage: dev host4 Subset of hosts selected +v: 1.0 host1, host2 Fallback: No subset selector for "v" alone +other: x host1, host2 Fallback: No subset selector for "other" +(none) host1, host2 Fallback: No subset requested +====================== ============= ========================================== + +Metadata match criteria may also be specified on a route's weighted clusters. Metadata match +criteria from the selected weighted cluster are merged with and override the criteria from the +route: + +==================== =============================== ==================== +Route Match Criteria Weighted Cluster Match Criteria Final Match Criteria +==================== =============================== ==================== +stage: canary stage: prod stage: prod +v: 1.0 stage: prod v: 1.0, stage: prod +v: 1.0, stage: prod stage: canary v: 1.0, stage: canary +v: 1.0, stage: prod v: 1.1, stage: canary v: 1.1, stage: canary +(none) v: 1.0 v: 1.0 +v: 1.0 (none) v: 1.0 +==================== =============================== ==================== + + +Example Host With Metadata +************************** + +An EDS ``LbEndpoint`` with host metadata: + +:: + + --- + endpoint: + address: + socket_address: + protocol: TCP + address: 127.0.0.1 + port_value: 8888 + metadata: + filter_metadata: + envoy.lb: + version: '1.0' + stage: 'prod' + + +Example Route With Metadata Criteria +************************************ + +An RDS ``Route`` with metadata match criteria: + +:: + + --- + match: + prefix: / + route: + cluster: cluster-name + metadata_match: + filter_metadata: + envoy.lb: + version: '1.0' + stage: 'prod' diff --git a/docs/root/intro/arch_overview/mongo.rst b/docs/root/intro/arch_overview/mongo.rst new file mode 100644 
index 000000000000..6ae713ea2087 --- /dev/null +++ b/docs/root/intro/arch_overview/mongo.rst @@ -0,0 +1,19 @@ +.. _arch_overview_mongo: + +MongoDB +======= + +Envoy supports a network level MongoDB sniffing filter with the following features: + +* MongoDB wire format BSON parser. +* Detailed MongoDB query/operation statistics including timings and scatter/multi-get counts for + routed clusters. +* Query logging. +* Per callsite statistics via the $comment query parameter. +* Fault injection. + +The MongoDB filter is a good example of Envoy’s extensibility and core abstractions. At Lyft we use +this filter between all applications and our databases. It provides an invaluable source of data +that is agnostic to the application platform and specific MongoDB driver in use. + +MongoDB proxy filter :ref:`configuration reference `. diff --git a/docs/root/intro/arch_overview/network_filters.rst b/docs/root/intro/arch_overview/network_filters.rst new file mode 100644 index 000000000000..8848e10b401d --- /dev/null +++ b/docs/root/intro/arch_overview/network_filters.rst @@ -0,0 +1,22 @@ +.. _arch_overview_network_filters: + +Network (L3/L4) filters +======================= + +As discussed in the :ref:`listener ` section, network level (L3/L4) filters +form the core of Envoy connection handling. The filter API allows for different sets of filters to +be mixed and matched and attached to a given listener. There are three different types of network +filters: + +* **Read**: Read filters are invoked when Envoy receives data from a downstream connection. +* **Write**: Write filters are invoked when Envoy is about to send data to a downstream connection. +* **Read/Write**: Read/Write filters are invoked both when Envoy receives data from a downstream + connection and when it is about to send data to a downstream connection. 
+ +The API for network level filters is relatively simple since ultimately the filters operate on raw +bytes and a small number of connection events (e.g., TLS handshake complete, connection disconnected +locally or remotely, etc.). Filters in the chain can stop and subsequently continue iteration to +further filters. This allows for more complex scenarios such as calling a :ref:`rate limiting +service `, etc. Envoy already includes several network level filters that +are documented in this architecture overview as well as the :ref:`configuration reference +`. diff --git a/docs/root/intro/arch_overview/outlier.rst b/docs/root/intro/arch_overview/outlier.rst new file mode 100644 index 000000000000..e85606def5f5 --- /dev/null +++ b/docs/root/intro/arch_overview/outlier.rst @@ -0,0 +1,149 @@ +.. _arch_overview_outlier_detection: + +Outlier detection +================= + +Outlier detection and ejection is the process of dynamically determining whether some number of +hosts in an upstream cluster are performing unlike the others and removing them from the healthy +:ref:`load balancing ` set. Performance might be along different axes +such as consecutive failures, temporal success rate, temporal latency, etc. Outlier detection is a +form of *passive* health checking. Envoy also supports :ref:`active health checking +`. *Passive* and *active* health checking can be enabled together or +independently, and form the basis for an overall upstream health checking solution. + +Ejection algorithm +------------------ + +Depending on the type of outlier detection, ejection either runs inline (for example in the case of +consecutive 5xx) or at a specified interval (for example in the case of periodic success rate). The +ejection algorithm works as follows: + +#. A host is determined to be an outlier. +#. Envoy checks to make sure the number of ejected hosts is below the allowed threshold (specified + via the :ref:`outlier_detection.max_ejection_percent + ` setting). 
+ If the number of ejected hosts is above the threshold the host is not ejected. +#. The host is ejected for some number of milliseconds. Ejection means that the host is marked + unhealthy and will not be used during load balancing unless the load balancer is in a + :ref:`panic ` scenario. The number of milliseconds + is equal to the :ref:`outlier_detection.base_ejection_time_ms + ` value + multiplied by the number of times the host has been ejected. This causes hosts to get ejected + for longer and longer periods if they continue to fail. +#. An ejected host will automatically be brought back into service after the ejection time has + been satisfied. Generally, outlier detection is used alongside :ref:`active health checking + ` for a comprehensive health checking solution. + +Detection types +--------------- + +Envoy supports the following outlier detection types: + +Consecutive 5xx +^^^^^^^^^^^^^^^ + +If an upstream host returns some number of consecutive 5xx, it will be ejected. Note that in this +case a 5xx means an actual 5xx response code, or an event that would cause the HTTP router to return +one on the upstream's behalf (reset, connection failure, etc.). The number of consecutive 5xx +required for ejection is controlled by the :ref:`outlier_detection.consecutive_5xx +` value. + +Consecutive Gateway Failure +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If an upstream host returns some number of consecutive "gateway errors" (502, 503 or 504 status +code), it will be ejected. Note that this includes events that would cause the HTTP router to +return one of these status codes on the upstream's behalf (reset, connection failure, etc.). The +number of consecutive gateway failures required for ejection is controlled by +the :ref:`outlier_detection.consecutive_gateway_failure +` value. + +Success Rate +^^^^^^^^^^^^ + +Success Rate based outlier ejection aggregates success rate data from every host in a cluster.
Then at given +intervals ejects hosts based on statistical outlier detection. Success Rate outlier ejection will not be +calculated for a host if its request volume over the aggregation interval is less than the +:ref:`outlier_detection.success_rate_request_volume` +value. Moreover, detection will not be performed for a cluster if the number of hosts +with the minimum required request volume in an interval is less than the +:ref:`outlier_detection.success_rate_minimum_hosts` +value. + +Ejection event logging +---------------------- + +A log of outlier ejection events can optionally be produced by Envoy. This is extremely useful +during daily operations since global stats do not provide enough information on which hosts are +being ejected and for what reasons. The log uses a JSON format with one object per line: + +.. code-block:: json + + { + "time": "...", + "secs_since_last_action": "...", + "cluster": "...", + "upstream_url": "...", + "action": "...", + "type": "...", + "num_ejections": "...", + "enforced": "...", + "host_success_rate": "...", + "cluster_success_rate_average": "...", + "cluster_success_rate_ejection_threshold": "..." + } + +time + The time that the event took place. + +secs_since_last_action + The time in seconds since the last action (either an ejection or unejection) + took place. This value will be ``-1`` for the first ejection given there is no + action before the first ejection. + +cluster + The :ref:`cluster ` that owns the ejected host. + +upstream_url + The URL of the ejected host. E.g., ``tcp://1.2.3.4:80``. + +action + The action that took place. Either ``eject`` if a host was ejected or ``uneject`` if it was + brought back into service. + +type + If ``action`` is ``eject``, specifies the type of ejection that took place. Currently type can + be one of ``5xx``, ``GatewayFailure`` or ``SuccessRate``. 
+ +num_ejections + If ``action`` is ``eject``, specifies the number of times the host has been ejected + (local to that Envoy and gets reset if the host gets removed from the upstream cluster for any + reason and then re-added). + +enforced + If ``action`` is ``eject``, specifies if the ejection was enforced. ``true`` means the host was ejected. + ``false`` means the event was logged but the host was not actually ejected. + +host_success_rate + If ``action`` is ``eject``, and ``type`` is ``SuccessRate``, specifies the host's success rate + at the time of the ejection event on a ``0-100`` range. + +.. _arch_overview_outlier_detection_ejection_event_logging_cluster_success_rate_average: + +cluster_success_rate_average + If ``action`` is ``eject``, and ``type`` is ``SuccessRate``, specifies the average success + rate of the hosts in the cluster at the time of the ejection event on a ``0-100`` range. + +.. _arch_overview_outlier_detection_ejection_event_logging_cluster_success_rate_ejection_threshold: + +cluster_success_rate_ejection_threshold + If ``action`` is ``eject``, and ``type`` is ``SuccessRate``, specifies success rate ejection + threshold at the time of the ejection event. + +Configuration reference +----------------------- + +* Cluster manager :ref:`global configuration ` +* Per cluster :ref:`configuration ` +* Runtime :ref:`settings ` +* Statistics :ref:`reference ` diff --git a/docs/root/intro/arch_overview/redis.rst b/docs/root/intro/arch_overview/redis.rst new file mode 100644 index 000000000000..b93830edba16 --- /dev/null +++ b/docs/root/intro/arch_overview/redis.rst @@ -0,0 +1,213 @@ +.. _arch_overview_redis: + +Redis +======= + +Envoy can act as a Redis proxy, partitioning commands among instances in a cluster. +In this mode, the goals of Envoy are to maintain availability and partition tolerance +over consistency. This is the key point when comparing Envoy to `Redis Cluster +`_. 
Envoy is designed as a best-effort cache, +meaning that it will not try to reconcile inconsistent data or keep a globally consistent +view of cluster membership. + +The Redis project offers a thorough reference on partitioning as it relates to Redis. See +"`Partitioning: how to split data among multiple Redis instances +`_". + +**Features of Envoy Redis**: + +* `Redis protocol `_ codec. +* Hash-based partitioning. +* Ketama distribution. +* Detailed command statistics. +* Active and passive healthchecking. + +**Planned future enhancements**: + +* Additional timing stats. +* Circuit breaking. +* Request collapsing for fragmented commands. +* Replication. +* Built-in retry. +* Tracing. +* Hash tagging. + +.. _arch_overview_redis_configuration: + +Configuration +------------- + +For filter configuration details, see the Redis proxy filter +:ref:`configuration reference `. + +The corresponding cluster definition should be configured with +:ref:`ring hash load balancing `. + +If active healthchecking is desired, the cluster should be configured with a +:ref:`Redis healthcheck `. + +If passive healthchecking is desired, also configure +:ref:`outlier detection `. + +For the purposes of passive healthchecking, connect timeouts, command timeouts, and connection +close map to 5xx. All other responses from Redis are counted as a success. + +Supported commands +------------------ + +At the protocol level, pipelines are supported. MULTI (transaction block) is not. +Use pipelining wherever possible for the best performance. + +At the command level, Envoy only supports commands that can be reliably hashed to a server. PING +is the only exception, which Envoy responds to immediately with PONG. Arguments to PING are not +allowed. All other supported commands must contain a key. Supported commands are functionally +identical to the original Redis command except possibly in failure scenarios. + +For details on each command's usage see the official +`Redis command reference `_. + +.. 
csv-table:: + :header: Command, Group + :widths: 1, 1 + + PING, Connection + DEL, Generic + DUMP, Generic + EXISTS, Generic + EXPIRE, Generic + EXPIREAT, Generic + PERSIST, Generic + PEXPIRE, Generic + PEXPIREAT, Generic + PTTL, Generic + RESTORE, Generic + TOUCH, Generic + TTL, Generic + TYPE, Generic + UNLINK, Generic + GEOADD, Geo + GEODIST, Geo + GEOHASH, Geo + GEOPOS, Geo + GEORADIUS_RO, Geo + GEORADIUSBYMEMBER_RO, Geo + HDEL, Hash + HEXISTS, Hash + HGET, Hash + HGETALL, Hash + HINCRBY, Hash + HINCRBYFLOAT, Hash + HKEYS, Hash + HLEN, Hash + HMGET, Hash + HMSET, Hash + HSCAN, Hash + HSET, Hash + HSETNX, Hash + HSTRLEN, Hash + HVALS, Hash + LINDEX, List + LINSERT, List + LLEN, List + LPOP, List + LPUSH, List + LPUSHX, List + LRANGE, List + LREM, List + LSET, List + LTRIM, List + RPOP, List + RPUSH, List + RPUSHX, List + EVAL, Scripting + EVALSHA, Scripting + SADD, Set + SCARD, Set + SISMEMBER, Set + SMEMBERS, Set + SPOP, Set + SRANDMEMBER, Set + SREM, Set + SSCAN, Set + ZADD, Sorted Set + ZCARD, Sorted Set + ZCOUNT, Sorted Set + ZINCRBY, Sorted Set + ZLEXCOUNT, Sorted Set + ZRANGE, Sorted Set + ZRANGEBYLEX, Sorted Set + ZRANGEBYSCORE, Sorted Set + ZRANK, Sorted Set + ZREM, Sorted Set + ZREMRANGEBYLEX, Sorted Set + ZREMRANGEBYRANK, Sorted Set + ZREMRANGEBYSCORE, Sorted Set + ZREVRANGE, Sorted Set + ZREVRANGEBYLEX, Sorted Set + ZREVRANGEBYSCORE, Sorted Set + ZREVRANK, Sorted Set + ZSCAN, Sorted Set + ZSCORE, Sorted Set + APPEND, String + BITCOUNT, String + BITFIELD, String + BITPOS, String + DECR, String + DECRBY, String + GET, String + GETBIT, String + GETRANGE, String + GETSET, String + INCR, String + INCRBY, String + INCRBYFLOAT, String + MGET, String + MSET, String + PSETEX, String + SET, String + SETBIT, String + SETEX, String + SETNX, String + SETRANGE, String + STRLEN, String + +Failure modes +------------- + +If Redis throws an error, we pass that error along as the response to the command. 
Envoy treats a +response from Redis with the error datatype as a normal response and passes it through to the +caller. + +Envoy can also generate its own errors in response to the client. + +.. csv-table:: + :header: Error, Meaning + :widths: 1, 1 + + no upstream host, "The ring hash load balancer did not have a healthy host available at the + ring position chosen for the key." + upstream failure, "The backend did not respond within the timeout period or closed + the connection." + invalid request, "Command was rejected by the first stage of the command splitter due to + datatype or length." + unsupported command, "The command was not recognized by Envoy and therefore cannot be serviced + because it cannot be hashed to a backend server." + finished with n errors, "Fragmented commands which sum the response (e.g. DEL) will return the + total number of errors received if any were received." + upstream protocol error, "A fragmented command received an unexpected datatype or a backend + responded with a response that does not conform to the Redis protocol." + wrong number of arguments for command, "Certain commands check in Envoy that the number of + arguments is correct." + +In the case of MGET, each individual key that cannot be fetched will generate an error response. +For example, if we fetch five keys and two of the keys' backends time out, we would get an error +response for each in place of the value. + +.. code-block:: none + + $ redis-cli MGET a b c d e + 1) "alpha" + 2) "bravo" + 3) (error) upstream failure + 4) (error) upstream failure + 5) "echo" diff --git a/docs/root/intro/arch_overview/runtime.rst b/docs/root/intro/arch_overview/runtime.rst new file mode 100644 index 000000000000..677cb764afc5 --- /dev/null +++ b/docs/root/intro/arch_overview/runtime.rst @@ -0,0 +1,16 @@ +.. _arch_overview_runtime: + +Runtime configuration +===================== + +Envoy supports “runtime” configuration (also known as "feature flags" and "decider").
Configuration +settings can be altered that will affect operation without needing to restart Envoy or change the +primary configuration. The currently supported implementation uses a tree of file system files. +Envoy watches for a symbolic link swap in a configured directory and reloads the tree when that +happens. This type of system is very commonly deployed in large distributed systems. Other +implementations would not be difficult to implement. Supported runtime configuration settings are +documented in the relevant sections of the operations guide. Envoy will operate correctly with +default runtime values and a “null” provider so it is not required that such a system exists to run +Envoy. + +Runtime :ref:`configuration `. diff --git a/docs/root/intro/arch_overview/scripting.rst b/docs/root/intro/arch_overview/scripting.rst new file mode 100644 index 000000000000..bbc3751a53e2 --- /dev/null +++ b/docs/root/intro/arch_overview/scripting.rst @@ -0,0 +1,5 @@ +Scripting +========= + +Envoy supports experimental `Lua `_ scripting as part of a dedicated +:ref:`HTTP filter `. diff --git a/docs/root/intro/arch_overview/service_discovery.rst b/docs/root/intro/arch_overview/service_discovery.rst new file mode 100644 index 000000000000..ae69b505a162 --- /dev/null +++ b/docs/root/intro/arch_overview/service_discovery.rst @@ -0,0 +1,136 @@ +.. _arch_overview_service_discovery: + +Service discovery +================= + +When an upstream cluster is defined in the :ref:`configuration `, +Envoy needs to know how to resolve the members of the cluster. This is known as *service discovery*. + +.. _arch_overview_service_discovery_types: + +Supported service discovery types +--------------------------------- + +.. _arch_overview_service_discovery_types_static: + +Static +^^^^^^ + +Static is the simplest service discovery type. The configuration explicitly specifies the resolved +network name (IP address/port, unix domain socket, etc.) of each upstream host. + +.. 
_arch_overview_service_discovery_types_strict_dns: + +Strict DNS +^^^^^^^^^^ + +When using strict DNS service discovery, Envoy will continuously and asynchronously resolve the +specified DNS targets. Each returned IP address in the DNS result will be considered an explicit +host in the upstream cluster. This means that if the query returns three IP addresses, Envoy will +assume the cluster has three hosts, and all three should be load balanced to. If a host is removed +from the result Envoy assumes it no longer exists and will drain traffic from any existing +connection pools. Note that Envoy never synchronously resolves DNS in the forwarding path. At the +expense of eventual consistency, there is never a worry of blocking on a long running DNS query. + +.. _arch_overview_service_discovery_types_logical_dns: + +Logical DNS +^^^^^^^^^^^ + +Logical DNS uses a similar asynchronous resolution mechanism to strict DNS. However, instead of +strictly taking the results of the DNS query and assuming that they comprise the entire upstream +cluster, a logical DNS cluster only uses the first IP address returned *when a new connection needs +to be initiated*. Thus, a single logical connection pool may contain physical connections to a +variety of different upstream hosts. Connections are never drained. This service discovery type is +optimal for large scale web services that must be accessed via DNS. Such services typically use +round robin DNS to return many different IP addresses. Typically a different result is returned for +each query. If strict DNS were used in this scenario, Envoy would assume that the cluster’s members +were changing during every resolution interval which would lead to draining connection pools, +connection cycling, etc. Instead, with logical DNS, connections stay alive until they get cycled. 
+When interacting with large scale web services, this is the best of all possible worlds: +asynchronous/eventually consistent DNS resolution, long lived connections, and zero blocking in the +forwarding path. + +.. _arch_overview_service_discovery_types_original_destination: + +Original destination +^^^^^^^^^^^^^^^^^^^^ + +Original destination cluster can be used when incoming connections are redirected to Envoy either +via an iptables REDIRECT or TPROXY target or with Proxy Protocol. In these cases requests routed +to an original destination cluster are forwarded to upstream hosts as addressed by the redirection +metadata, without any explicit host configuration or upstream host discovery. Connections to +upstream hosts are pooled and unused hosts are flushed out when they have been idle longer than +:ref:`*cleanup_interval_ms* `, which defaults to +5000ms. If the original destination address is not available, no upstream connection is opened. +Original destination service discovery must be used with the original destination :ref:`load +balancer `. + +.. _arch_overview_service_discovery_types_sds: + +Service discovery service (SDS) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The *service discovery service* is a generic :ref:`REST based API ` +used by Envoy to fetch cluster members. Lyft provides a reference implementation via the Python +`discovery service `_. That implementation uses AWS DynamoDB as +the backing store, however the API is simple enough that it could easily be implemented on top of a +variety of different backing stores. For each SDS cluster, Envoy will periodically fetch the cluster +members from the discovery service. SDS is the preferred service discovery mechanism for a few +reasons: + +* Envoy has explicit knowledge of each upstream host (vs. routing through a DNS resolved load + balancer) and can make more intelligent load balancing decisions. 
+* Extra attributes carried in the discovery API response for each host inform Envoy of the host’s + load balancing weight, canary status, zone, etc. These additional attributes are used globally + by the Envoy mesh during load balancing, statistic gathering, etc. + +Generally active health checking is used in conjunction with the eventually consistent service +discovery service data to make load balancing and routing decisions. This is discussed further in +the following section. + +.. _arch_overview_service_discovery_eventually_consistent: + +On eventually consistent service discovery +------------------------------------------ + +Many existing RPC systems treat service discovery as a fully consistent process. To this end, they +use fully consistent leader election backing stores such as Zookeeper, etcd, Consul, etc. Our +experience has been that operating these backing stores at scale is painful. + +Envoy was designed from the beginning with the idea that service discovery does not require full +consistency. Instead, Envoy assumes that hosts come and go from the mesh in an eventually consistent +way. Our recommended way of deploying a service to service Envoy mesh configuration uses eventually +consistent service discovery along with :ref:`active health checking ` +(Envoy explicitly health checking upstream cluster members) to determine cluster health. This +paradigm has a number of benefits: + +* All health decisions are fully distributed. Thus, network partitions are gracefully handled + (whether the application gracefully handles the partition is a different story). +* When health checking is configured for an upstream cluster, Envoy uses a 2x2 matrix to determine + whether to route to a host: + +.. csv-table:: + :header: Discovery Status, HC OK, HC Failed + :widths: 1, 1, 2 + + Discovered, Route, Don't Route + Absent, Route, Don't Route / Delete + +Host discovered / health check OK + Envoy **will route** to the target host. 
+ +Host absent / health check OK: + Envoy **will route** to the target host. This is very important since the design assumes that the + discovery service can fail at any time. If a host continues to pass health check even after becoming + absent from the discovery data, Envoy will still route. Although it would be impossible to add new + hosts in this scenario, existing hosts will continue to operate normally. When the discovery service + is operating normally again the data will eventually re-converge. + +Host discovered / health check FAIL + Envoy **will not route** to the target host. Health check data is assumed to be more accurate than + discovery data. + +Host absent / health check FAIL + Envoy **will not route and will delete** the target host. This + is the only state in which Envoy will purge host data. diff --git a/docs/root/intro/arch_overview/ssl.rst b/docs/root/intro/arch_overview/ssl.rst new file mode 100644 index 000000000000..cf0ede9bcc83 --- /dev/null +++ b/docs/root/intro/arch_overview/ssl.rst @@ -0,0 +1,97 @@ +.. _arch_overview_ssl: + +TLS +=== + +Envoy supports both :ref:`TLS termination ` in listeners as well as +:ref:`TLS origination ` when making connections to upstream +clusters. Support is sufficient for Envoy to perform standard edge proxy duties for modern web +services as well as to initiate connections with external services that have advanced TLS +requirements (TLS1.2, SNI, etc.). Envoy supports the following TLS features: + +* **Configurable ciphers**: Each TLS listener and client can specify the ciphers that it supports. +* **Client certificates**: Upstream/client connections can present a client certificate in addition + to server certificate verification. +* **Certificate verification and pinning**: Certificate verification options include basic chain + verification, subject name verification, and hash pinning. 
+* **Certificate revocation**: Envoy can check peer certificates against a certificate revocation list + (CRL) if one is :ref:`provided `. +* **ALPN**: TLS listeners support ALPN. The HTTP connection manager uses this information (in + addition to protocol inference) to determine whether a client is speaking HTTP/1.1 or HTTP/2. +* **SNI**: SNI is supported for both server (listener) and client (upstream) connections. +* **Session resumption**: Server connections support resuming previous sessions via TLS session + tickets (see `RFC 5077 `_). Resumption can be performed + across hot restarts and between parallel Envoy instances (typically useful in a front proxy + configuration). + +Underlying implementation +------------------------- + +Currently Envoy is written to use `BoringSSL `_ as the +TLS provider. + +.. _arch_overview_ssl_enabling_verification: + +Enabling certificate verification +--------------------------------- + +Certificate verification of both upstream and downstream connections is not enabled unless the +validation context specifies one or more trusted authority certificates. + +Example configuration +^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: yaml + + static_resources: + listeners: + - name: listener_0 + address: { socket_address: { address: 127.0.0.1, port_value: 10000 } } + filter_chains: + - filters: + - name: envoy.http_connection_manager + # ... + tls_context: + common_tls_context: + validation_context: + trusted_ca: + filename: /usr/local/my-client-ca.crt + clusters: + - name: some_service + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + hosts: [{ socket_address: { address: 127.0.0.2, port_value: 1234 }}] + tls_context: + common_tls_context: + validation_context: + trusted_ca: + filename: /etc/ssl/certs/ca-certificates.crt + +*/etc/ssl/certs/ca-certificates.crt* is the default path for the system CA bundle on Debian systems. +This makes Envoy verify the server identity of *127.0.0.2:1234* in the same way as e.g. 
cURL does on +standard Debian installations. Common paths for system CA bundles on Linux and BSD are + +* /etc/ssl/certs/ca-certificates.crt (Debian/Ubuntu/Gentoo etc.) +* /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem (CentOS/RHEL 7) +* /etc/pki/tls/certs/ca-bundle.crt (Fedora/RHEL 6) +* /etc/ssl/ca-bundle.pem (OpenSUSE) +* /usr/local/etc/ssl/cert.pem (FreeBSD) +* /etc/ssl/cert.pem (OpenBSD) + +See the reference for :ref:`UpstreamTlsContexts ` and +:ref:`DownstreamTlsContexts ` for other TLS options. + +.. _arch_overview_ssl_auth_filter: + +Authentication filter +--------------------- + +Envoy provides a network filter that performs TLS client authentication via principals fetched from +a REST VPN service. This filter matches the presented client certificate hash against the principal +list to determine whether the connection should be allowed or not. Optional IP white listing can +also be configured. This functionality can be used to build edge proxy VPN support for web +infrastructure. + +Client TLS authentication filter :ref:`configuration reference +`. diff --git a/docs/root/intro/arch_overview/statistics.rst b/docs/root/intro/arch_overview/statistics.rst new file mode 100644 index 000000000000..8db8cea5c24d --- /dev/null +++ b/docs/root/intro/arch_overview/statistics.rst @@ -0,0 +1,42 @@ +.. _arch_overview_statistics: + +Statistics +========== + +One of the primary goals of Envoy is to make the network understandable. Envoy emits a large number +of statistics depending on how it is configured. Generally the statistics fall into three categories: + +* **Downstream**: Downstream statistics relate to incoming connections/requests. They are emitted by + listeners, the HTTP connection manager, the TCP proxy filter, etc. +* **Upstream**: Upstream statistics relate to outgoing connections/requests. They are emitted by + connection pools, the router filter, the TCP proxy filter, etc. 
+* **Server**: Server statistics describe how the Envoy server instance is working. Statistics like + server uptime or amount of allocated memory are categorized here. + +A single proxy scenario typically involves both downstream and upstream statistics. The two types +can be used to get a detailed picture of that particular network hop. Statistics from the entire +mesh give a very detailed picture of each hop and overall network health. The statistics emitted are +documented in detail in the operations guide. + +In the v1 API, Envoy only supports statsd as the statistics output format. Both TCP and UDP statsd +are supported. As of the v2 API, Envoy has the ability to support custom, pluggable sinks. :ref:`A +few standard sink implementations` are included in Envoy. +Some sinks also support emitting statistics with tags/dimensions. + +Within Envoy and throughout the documentation, statistics are identified by a canonical string +representation. The dynamic portions of these strings are stripped to become tags. Users can +configure this behavior via :ref:`the Tag Specifier configuration `. + +Envoy emits three types of values as statistics: + +* **Counters**: Unsigned integers that only increase and never decrease. E.g., total requests. +* **Gauges**: Unsigned integers that both increase and decrease. E.g., currently active requests. +* **Histograms**: Unsigned integers that are part of a stream of values that are then aggregated by + the collector to ultimately yield summarized percentile values. E.g., upstream request time. + +Internally, counters and gauges are batched and periodically flushed to improve performance. +Histograms are written as they are received. Note: what were previously referred to as timers have +become histograms as the only difference between the two representations was the units. + +* :ref:`v1 API reference `. +* :ref:`v2 API reference `. 
diff --git a/docs/root/intro/arch_overview/tcp_proxy.rst b/docs/root/intro/arch_overview/tcp_proxy.rst new file mode 100644 index 000000000000..6177a52d9322 --- /dev/null +++ b/docs/root/intro/arch_overview/tcp_proxy.rst @@ -0,0 +1,18 @@ +.. _arch_overview_tcp_proxy: + +TCP proxy +========= + +Since Envoy is fundamentally written as a L3/L4 server, basic L3/L4 proxy is easily implemented. The +TCP proxy filter performs basic 1:1 network connection proxy between downstream clients and upstream +clusters. It can be used by itself as an stunnel replacement, or in conjunction with other filters +such as the :ref:`MongoDB filter ` or the :ref:`rate limit +` filter. + +The TCP proxy filter will respect the +:ref:`connection limits ` +imposed by each upstream cluster's global resource manager. The TCP proxy filter checks with the +upstream cluster's resource manager if it can create a connection without going over that cluster's +maximum number of connections; if it can't, the TCP proxy will not make the connection. + +TCP proxy filter :ref:`configuration reference `. diff --git a/docs/root/intro/arch_overview/terminology.rst b/docs/root/intro/arch_overview/terminology.rst new file mode 100644 index 000000000000..8e63b3c8ab8f --- /dev/null +++ b/docs/root/intro/arch_overview/terminology.rst @@ -0,0 +1,32 @@ +Terminology +=========== + +A few definitions before we dive into the main architecture documentation. Some of the definitions +are slightly contentious within the industry, however they are how Envoy uses them throughout the +documentation and codebase, so *c'est la vie*. + +**Host**: An entity capable of network communication (application on a mobile phone, server, etc.). +In this documentation a host is a logical network application. A physical piece of hardware could +possibly have multiple hosts running on it as long as each of them can be independently addressed. + +**Downstream**: A downstream host connects to Envoy, sends requests, and receives responses. 
+ +**Upstream**: An upstream host receives connections and requests from Envoy and returns responses. + +**Listener**: A listener is a named network location (e.g., port, unix domain socket, etc.) that can +be connected to by downstream clients. Envoy exposes one or more listeners that downstream hosts +connect to. + +**Cluster**: A cluster is a group of logically similar upstream hosts that Envoy connects to. Envoy +discovers the members of a cluster via :ref:`service discovery `. +It optionally determines the health of cluster members via :ref:`active health checking +`. The cluster member that Envoy routes a request to is determined +by the :ref:`load balancing policy `. + +**Mesh**: A group of hosts that coordinate to provide a consistent network topology. In this +documentation, an “Envoy mesh” is a group of Envoy proxies that form a message passing substrate for +a distributed system comprised of many different services and application platforms. + +**Runtime configuration**: Out of band realtime configuration system deployed alongside Envoy. +Configuration settings can be altered that will affect operation without needing to restart Envoy or +change the primary configuration. diff --git a/docs/root/intro/arch_overview/threading_model.rst b/docs/root/intro/arch_overview/threading_model.rst new file mode 100644 index 000000000000..a9d64c2cbbcf --- /dev/null +++ b/docs/root/intro/arch_overview/threading_model.rst @@ -0,0 +1,13 @@ +.. _arch_overview_threading: + +Threading model +=============== + +Envoy uses a single process with multiple threads architecture. A single *master* thread controls +various sporadic coordination tasks while some number of *worker* threads perform listening, +filtering, and forwarding. Once a connection is accepted by a listener, the connection spends the +rest of its lifetime bound to a single worker thread. 
This allows the majority of Envoy to be +largely single threaded (embarrassingly parallel) with a small amount of more complex code handling +coordination between the worker threads. Generally Envoy is written to be 100% non-blocking and for +most workloads we recommend configuring the number of worker threads to be equal to the number of +hardware threads on the machine. diff --git a/docs/root/intro/arch_overview/tracing.rst b/docs/root/intro/arch_overview/tracing.rst new file mode 100644 index 000000000000..033dbaf9f24e --- /dev/null +++ b/docs/root/intro/arch_overview/tracing.rst @@ -0,0 +1,102 @@ +.. _arch_overview_tracing: + +Tracing +======= + +Overview +-------- +Distributed tracing allows developers to obtain visualizations of call flows in large service +oriented architectures. It can be invaluable in understanding serialization, parallelism, and +sources of latency. Envoy supports three features related to system wide tracing: + +* **Request ID generation**: Envoy will generate UUIDs when needed and populate the + :ref:`config_http_conn_man_headers_x-request-id` HTTP header. Applications can forward the + x-request-id header for unified logging as well as tracing. +* **External trace service integration**: Envoy supports pluggable external trace visualization + providers. Currently Envoy supports `LightStep `_, `Zipkin `_ + or any Zipkin compatible backends (e.g. `Jaeger `_). + However, support for other tracing providers would not be difficult to add. +* **Client trace ID joining**: The :ref:`config_http_conn_man_headers_x-client-trace-id` header can + be used to join untrusted request IDs to the trusted internal + :ref:`config_http_conn_man_headers_x-request-id`. + +How to initiate a trace +----------------------- +The HTTP connection manager that handles the request must have the :ref:`tracing +` object set. 
There are several ways tracing can be +initiated: + +* By an external client via the :ref:`config_http_conn_man_headers_x-client-trace-id` + header. +* By an internal service via the :ref:`config_http_conn_man_headers_x-envoy-force-trace` + header. +* Randomly sampled via the :ref:`random_sampling ` + runtime setting. + +The router filter is also capable of creating a child span for egress calls via the +:ref:`start_child_span ` option. + +Trace context propagation +------------------------- +Envoy provides the capability for reporting tracing information regarding communications between +services in the mesh. However, to be able to correlate the pieces of tracing information generated +by the various proxies within a call flow, the services must propagate certain trace context between +the inbound and outbound requests. + +Whichever tracing provider is being used, the service should propagate the +:ref:`config_http_conn_man_headers_x-request-id` to enable logging across the invoked services +to be correlated. + +The tracing providers also require additional context, to enable the parent/child relationships +between the spans (logical units of work) to be understood. This can be achieved by using the +LightStep (via OpenTracing API) or Zipkin tracer directly within the service itself, to extract the +trace context from the inbound request and inject it into any subsequent outbound requests. This +approach would also enable the service to create additional spans, describing work being done +internally within the service, that may be useful when examining the end-to-end trace. + +Alternatively the trace context can be manually propagated by the service: + +* When using the LightStep tracer, Envoy relies on the service to propagate the + :ref:`config_http_conn_man_headers_x-ot-span-context` HTTP header + while sending HTTP requests to other services. 
+ +* When using the Zipkin tracer, Envoy relies on the service to propagate the + B3 HTTP headers ( + :ref:`config_http_conn_man_headers_x-b3-traceid`, + :ref:`config_http_conn_man_headers_x-b3-spanid`, + :ref:`config_http_conn_man_headers_x-b3-parentspanid`, + :ref:`config_http_conn_man_headers_x-b3-sampled`, and + :ref:`config_http_conn_man_headers_x-b3-flags`). The :ref:`config_http_conn_man_headers_x-b3-sampled` + header can also be supplied by an external client to either enable or disable tracing for a particular + request. + +What data each trace contains +----------------------------- +An end-to-end trace is comprised of one or more spans. A +span represents a logical unit of work that has a start time and duration and can contain metadata +associated with it. Each span generated by Envoy contains the following data: + +* Originating service cluster set via :option:`--service-cluster`. +* Start time and duration of the request. +* Originating host set via :option:`--service-node`. +* Downstream cluster set via the :ref:`config_http_conn_man_headers_downstream-service-cluster` + header. +* HTTP URL. +* HTTP method. +* HTTP response code. +* Tracing system-specific metadata. + +The span also includes a name (or operation) which by default is defined as the host of the invoked +service. However this can be customized using a :ref:`config_http_conn_man_route_table_decorator` on +the route. The name can also be overridden using the +:ref:`config_http_filters_router_x-envoy-decorator-operation` header. + +Envoy automatically sends spans to tracing collectors. Depending on the tracing collector, +multiple spans are stitched together using common information such as the globally unique +request ID :ref:`config_http_conn_man_headers_x-request-id` (LightStep) or +the trace ID configuration (Zipkin). See + +* :ref:`v1 API reference ` +* :ref:`v2 API reference ` + +for more information on how to setup tracing in Envoy. 
diff --git a/docs/root/intro/arch_overview/websocket.rst b/docs/root/intro/arch_overview/websocket.rst new file mode 100644 index 000000000000..9d65b3b680e7 --- /dev/null +++ b/docs/root/intro/arch_overview/websocket.rst @@ -0,0 +1,36 @@ +.. _arch_overview_websocket: + +WebSocket support +================= + +Envoy supports upgrading a HTTP/1.1 connection to a WebSocket connection. +Connection upgrade will be allowed only if the downstream client +sends the correct upgrade headers and the matching HTTP route is explicitly +configured to use WebSockets +(:ref:`use_websocket `). +If a request arrives at a WebSocket enabled route without the requisite +upgrade headers, it will be treated as any regular HTTP/1.1 request. + +Since Envoy treats WebSocket connections as plain TCP connections, it +supports all drafts of the WebSocket protocol, independent of their wire +format. Certain HTTP request level features such as redirects, timeouts, +retries, rate limits and shadowing are not supported for WebSocket routes. +However, prefix rewriting, explicit and automatic host rewriting, traffic +shifting and splitting are supported. + +Connection semantics +-------------------- + +Even though WebSocket upgrades occur over HTTP/1.1 connections, WebSockets +proxying works similarly to plain TCP proxy, i.e., Envoy does not interpret +the websocket frames. The downstream client and/or the upstream server are +responsible for properly terminating the WebSocket connection +(e.g., by sending `close frames `_) +and the underlying TCP connection. + +When the connection manager receives a WebSocket upgrade request over a +WebSocket-enabled route, it forwards the request to an upstream server over a +TCP connection. Envoy will not know if the upstream server rejected the upgrade +request. It is the responsibility of the upstream server to terminate the TCP +connection, which would cause Envoy to terminate the corresponding downstream +client connection. 
diff --git a/docs/root/intro/comparison.rst b/docs/root/intro/comparison.rst new file mode 100644 index 000000000000..046d873bc210 --- /dev/null +++ b/docs/root/intro/comparison.rst @@ -0,0 +1,134 @@ +Comparison to similar systems +============================= + +Overall, we believe that Envoy has a unique and compelling feature set for modern service oriented +architectures. Below we compare Envoy to other related systems. Though in any particular area +(edge proxy, software load balancer, service message passing layer) Envoy may not be as feature +rich as some of the solutions below, in aggregate no other solution supplies the same set of +overall features into a single self contained and high performance package. + +**NOTE:** Most of the projects below are under active development. Thus some of the information may +become out of date. If that is the case please let us know and we will fix it. + +`nginx `_ +-------------------------------- + +nginx is the canonical modern web server. It supports serving static content, HTTP L7 reverse proxy +load balancing, HTTP/2, and many other features. nginx has far more overall features than Envoy as +an edge reverse proxy, though we think that most modern service oriented architectures don't +typically make use of them. Envoy provides the following main advantages over nginx as an edge +proxy: + +* Full HTTP/2 transparent proxy. Envoy supports HTTP/2 for both downstream and upstream + communication. nginx only supports HTTP/2 for downstream connections. +* Freely available advanced load balancing. Only nginx plus (the paid server) supports similar + advanced load balancing capabilities as Envoy. +* Ability to run the same software at the edge as well as on each service node. Many infrastructures + run a mix of nginx and haproxy. A single proxy solution at every hop is substantially simpler from + an operations perspective. 
+ +`haproxy `_ +------------------------------------ + +haproxy is the canonical modern software load balancer. It also supports basic HTTP reverse proxy +features. Envoy provides the following main advantages over haproxy as a load balancer: + +* HTTP/2 support. +* Pluggable architecture. +* Integration with a remote service discovery service. +* Integration with a remote global rate limiting service. +* Substantially more detailed statistics. + +`AWS ELB `_ +--------------------------------------------------------- + +Amazon's ELB is the standard solution for service discovery and load balancing for applications in +EC2. Envoy provides the following main advantages over ELB as a load balancer and service discovery +system: + +* Statistics and logging (CloudWatch statistics are delayed and extremely lacking in detail, logs + must be retrieved from S3 and have a fixed format). +* Stability (it is common to see sporadic instability when using ELBs which ends up being impossible + to debug). +* Advanced load balancing and direct connection between nodes. An Envoy mesh avoids an additional + network hop via variably performing elastic hardware. The load balancer can make better decisions + and gather more interesting statistics based on zone, canary status, etc. The load balancer also + supports advanced features such as retry. + +AWS recently released the *application load balancer* product. This product adds HTTP/2 support as +well as basic HTTP L7 request routing to multiple backend clusters. The feature set is still small +compared to Envoy and performance and stability are unknown, but it's clear that AWS will continue +to invest in this area in the future. + +`SmartStack `_ +--------------------------------------------------------------------------- + +SmartStack is an interesting solution which provides additional service discovery and health +checking support on top of haproxy. 
At a high level, SmartStack has most of the same goals as +Envoy (out of process architecture, application platform agnostic, etc.). Envoy provides the +following main advantages over SmartStack as a load balancer and service discovery package: + +* All of the previously mentioned advantages over haproxy. +* Integrated service discovery and active health checking. Envoy provides everything in a single + high performance package. + +`Finagle `_ +----------------------------------------------- + +Finagle is Twitter's Scala/JVM service to service communication library. It is used by Twitter and +many other companies that have a primarily JVM based architecture. It has many of the same features +as Envoy such as service discovery, load balancing, filters, etc. Envoy provides the following main +advantages over Finagle as a load balancer and service discovery package: + +* Eventually consistent service discovery via distributed active health checking. +* Order of magnitude better performance across all metrics (memory consumption, CPU usage, and P99 + latency properties). +* Out of process and application agnostic architecture. Envoy works with any application stack. + +`proxygen `_ and `wangle `_ +----------------------------------------------------------------------------------------------------- + +proxygen is Facebook's high performance C++11 HTTP proxy library, written on top of a Finagle like +C++ library called wangle. From a code perspective, Envoy uses most of the same techniques as +proxygen to obtain high performance as an HTTP library/proxy. Beyond that however the two projects +are not really comparable as Envoy is a complete self contained server with a large feature set +versus a library that must be built into something by each project individually. + +`gRPC `_ +----------------------------- + +gRPC is a new multi-platform message passing system out of Google. 
It uses an IDL to describe an RPC +library and then implements application specific runtimes for a variety of different languages. The +underlying transport is HTTP/2. Although gRPC likely has the goal of implementing many Envoy like +features in the future (load balancing, etc.), as of this writing the various runtimes are somewhat +immature and are primarily focused on serialization/de-serialization. We consider gRPC to be a +companion to Envoy versus a competitor. How Envoy integrates with gRPC is described :ref:`here +`. + +`linkerd `_ +------------------------------------------------- + +linkerd is a standalone, open source RPC routing proxy built on Netty and Finagle (Scala/JVM). +linkerd offers many of Finagle’s features, including latency-aware load balancing, connection +pooling, circuit-breaking, retry budgets, deadlines, tracing, fine-grained instrumentation, and a +traffic routing layer for request-level routing. linkerd provides a pluggable service discovery +interface (with standard support for Consul and ZooKeeper, as well as the Marathon and Kubernetes +APIs). + +linkerd’s memory and CPU requirements are significantly higher than Envoy’s. In contrast to Envoy, +linkerd provides a minimalist configuration language, and explicitly does not support hot reloads, +relying instead on dynamic provisioning and service abstractions. linkerd supports HTTP/1.1, Thrift, +ThriftMux, HTTP/2 (experimental) and gRPC (experimental). + +`nghttp2 `_ +--------------------------------- + +nghttp2 is a project that contains a few different things. Primarily, it contains a library +(nghttp2) that implements the HTTP/2 protocol. Envoy uses this library (with a very thin wrapper +on top) for its HTTP/2 support. The project also contains a very useful load testing tool (h2load) +as well as a reverse proxy (nghttpx). From a comparison perspective, Envoy is most similar to +nghttpx. 
nghttpx is a transparent HTTP/1 <-> HTTP/2 reverse proxy, supports TLS termination, +correctly supports gRPC proxying, among a variety of other features. With that said, we consider +nghttpx to be an excellent example of a variety of proxy features, rather than a robust production +ready solution. Envoy's focus is much more targeted towards observability, general operational +agility, and advanced load balancing features. diff --git a/docs/root/intro/deployment_types/deployment_types.rst b/docs/root/intro/deployment_types/deployment_types.rst new file mode 100644 index 000000000000..889c9c87ebd6 --- /dev/null +++ b/docs/root/intro/deployment_types/deployment_types.rst @@ -0,0 +1,12 @@ +Deployment types +================ + +Envoy is usable in a variety of different scenarios, however it's most useful when deployed as a +*mesh* across all hosts in an infrastructure. This section describes three recommended deployment +types in increasing order of complexity. + +.. toctree:: + + service_to_service + front_proxy + double_proxy diff --git a/docs/root/intro/deployment_types/double_proxy.rst b/docs/root/intro/deployment_types/double_proxy.rst new file mode 100644 index 000000000000..fd2757747bf4 --- /dev/null +++ b/docs/root/intro/deployment_types/double_proxy.rst @@ -0,0 +1,26 @@ +.. _deployment_type_double_proxy: + +Service to service, front proxy, and double proxy +------------------------------------------------- + +.. image:: /_static/double_proxy.svg + :width: 70% + +The above diagram shows the :ref:`front proxy ` configuration alongside +another Envoy cluster running as a *double proxy*. The idea behind the double proxy is that it is +more efficient to terminate TLS and client connections as close as possible to the user (shorter +round trip times for the TLS handshake, faster TCP CWND expansion, less chance for packet loss, +etc.). 
Connections that terminate in the double proxy are then multiplexed onto long lived HTTP/2 +connections running in the main data center. + +In the above diagram, the front Envoy proxy running in region 1 authenticates itself with the front +Envoy proxy running in region 2 via TLS mutual authentication and pinned certificates. This allows +the front Envoy instances running in region 2 to trust elements of the incoming requests that +ordinarily would not be trustable (such as the x-forwarded-for HTTP header). + +Configuration template +^^^^^^^^^^^^^^^^^^^^^^ + +The source distribution includes an example double proxy configuration that is very similar to +the version that Lyft runs in production. See :ref:`here ` for more +information. diff --git a/docs/root/intro/deployment_types/front_proxy.rst b/docs/root/intro/deployment_types/front_proxy.rst new file mode 100644 index 000000000000..f89e8cb17da5 --- /dev/null +++ b/docs/root/intro/deployment_types/front_proxy.rst @@ -0,0 +1,26 @@ +.. _deployment_type_front_proxy: + +Service to service plus front proxy +----------------------------------- + +.. image:: /_static/front_proxy.svg + +The above diagram shows the :ref:`service to service ` +configuration sitting behind an Envoy cluster used as an HTTP L7 edge reverse proxy. The +reverse proxy provides the following features: + +* Terminates TLS. +* Supports both HTTP/1.1 and HTTP/2. +* Full HTTP L7 routing support. +* Talks to the service to service Envoy clusters via the standard :ref:`ingress port + ` and using the discovery service for host + lookup. Thus, the front Envoy hosts work identically to any other Envoy host, other than the + fact that they do not run collocated with another service. This means that they are operated in the + same way and emit the same statistics. + +Configuration template +^^^^^^^^^^^^^^^^^^^^^^ + +The source distribution includes an example front proxy configuration that is very similar to +the version that Lyft runs in production. 
See :ref:`here ` for more +information. diff --git a/docs/root/intro/deployment_types/service_to_service.rst b/docs/root/intro/deployment_types/service_to_service.rst new file mode 100644 index 000000000000..9f16d8063e1e --- /dev/null +++ b/docs/root/intro/deployment_types/service_to_service.rst @@ -0,0 +1,62 @@ +.. _deployment_type_service_to_service: + +Service to service only +----------------------- + +.. image:: /_static/service_to_service.svg + :width: 60% + +The above diagram shows the simplest Envoy deployment which uses Envoy as a communication bus for +all traffic internal to a service oriented architecture (SOA). In this scenario, Envoy exposes +several listeners that are used for local origin traffic as well as service to service traffic. + +Service to service egress listener +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +This is the port used by applications to talk to other services in the infrastructure. For example, +*http://localhost:9001*. HTTP and gRPC requests use the HTTP/1.1 *host* header or the HTTP/2 +*:authority* header to indicate which remote cluster the request is destined for. Envoy handles +service discovery, load balancing, rate limiting, etc. depending on the details in the +configuration. Services only need to know about the local Envoy and do not need to concern +themselves with network topology, whether they are running in development or production, etc. + +This listener supports both HTTP/1.1 or HTTP/2 depending on the capabilities of the application. + +.. _deployment_type_service_to_service_ingress: + +Service to service ingress listener +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +This is the port used by remote Envoys when they want to talk to the local Envoy. For example, +*http://localhost:9211*. Incoming requests are routed to the local service on the configured +port(s). Multiple application ports may be involved depending on application or load balancing +needs (for example if the service needs both an HTTP port and a gRPC port). 
The local Envoy +performs buffering, circuit breaking, etc. as needed. + +Our default configurations use HTTP/2 for all Envoy to Envoy communication, regardless of whether +the application uses HTTP/1.1 or HTTP/2 when egressing out of a local Envoy. HTTP/2 provides +better performance via long lived connections and explicit reset notifications. + +Optional external service egress listeners +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Generally, an explicit egress port is used for each external service that a local service wants +to talk to. This is done because some external service SDKs do not easily support overriding the +*host* header to allow for standard HTTP reverse proxy behavior. For example, +*http://localhost:9250* might be allocated for connections destined for DynamoDB. Instead of using +*host* routing for some external services and dedicated local port routing for others, we recommend +being consistent and using local port routing for all external services. + +Discovery service integration +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The recommended service to service configuration uses an external discovery service for all cluster +lookups. This provides Envoy with the most detailed information possible for use when performing +load balancing, statistics gathering, etc. + +Configuration template +^^^^^^^^^^^^^^^^^^^^^^ + +The source distribution includes an example service to service configuration that is very similar to +the version that Lyft runs in production. See :ref:`here ` for more +information. diff --git a/docs/root/intro/getting_help.rst b/docs/root/intro/getting_help.rst new file mode 100644 index 000000000000..8b9beb68004a --- /dev/null +++ b/docs/root/intro/getting_help.rst @@ -0,0 +1,15 @@ +.. _getting_help: + +Getting help +============ + +We are very interested in building a community around Envoy. Please reach out to us if you are +interested in using it and need help or want to contribute. + +Please see `contact info `_. 
+ +Reporting security vulnerabilities +---------------------------------- + +Please see `security contact info +`_. diff --git a/docs/root/intro/intro.rst b/docs/root/intro/intro.rst new file mode 100644 index 000000000000..014f89650d95 --- /dev/null +++ b/docs/root/intro/intro.rst @@ -0,0 +1,14 @@ +.. _intro: + +Introduction +============ + +.. toctree:: + :maxdepth: 2 + + what_is_envoy + arch_overview/arch_overview + deployment_types/deployment_types + comparison + getting_help + version_history diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst new file mode 100644 index 000000000000..dae201a645e9 --- /dev/null +++ b/docs/root/intro/version_history.rst @@ -0,0 +1,418 @@ +Version history +--------------- + +1.7.0 (Pending) +=============== + +* access log: ability to format START_TIME +* access log: added DYNAMIC_METADATA :ref:`access log formatter `. +* admin: added :http:get:`/config_dump` for dumping current configs +* admin: added :http:get:`/stats/prometheus` as an alternative endpoint for getting stats in prometheus format. +* admin: added :ref:`/runtime_modify endpoint ` to add or change runtime values +* admin: mutations must be sent as POSTs, rather than GETs. Mutations include: + :http:post:`/cpuprofiler`, :http:post:`/healthcheck/fail`, :http:post:`/healthcheck/ok`, + :http:post:`/logging`, :http:post:`/quitquitquit`, :http:post:`/reset_counters`, + :http:post:`/runtime_modify?key1=value1&key2=value2&keyN=valueN`, +* admin: removed `/routes` endpoint; route configs can now be found at the :ref:`/config_dump endpoint `. +* cli: added --config-yaml flag to the Envoy binary. When set its value is interpreted as a yaml + representation of the bootstrap config and overrides --config-path. +* health check: added ability to set :ref:`additional HTTP headers + ` for HTTP health check. +* health check: added support for EDS delivered :ref:`endpoint health status + `. 
+* health check: added interval overrides for health state transitions from :ref:`healthy to unhealthy + `, :ref:`unhealthy to healthy + ` and for subsequent checks on + :ref:`unhealthy hosts `. +* load balancing: added :ref:`weighted round robin + ` support. The round robin + scheduler now respects endpoint weights and also has improved fidelity across + picks. +* load balancer: :ref:`Locality weighted load balancing + ` is now supported. +* logger: added the ability to optionally set the log format via the :option:`--log-format` option. +* logger: all :ref:`logging levels ` can be configured + at run-time: trace debug info warning error critical. +* sockets: added `IP_FREEBIND` socket option support for :ref:`listeners + ` and upstream connections via + :ref:`cluster manager wide + ` and + :ref:`cluster specific ` options. +* sockets: added `IP_TRANSPARENT` socket option support for :ref:`listeners + `. +* tracing: the sampling decision is now delegated to the tracers, allowing the tracer to decide when and if + to use it. For example, if the :ref:`x-b3-sampled ` header + is supplied with the client request, its value will override any sampling decision made by the Envoy proxy. + +1.6.0 (March 20, 2018) +====================== + +* access log: added DOWNSTREAM_REMOTE_ADDRESS, DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT, and + DOWNSTREAM_LOCAL_ADDRESS :ref:`access log formatters `. + DOWNSTREAM_ADDRESS access log formatter has been deprecated. +* access log: added less than or equal (LE) :ref:`comparison filter + `. +* access log: added configuration to :ref:`runtime filter + ` to set default sampling rate, divisor, + and whether to use independent randomness or not. +* admin: added :ref:`/runtime ` admin endpoint to read the + current runtime values. +* build: added support for :repo:`building Envoy with exported symbols + `. This change allows scripts loaded with the Lua filter to + load shared object libraries such as those installed via `LuaRocks `_. 
+* config: added support for sending error details as + `grpc.rpc.Status `_ + in :ref:`DiscoveryRequest `. +* config: added support for :ref:`inline delivery ` of TLS + certificates and private keys. +* config: added restrictions for the backing :ref:`config sources ` + of xDS resources. For filesystem based xDS the file must exist at configuration time. For cluster + based xDS the backing cluster must be statically defined and be of non-EDS type. +* grpc: the Google gRPC C++ library client is now supported as specified in the :ref:`gRPC services + overview ` and :ref:`GrpcService `. +* grpc-json: Added support for :ref:`inline descriptors + `. +* health check: added :ref:`gRPC health check ` + based on `grpc.health.v1.Health `_ + service. +* health check: added ability to set :ref:`host header value + ` for http health check. +* health check: extended the health check filter to support computation of the health check response + based on the :ref:`percentage of healthy servers in upstream clusters + `. +* health check: added setting for :ref:`no-traffic + interval`. +* http : added idle timeout for :ref:`upstream http connections + `. +* http: added support for :ref:`proxying 100-Continue responses + `. +* http: added the ability to pass a URL encoded PEM encoded peer certificate in the + :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header. +* http: added support for trusting additional hops in the + :ref:`config_http_conn_man_headers_x-forwarded-for` request header. +* http: added support for :ref:`incoming HTTP/1.0 + `. +* hot restart: added SIGTERM propagation to children to :ref:`hot-restarter.py + `, which enables using it as a parent of containers. +* ip tagging: added :ref:`HTTP IP Tagging filter`. +* listeners: added support for :ref:`listening for both IPv4 and IPv6 + ` when binding to ::. +* listeners: added support for listening on :ref:`UNIX domain sockets + `. 
+* listeners: added support for :ref:`abstract unix domain sockets ` on + Linux. The abstract namespace can be used by prepending '@' to a socket path. +* load balancer: added cluster configuration for :ref:`healthy panic threshold + ` percentage. +* load balancer: added :ref:`Maglev ` consistent hash + load balancer. +* load balancer: added support for + :ref:`LocalityLbEndpoints` priorities. +* lua: added headers :ref:`replace() ` API. +* lua: extended to support :ref:`metadata object ` API. +* redis: added local `PING` support to the :ref:`Redis filter `. +* redis: added `GEORADIUS_RO` and `GEORADIUSBYMEMBER_RO` to the :ref:`Redis command splitter + ` whitelist. +* router: added DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT, DOWNSTREAM_LOCAL_ADDRESS, + DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT, PROTOCOL, and UPSTREAM_METADATA :ref:`header + formatters `. The CLIENT_IP header formatter + has been deprecated. +* router: added gateway-error :ref:`retry-on ` policy. +* router: added support for route matching based on :ref:`URL query string parameters + `. +* router: added support for more granular weighted cluster routing by allowing the :ref:`total_weight + ` to be specified in configuration. +* router: added support for :ref:`custom request/response headers + ` with mixed static and dynamic values. +* router: added support for :ref:`direct responses `. + I.e., sending a preconfigured HTTP response without proxying anywhere. +* router: added support for :ref:`HTTPS redirects + ` on specific routes. +* router: added support for :ref:`prefix_rewrite + ` for redirects. +* router: added support for :ref:`stripping the query string + ` for redirects. +* router: added support for downstream request/upstream response + :ref:`header manipulation ` in :ref:`weighted + cluster `. +* router: added support for :ref:`range based header matching + ` for request routing. +* squash: added support for the :ref:`Squash microservices debugger `. 
+ Allows debugging an incoming request to a microservice in the mesh. +* stats: added metrics service API implementation. +* stats: added native :ref:`DogStatsd ` support. +* stats: added support for :ref:`fixed stats tag values + ` which will be added to all metrics. +* tcp proxy: added support for specifying a :ref:`metadata matcher + ` for upstream + clusters in the tcp filter. +* tcp proxy: improved TCP proxy to correctly proxy TCP half-close. +* tcp proxy: added :ref:`idle timeout + `. +* tcp proxy: access logs now bring an IP address without a port when using DOWNSTREAM_ADDRESS. + Use :ref:`DOWNSTREAM_REMOTE_ADDRESS ` instead. +* tracing: added support for dynamically loading an :ref:`OpenTracing tracer + `. +* tracing: when using the Zipkin tracer, it is now possible for clients to specify the sampling + decision (using the :ref:`x-b3-sampled ` header) and + have the decision propagated through to subsequently invoked services. +* tracing: when using the Zipkin tracer, it is no longer necessary to propagate the + :ref:`x-ot-span-context ` header. + See more on trace context propagation :ref:`here `. +* transport sockets: added transport socket interface to allow custom implementations of transport + sockets. A transport socket provides read and write logic with buffer encryption and decryption + (if applicable). The existing TLS implementation has been refactored with the interface. +* upstream: added support for specifying an :ref:`alternate stats name + ` while emitting stats for clusters. +* Many small bug fixes and performance improvements not listed. + +1.5.0 (December 4, 2017) +======================== + +* access log: added fields for :ref:`UPSTREAM_LOCAL_ADDRESS and DOWNSTREAM_ADDRESS + `. +* admin: added :ref:`JSON output ` for stats admin endpoint. +* admin: added basic :ref:`Prometheus output ` for stats admin + endpoint. Histograms are not currently output. +* admin: added ``version_info`` to the :ref:`/clusters admin endpoint`. 
+* config: the :ref:`v2 API ` is now considered production ready. +* config: added :option:`--v2-config-only` CLI flag. +* cors: added :ref:`CORS filter `. +* health check: added :ref:`x-envoy-immediate-health-check-fail + ` header support. +* health check: added :ref:`reuse_connection ` option. +* http: added :ref:`per-listener stats `. +* http: end-to-end HTTP flow control is now complete across both connections, streams, and filters. +* load balancer: added :ref:`subset load balancer `. +* load balancer: added ring size and hash :ref:`configuration options + `. This used to be configurable via runtime. The runtime + configuration was deleted without deprecation as we are fairly certain no one is using it. +* log: added the ability to optionally log to a file instead of stderr via the + :option:`--log-path` option. +* listeners: added :ref:`drain_type ` option. +* lua: added experimental :ref:`Lua filter `. +* mongo filter: added :ref:`fault injection `. +* mongo filter: added :ref:`"drain close" ` support. +* outlier detection: added :ref:`HTTP gateway failure type `. + See `DEPRECATED.md `_ + for outlier detection stats deprecations in this release. +* redis: the :ref:`redis proxy filter ` is now considered + production ready. +* redis: added :ref:`"drain close" ` functionality. +* router: added :ref:`x-envoy-overloaded ` support. +* router: added :ref:`regex ` route matching. +* router: added :ref:`custom request headers ` + for upstream requests. +* router: added :ref:`downstream IP hashing + ` for HTTP ketama routing. +* router: added :ref:`cookie hashing `. +* router: added :ref:`start_child_span ` option + to create child span for egress calls. +* router: added optional :ref:`upstream logs `. +* router: added complete :ref:`custom append/override/remove support + ` of request/response headers. +* router: added support to :ref:`specify response code during redirect + `. 
+* router: added :ref:`configuration ` + to return either a 404 or 503 if the upstream cluster does not exist. +* runtime: added :ref:`comment capability `. +* server: change default log level (:option:`-l`) to `info`. +* stats: maximum stat/name sizes and maximum number of stats are now variable via the + :option:`--max-obj-name-len` and :option:`--max-stats` options. +* tcp proxy: added :ref:`access logging `. +* tcp proxy: added :ref:`configurable connect retries + `. +* tcp proxy: enable use of :ref:`outlier detector `. +* tls: added :ref:`SNI support `. +* tls: added support for specifying :ref:`TLS session ticket keys + `. +* tls: allow configuration of the :ref:`min + ` and :ref:`max + ` TLS protocol versions. +* tracing: added :ref:`custom trace span decorators `. +* Many small bug fixes and performance improvements not listed. + +1.4.0 (August 24, 2017) +======================= + +* macOS is :repo:`now supported `. (A few features + are missing such as hot restart and original destination routing). +* YAML is now directly supported for :ref:`config files `. +* Added /routes admin endpoint. +* End-to-end flow control is now supported for TCP proxy, HTTP/1, and HTTP/2. HTTP flow control + that includes filter buffering is incomplete and will be implemented in 1.5.0. +* Log verbosity :repo:`compile time flag ` added. +* Hot restart :repo:`compile time flag ` added. +* Original destination :ref:`cluster ` + and :ref:`load balancer ` added. +* :ref:`WebSocket ` is now supported. +* Virtual cluster priorities have been hard removed without deprecation as we are reasonably sure + no one is using this feature. +* Route :ref:`validate_clusters ` option added. +* :ref:`x-envoy-downstream-service-node ` + header added. +* :ref:`x-forwarded-client-cert ` header + added. +* Initial HTTP/1 forward proxy support for :ref:`absolute URLs + ` has been added. +* HTTP/2 codec settings are now :ref:`configurable `. +* gRPC/JSON transcoder :ref:`filter ` added. 
+* gRPC web :ref:`filter ` added. +* Configurable timeout for the rate limit service call in the :ref:`network + ` and :ref:`HTTP ` rate limit + filters. +* :ref:`x-envoy-retry-grpc-on ` header added. +* :ref:`LDS API ` added. +* TLS :ref:`require_client_certificate ` + option added. +* :ref:`Configuration check tool ` added. +* :ref:`JSON schema check tool ` added. +* Config validation mode added via the :option:`--mode` option. +* :option:`--local-address-ip-version` option added. +* IPv6 support is now complete. +* UDP :ref:`statsd_ip_address ` option added. +* Per-cluster :ref:`DNS resolvers ` added. +* :ref:`Fault filter ` enhancements and fixes. +* Several features are :repo:`deprecated as of the 1.4.0 release `. They + will be removed at the beginning of the 1.5.0 release cycle. We explicitly call out that the + `HttpFilterConfigFactory` filter API has been deprecated in favor of + `NamedHttpFilterConfigFactory`. +* Many small bug fixes and performance improvements not listed. + +1.3.0 (May 17, 2017) +==================== + +* As of this release, we now have an official :repo:`breaking change policy + `. Note that there are numerous breaking configuration + changes in this release. They are not listed here. Future releases will adhere to the policy and + have clear documentation on deprecations and changes. +* Bazel is now the canonical build system (replacing CMake). There have been a huge number of + changes to the development/build/test flow. See :repo:`/bazel/README.md` and + :repo:`/ci/README.md` for more information. +* :ref:`Outlier detection ` has been expanded to include success + rate variance, and all parameters are now configurable in both runtime and in the JSON + configuration. +* TCP level :ref:`listener ` and + :ref:`cluster ` connections now + have configurable receive buffer limits at which point connection level back pressure is applied. + Full end to end flow control will be available in a future release. 
+* :ref:`Redis health checking ` has been added as an active + health check type. Full Redis support will be documented/supported in 1.4.0. +* :ref:`TCP health checking ` now supports a + "connect only" mode that only checks if the remote server can be connected to without + writing/reading any data. +* `BoringSSL `_ is now the only supported TLS provider. + The default cipher suites and ECDH curves have been updated with more modern defaults for both + :ref:`listener ` and + :ref:`cluster ` connections. +* The `header value match` :ref:`rate limit action + ` has been expanded to include an *expect + match* parameter. +* Route level HTTP rate limit configurations now do not inherit the virtual host level + configurations by default. The :ref:`include_vh_rate_limits + ` option can be used to inherit the virtual host level options if + desired. +* HTTP routes can now add request headers on a per route and per virtual host basis via the + :ref:`request_headers_to_add ` option. +* The :ref:`example configurations ` have been refreshed to demonstrate the + latest features. +* :ref:`per_try_timeout_ms ` can now be configured in + a route's retry policy in addition to via the :ref:`x-envoy-upstream-rq-per-try-timeout-ms + ` HTTP header. +* :ref:`HTTP virtual host matching ` now includes support + for prefix wildcard domains (e.g., `*.lyft.com`). +* The default for tracing random sampling has been changed to 100% and is still configurable in + :ref:`runtime `. +* :ref:`HTTP tracing configuration ` has been extended to allow tags + to be populated from arbitrary HTTP headers. +* The :ref:`HTTP rate limit filter ` can now be applied to internal, + external, or all requests via the `request_type` option. +* :ref:`Listener binding ` now requires specifying an `address` field. This can be + used to bind a listener to both a specific address as well as a port. +* The :ref:`MongoDB filter ` now emits a stat for queries that + do not have `$maxTimeMS` set. 
+* The :ref:`MongoDB filter ` now emits logs that are fully valid + JSON. +* The CPU profiler output path is now :ref:`configurable `. +* A :ref:`watchdog system ` has been added that can kill the server if a deadlock + is detected. +* A :ref:`route table checking tool ` has been added that can + be used to test route tables before use. +* We have added an :ref:`example repo ` that shows how to compile/link a custom filter. +* Added additional cluster wide information related to outlier detection to the :ref:`/clusters + admin endpoint `. +* Multiple SANs can now be verified via the :ref:`verify_subject_alt_name + ` setting. Additionally, URI type SANs can be verified. +* HTTP filters can now be passed :ref:`opaque configuration + ` specified on a per route basis. +* By default Envoy now has a built in crash handler that will print a back trace. This behavior can + be disabled if desired via the ``--define=signal_trace=disabled`` Bazel option. +* Zipkin has been added as a supported :ref:`tracing provider `. +* Numerous small changes and fixes not listed here. + +1.2.0 (March 7, 2017) +===================== + +* :ref:`Cluster discovery service (CDS) API `. +* :ref:`Outlier detection ` (passive health checking). +* Envoy configuration is now checked against a :ref:`JSON schema `. +* :ref:`Ring hash ` consistent load balancer, as well as HTTP + consistent hash routing :ref:`based on a policy `. +* Vastly :ref:`enhanced global rate limit configuration ` via the HTTP + rate limiting filter. +* HTTP routing to a cluster :ref:`retrieved from a header + `. +* :ref:`Weighted cluster ` HTTP + routing. +* :ref:`Auto host rewrite ` during HTTP + routing. +* :ref:`Regex header matching ` during HTTP routing. +* HTTP access log :ref:`runtime filter `. +* LightStep tracer :ref:`parent/child span association `. +* :ref:`Route discovery service (RDS) API `. +* HTTP router :ref:`x-envoy-upstream-rq-timeout-alt-response header + ` support. 
+* *use_original_dst* and *bind_to_port* :ref:`listener options ` (useful for + iptables based transparent proxy support). +* TCP proxy filter :ref:`route table support `. +* Configurable :ref:`stats flush interval `. +* Various :ref:`third party library upgrades `, including using BoringSSL as + the default SSL provider. +* No longer maintain closed HTTP/2 streams for priority calculations. Leads to substantial memory + savings for large meshes. +* Numerous small changes and fixes not listed here. + +1.1.0 (November 30, 2016) +========================= + +* Switch from Jannson to RapidJSON for our JSON library (allowing for a configuration schema in + 1.2.0). +* Upgrade :ref:`recommended version ` of various other libraries. +* :ref:`Configurable DNS refresh rate ` for + DNS service discovery types. +* Upstream circuit breaker configuration can be :ref:`overridden via runtime + `. +* :ref:`Zone aware routing support `. +* Generic :ref:`header matching routing rule `. +* HTTP/2 :ref:`graceful connection draining ` (double + GOAWAY). +* DynamoDB filter :ref:`per shard statistics ` (pre-release AWS + feature). +* Initial release of the :ref:`fault injection HTTP filter `. +* HTTP :ref:`rate limit filter ` enhancements (note that the + configuration for HTTP rate limiting is going to be overhauled in 1.2.0). +* Added :ref:`refused-stream retry policy `. +* Multiple :ref:`priority queues ` for upstream clusters + (configurable on a per route basis, with separate connection pools, circuit breakers, etc.). +* Added max connection circuit breaking to the :ref:`TCP proxy filter `. +* Added :ref:`CLI ` options for setting the logging file flush interval as well + as the drain/shutdown time during hot restart. +* A very large number of performance enhancements for core HTTP/TCP proxy flows as well as a + few new configuration flags to allow disabling expensive features if they are not needed + (specifically request ID generation and dynamic response code stats). 
+* Support Mongo 3.2 in the :ref:`Mongo sniffing filter `. +* Lots of other small fixes and enhancements not listed. + +1.0.0 (September 12, 2016) +========================== + +Initial open source release. diff --git a/docs/root/intro/what_is_envoy.rst b/docs/root/intro/what_is_envoy.rst new file mode 100644 index 000000000000..9aa139484cc1 --- /dev/null +++ b/docs/root/intro/what_is_envoy.rst @@ -0,0 +1,125 @@ +What is Envoy +-------------- + +Envoy is an L7 proxy and communication bus designed for large modern service oriented architectures. +The project was born out of the belief that: + + *The network should be transparent to applications. When network and application problems do occur + it should be easy to determine the source of the problem.* + +In practice, achieving the previously stated goal is incredibly difficult. Envoy attempts to do so +by providing the following high level features: + +**Out of process architecture:** Envoy is a self contained process that is designed to run +alongside every application server. All of the Envoys form a transparent communication mesh in which +each application sends and receives messages to and from localhost and is unaware of the network +topology. The out of process architecture has two substantial benefits over the traditional library +approach to service to service communication: + +* Envoy works with any application language. A single Envoy deployment can form a mesh between + Java, C++, Go, PHP, Python, etc. It is becoming increasingly common for service oriented + architectures to use multiple application frameworks and languages. Envoy transparently bridges + the gap. +* As anyone that has worked with a large service oriented architecture knows, deploying library + upgrades can be incredibly painful. Envoy can be deployed and upgraded quickly across an + entire infrastructure transparently. + +**Modern C++11 code base:** Envoy is written in C++11. 
Native code was chosen because we +believe that an architectural component such as Envoy should get out of the way as much as possible. +Modern application developers already deal with tail latencies that are difficult to reason about +due to deployments in shared cloud environments and the use of very productive but not particularly +well performing languages such as PHP, Python, Ruby, Scala, etc. Native code provides generally +excellent latency properties that don't add additional confusion to an already confusing situation. +Unlike other native code proxy solutions written in C, C++11 provides both excellent developer +productivity and performance. + +**L3/L4 filter architecture:** At its core, Envoy is an L3/L4 network proxy. A pluggable +:ref:`filter ` chain mechanism allows filters to be written to +perform different TCP proxy tasks and inserted into the main server. Filters have already been +written to support various tasks such as raw :ref:`TCP proxy `, +:ref:`HTTP proxy `, :ref:`TLS client certificate +authentication `, etc. + +**HTTP L7 filter architecture:** HTTP is such a critical component of modern application +architectures that Envoy :ref:`supports ` an additional HTTP L7 filter +layer. HTTP filters can be plugged into the HTTP connection management subsystem that perform +different tasks such as :ref:`buffering `, :ref:`rate limiting +`, :ref:`routing/forwarding `, sniffing +Amazon's :ref:`DynamoDB `, etc. + +**First class HTTP/2 support:** When operating in HTTP mode, Envoy :ref:`supports +` both HTTP/1.1 and HTTP/2. Envoy can operate as a transparent +HTTP/1.1 to HTTP/2 proxy in both directions. This means that any combination of HTTP/1.1 and HTTP/2 +clients and target servers can be bridged. The recommended service to service configuration uses +HTTP/2 between all Envoys to create a mesh of persistent connections that requests and responses can +be multiplexed over. Envoy does not support SPDY as the protocol is being phased out. 
+ +**HTTP L7 routing:** When operating in HTTP mode, Envoy supports a +:ref:`routing ` subsystem that is capable of routing and redirecting +requests based on path, authority, content type, :ref:`runtime ` values, etc. +This functionality is most useful when using Envoy as a front/edge proxy but is also leveraged when +building a service to service mesh. + +**gRPC support:** `gRPC `_ is an RPC framework from Google that uses HTTP/2 +as the underlying multiplexed transport. Envoy :ref:`supports ` all of the +HTTP/2 features required to be used as the routing and load balancing substrate for gRPC requests +and responses. The two systems are very complementary. + +**MongoDB L7 support:** `MongoDB `_ is a popular database used in modern +web applications. Envoy :ref:`supports ` L7 sniffing, statistics production, +and logging for MongoDB connections. + +**DynamoDB L7 support**: `DynamoDB `_ is Amazon’s hosted key/value +NOSQL datastore. Envoy :ref:`supports ` L7 sniffing and statistics production +for DynamoDB connections. + +**Service discovery:** :ref:`Service discovery ` is a critical +component of service oriented architectures. Envoy supports multiple service discovery methods +including asynchronous DNS resolution and REST based lookup via a :ref:`service discovery service +`. + +**Health checking:** The :ref:`recommended ` +way of building an Envoy mesh is to treat service discovery as an eventually consistent process. +Envoy includes a :ref:`health checking ` subsystem which can +optionally perform active health checking of upstream service clusters. Envoy then uses the union of +service discovery and health checking information to determine healthy load balancing targets. Envoy +also supports passive health checking via an :ref:`outlier detection +` subsystem. + +**Advanced load balancing:** :ref:`Load balancing ` among different +components in a distributed system is a complex problem. 
Because Envoy is a self contained proxy +instead of a library, it is able to implement advanced load balancing techniques in a single place +and have them be accessible to any application. Currently Envoy includes support for :ref:`automatic +retries `, :ref:`circuit breaking `, +:ref:`global rate limiting ` via an external rate limiting service, +:ref:`request shadowing `, and +:ref:`outlier detection `. Future support is planned for request +racing. + +**Front/edge proxy support:** Although Envoy is primarily designed as a service to service +communication system, there is benefit in using the same software at the edge (observability, +management, identical service discovery and load balancing algorithms, etc.). Envoy includes enough +features to make it usable as an edge proxy for most modern web application use cases. This includes +:ref:`TLS ` termination, HTTP/1.1 and HTTP/2 :ref:`support +`, as well as HTTP L7 :ref:`routing `. + +**Best in class observability:** As stated above, the primary goal of Envoy is to make the network +transparent. However, problems occur both at the network level and at the application level. Envoy +includes robust :ref:`statistics ` support for all subsystems. `statsd +`_ (and compatible providers) is the currently supported statistics +sink, though plugging in a different one would not be difficult. Statistics are also viewable via +the :ref:`administration ` port. Envoy also supports distributed +:ref:`tracing ` via thirdparty providers. + +**Dynamic configuration:** Envoy optionally consumes a layered set of :ref:`dynamic configuration +APIs `. Implementors can use these APIs to build complex centrally +managed deployments if desired. 
+ +Design goals +^^^^^^^^^^^^ + +A short note on the design goals of the code itself: Although Envoy is by no means slow (we have +spent considerable time optimizing certain fast paths), the code has been written to be modular and +easy to test versus aiming for the greatest possible absolute performance. It's our view that this +is a more efficient use of time given that typical deployments will be alongside languages and +runtimes many times slower and with many times greater memory usage. diff --git a/docs/root/operations/admin.rst b/docs/root/operations/admin.rst new file mode 100644 index 000000000000..69ce90f25c59 --- /dev/null +++ b/docs/root/operations/admin.rst @@ -0,0 +1,253 @@ +.. _operations_admin_interface: + +Administration interface +======================== + +Envoy exposes a local administration interface that can be used to query and +modify different aspects of the server: + +* :ref:`v1 API reference ` +* :ref:`v2 API reference ` + +.. _operations_admin_interface_security: + +.. attention:: + + The administration interface in its current form both allows destructive operations to be + performed (e.g., shutting down the server) as well as potentially exposes private information + (e.g., stats, cluster names, cert info, etc.). It is **critical** that access to the + administration interface is only allowed via a secure network. It is also **critical** that hosts + that access the administration interface are **only** attached to the secure network (i.e., to + avoid CSRF attacks). This involves setting up an appropriate firewall or optimally only allowing + access to the administration listener via localhost. This can be accomplished with a v2 + configuration like the following: + + .. code-block:: yaml + + admin: + access_log_path: /tmp/admin_access.log + address: + socket_address: { address: 127.0.0.1, port_value: 9901 } + + In the future additional security options will be added to the administration interface. 
This + work is tracked in `this `_ issue. + + All mutations should be sent as HTTP POST operations. For a limited time, they will continue + to work with HTTP GET, with a warning logged. + +.. http:get:: / + + Render an HTML home page with a table of links to all available options. + +.. http:get:: /help + + Print a textual table of all available options. + +.. http:get:: /certs + + List out all loaded TLS certificates, including file name, serial number, and days until + expiration. + +.. _operations_admin_interface_clusters: + +.. http:get:: /clusters + + List out all configured :ref:`cluster manager ` clusters. This + information includes all discovered upstream hosts in each cluster along with per host statistics. + This is useful for debugging service discovery issues. + + Cluster manager information + - ``version_info`` string -- the version info string of the last loaded + :ref:`CDS` update. + If envoy does not have :ref:`CDS` setup, the + output will read ``version_info::static``. + + Cluster wide information + - :ref:`circuit breakers` settings for all priority settings. + + - Information about :ref:`outlier detection` if a detector is installed. Currently + :ref:`success rate average`, + and :ref:`ejection threshold` + are presented. Both of these values could be ``-1`` if there was not enough data to calculate them in the last + :ref:`interval`. + + - ``added_via_api`` flag -- ``false`` if the cluster was added via static configuration, ``true`` + if it was added via the :ref:`CDS` api. + + Per host statistics + .. 
csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + cx_total, Counter, Total connections + cx_active, Gauge, Total active connections + cx_connect_fail, Counter, Total connection failures + rq_total, Counter, Total requests + rq_timeout, Counter, Total timed out requests + rq_success, Counter, Total requests with non-5xx responses + rq_error, Counter, Total requests with 5xx responses + rq_active, Gauge, Total active requests + healthy, String, The health status of the host. See below + weight, Integer, Load balancing weight (1-100) + zone, String, Service zone + canary, Boolean, Whether the host is a canary + success_rate, Double, "Request success rate (0-100). -1 if there was not enough + :ref:`request volume` + in the :ref:`interval` + to calculate it" + + Host health status + A host is either healthy or unhealthy because of one or more different failing health states. + If the host is healthy the ``healthy`` output will be equal to *healthy*. + + If the host is not healthy, the ``healthy`` output will be composed of one or more of the + following strings: + + */failed_active_hc*: The host has failed an :ref:`active health check + `. + + */failed_eds_health*: The host was marked unhealthy by EDS. + + */failed_outlier_check*: The host has failed an outlier detection check. + +.. _operations_admin_interface_config_dump: + +.. http:get:: /config_dump + + Dump currently loaded configuration from various Envoy components as JSON-serialized proto + messages. Currently, only route configs are available but more are on the way. See + :api:`envoy/admin/v2/config_dump.proto` for more information. That proto is in draft state and is + subject to change. + +.. http:post:: /cpuprofiler + + Enable or disable the CPU profiler. Requires compiling with gperftools. + +.. _operations_admin_interface_healthcheck_fail: + +.. http:post:: /healthcheck/fail + + Fail inbound health checks. This requires the use of the HTTP :ref:`health check filter + `. 
This is useful for draining a server prior to shutting it + down or doing a full restart. Invoking this command will universally fail health check requests + regardless of how the filter is configured (pass through, etc.). + +.. _operations_admin_interface_healthcheck_ok: + +.. http:post:: /healthcheck/ok + + Negate the effect of :http:post:`/healthcheck/fail`. This requires the use of the HTTP + :ref:`health check filter `. + +.. http:get:: /hot_restart_version + + See :option:`--hot-restart-version`. + +.. _operations_admin_interface_logging: + +.. http:post:: /logging + + Enable/disable different logging levels on different subcomponents. Generally only used during + development. + +.. http:post:: /quitquitquit + + Cleanly exit the server. + +.. http:post:: /reset_counters + + Reset all counters to zero. This is useful along with :http:get:`/stats` during debugging. Note + that this does not drop any data sent to statsd. It just effects local output of the + :http:get:`/stats` command. + +.. http:get:: /server_info + + Outputs information about the running server. Sample output looks like: + +.. code-block:: none + + envoy 267724/RELEASE live 1571 1571 0 + +The fields are: + +* Process name +* Compiled SHA and build type +* Health check state (live or draining) +* Current hot restart epoch uptime in seconds +* Total uptime in seconds (across all hot restarts) +* Current hot restart epoch + +.. _operations_admin_interface_stats: + +.. http:get:: /stats + + Outputs all statistics on demand. This includes only counters and gauges. Histograms are not + output as Envoy currently has no built in histogram support and relies on statsd for + aggregation. This command is very useful for local debugging. See :ref:`here ` + for more information. + + .. http:get:: /stats?format=json + + Outputs /stats in JSON format. This can be used for programmatic access of stats. + + .. http:get:: /stats?format=prometheus + + or alternatively, + + .. 
http:get:: /stats/prometheus + + Outputs /stats in `Prometheus `_ + v0.0.4 format. This can be used to integrate with a Prometheus server. Currently, only counters and + gauges are output. Histograms will be output in a future update. + +.. _operations_admin_interface_runtime: + +.. http:get:: /runtime + + Outputs all runtime values on demand in JSON format. See :ref:`here ` for + more information on how these values are configured and utilized. The output include the list of + the active runtime override layers and the stack of layer values for each key. Empty strings + indicate no value, and the final active value from the stack also is included in a separate key. + Example output: + +.. code-block:: json + + { + "layers": [ + "disk", + "override", + "admin", + ], + "entries": { + "my_key": { + "layer_values": [ + "my_disk_value", + "", + "" + ], + "final_value": "my_disk_value" + }, + "my_second_key": { + "layer_values": [ + "my_second_disk_value", + "my_disk_override_value", + "my_admin_override_value" + ], + "final_value": "my_admin_override_value" + } + } + } + +.. _operations_admin_interface_runtime_modify: + +.. http:post:: /runtime_modify?key1=value1&key2=value2&keyN=valueN + + Adds or modifies runtime values as passed in query parameters. To delete a previously added key, + use an empty string as the value. Note that deletion only applies to overrides added via this + endpoint; values loaded from disk can be modified via override but not deleted. + +.. attention:: + + Use the /runtime_modify endpoint with care. Changes are effectively immediately. It is + **critical** that the admin interface is :ref:`properly secured + `. diff --git a/docs/root/operations/cli.rst b/docs/root/operations/cli.rst new file mode 100644 index 000000000000..64fd31f0fe2d --- /dev/null +++ b/docs/root/operations/cli.rst @@ -0,0 +1,231 @@ +.. 
_operations_cli: + +Command line options +==================== + +Envoy is driven both by a JSON configuration file as well as a set of command line options. The +following are the command line options that Envoy supports. + +.. option:: -c , --config-path + + *(optional)* The path to the v1 or v2 :ref:`JSON/YAML/proto3 configuration + file `. If this flag is missing, :option:`--config-yaml` is required. + This will be parsed as a :ref:`v2 bootstrap configuration file + ` and on failure, subject to + :option:`--v2-config-only`, will be considered as a :ref:`v1 JSON + configuration file `. For v2 configuration files, valid + extensions are ``.json``, ``.yaml``, ``.pb`` and ``.pb_text``, which indicate + JSON, YAML, `binary proto3 + `_ and `text + proto3 + `_ + formats respectively. + +.. option:: --config-yaml + + *(optional)* The YAML string for a v2 bootstrap configuration. If :option:`--config-path` is also set, + the values in this YAML string will override and merge with the bootstrap loaded from :option:`--config-path`. + Because YAML is a superset of JSON, a JSON string may also be passed to :option:`--config-yaml`. + :option:`--config-yaml` is not compatible with bootstrap v1. + + Example overriding the node id on the command line: + + ./envoy -c bootstrap.yaml --config-yaml "node: {id: 'node1'}" + +.. option:: --v2-config-only + + *(optional)* This flag determines whether the configuration file should only + be parsed as a :ref:`v2 bootstrap configuration file + `. If false (default), when a v2 bootstrap + config parse fails, a second attempt to parse the config as a :ref:`v1 JSON + configuration file ` will be made. + +.. option:: --mode + + *(optional)* One of the operating modes for Envoy: + + * ``serve``: *(default)* Validate the JSON configuration and then serve traffic normally. 
+ + * ``validate``: Validate the JSON configuration and then exit, printing either an "OK" message (in + which case the exit code is 0) or any errors generated by the configuration file (exit code 1). + No network traffic is generated, and the hot restart process is not performed, so no other Envoy + process on the machine will be disturbed. + +.. option:: --admin-address-path + + *(optional)* The output file path where the admin address and port will be written. + +.. option:: --local-address-ip-version + + *(optional)* The IP address version that is used to populate the server local IP address. This + parameter affects various headers including what is appended to the X-Forwarded-For (XFF) header. + The options are ``v4`` or ``v6``. The default is ``v4``. + +.. option:: --base-id + + *(optional)* The base ID to use when allocating shared memory regions. Envoy uses shared memory + regions during :ref:`hot restart `. Most users will never have to + set this option. However, if Envoy needs to be run multiple times on the same machine, each + running Envoy will need a unique base ID so that the shared memory regions do not conflict. + +.. option:: --concurrency + + *(optional)* The number of :ref:`worker threads ` to run. If not + specified defaults to the number of hardware threads on the machine. + +.. option:: -l , --log-level + + *(optional)* The logging level. Non developers should generally never set this option. See the + help text for the available log levels and the default. + +.. option:: --log-path + + *(optional)* The output file path where logs should be written. This file will be re-opened + when SIGUSR1 is handled. If this is not set, log to stderr. + +.. option:: --log-format + + *(optional)* The format string to use for laying out the log message metadata. If this is not + set, a default format string ``"[%Y-%m-%d %T.%e][%t][%l][%n] %v"`` is used. 
+ + The supported format flags are (with example output): + + :%v: The actual message to log ("some user text") + :%t: Thread id ("1232") + :%P: Process id ("3456") + :%n: Logger's name ("filter") + :%l: The log level of the message ("debug", "info", etc.) + :%L: Short log level of the message ("D", "I", etc.) + :%a: Abbreviated weekday name ("Tue") + :%A: Full weekday name ("Tuesday") + :%b: Abbreviated month name ("Mar") + :%B: Full month name ("March") + :%c: Date and time representation ("Tue Mar 27 15:25:06 2018") + :%C: Year in 2 digits ("18") + :%Y: Year in 4 digits ("2018") + :%D, %x: Short MM/DD/YY date ("03/27/18") + :%m: Month 01-12 ("03") + :%d: Day of month 01-31 ("27") + :%H: Hours in 24 format 00-23 ("15") + :%I: Hours in 12 format 01-12 ("03") + :%M: Minutes 00-59 ("25") + :%S: Seconds 00-59 ("06") + :%e: Millisecond part of the current second 000-999 ("008") + :%f: Microsecond part of the current second 000000-999999 ("008789") + :%F: Nanosecond part of the current second 000000000-999999999 ("008789123") + :%p: AM/PM ("AM") + :%r: 12-hour clock ("03:25:06 PM") + :%R: 24-hour HH:MM time, equivalent to %H:%M ("15:25") + :%T, %X: ISO 8601 time format (HH:MM:SS), equivalent to %H:%M:%S ("13:25:06") + :%z: ISO 8601 offset from UTC in timezone ([+/-]HH:MM) ("-07:00") + :%%: The % sign ("%") + +.. option:: --restart-epoch + + *(optional)* The :ref:`hot restart ` epoch. (The number of times + Envoy has been hot restarted instead of a fresh start). Defaults to 0 for the first start. This + option tells Envoy whether to attempt to create the shared memory region needed for hot restart, + or whether to open an existing one. It should be incremented every time a hot restart takes place. + The :ref:`hot restart wrapper ` sets the *RESTART_EPOCH* environment + variable which should be passed to this option in most cases. + +.. option:: --hot-restart-version + + *(optional)* Outputs an opaque hot restart compatibility version for the binary. 
This can be + matched against the output of the :http:get:`/hot_restart_version` admin endpoint to determine + whether the new binary and the running binary are hot restart compatible. + +.. option:: --service-cluster + + *(optional)* Defines the local service cluster name where Envoy is running. The + local service cluster name is first sourced from the :ref:`Bootstrap node + ` message's :ref:`cluster + ` field. This CLI option provides an alternative + method for specifying this value and will override any value set in bootstrap + configuration. It should be set if any of the following features are used: + :ref:`statsd `, :ref:`health check cluster + verification `, + :ref:`runtime override directory `, + :ref:`user agent addition `, + :ref:`HTTP global rate limiting `, + :ref:`CDS `, and :ref:`HTTP tracing + `, either via this CLI option or in the bootstrap + configuration. + +.. option:: --service-node + + *(optional)* Defines the local service node name where Envoy is running. The + local service node name is first sourced from the :ref:`Bootstrap node + ` message's :ref:`id + ` field. This CLI option provides an alternative + method for specifying this value and will override any value set in bootstrap + configuration. It should be set if any of the following features are used: + :ref:`statsd `, :ref:`CDS + `, and :ref:`HTTP tracing + `, either via this CLI option or in the bootstrap + configuration. + +.. option:: --service-zone + + *(optional)* Defines the local service zone where Envoy is running. The local + service zone is first sourced from the :ref:`Bootstrap node + ` message's :ref:`locality.zone + ` field. This CLI option provides an + alternative method for specifying this value and will override any value set + in bootstrap configuration. It should be set if discovery service routing is + used and the discovery service exposes :ref:`zone data + `, either via this CLI option or in + the bootstrap configuration. 
The meaning of zone is context dependent, e.g. + `Availability Zone (AZ) + `_ + on AWS, `Zone `_ on GCP, + etc. + + +.. option:: --file-flush-interval-msec + + *(optional)* The file flushing interval in milliseconds. Defaults to 10 seconds. + This setting is used during file creation to determine the duration between flushes + of buffers to files. The buffer will flush every time it gets full, or every time + the interval has elapsed, whichever comes first. Adjusting this setting is useful + when tailing :ref:`access logs ` in order to + get more (or less) immediate flushing. + +.. option:: --drain-time-s + + *(optional)* The time in seconds that Envoy will drain connections during a hot restart. See the + :ref:`hot restart overview ` for more information. Defaults to 600 + seconds (10 minutes). Generally the drain time should be less than the parent shutdown time + set via the :option:`--parent-shutdown-time-s` option. How the two settings are configured + depends on the specific deployment. In edge scenarios, it might be desirable to have a very long + drain time. In service to service scenarios, it might be possible to make the drain and shutdown + time much shorter (e.g., 60s/90s). + +.. option:: --parent-shutdown-time-s + + *(optional)* The time in seconds that Envoy will wait before shutting down the parent process + during a hot restart. See the :ref:`hot restart overview ` for more + information. Defaults to 900 seconds (15 minutes). + +.. option:: --max-obj-name-len + + *(optional)* The maximum name length (in bytes) of the name field in a cluster/route_config/listener. + This setting is typically used in scenarios where the cluster names are auto generated, and often exceed + the built-in limit of 60 characters. Defaults to 60. + + .. attention:: + + This setting affects the output of :option:`--hot-restart-version`. 
If you started envoy with this + option set to a non default value, you should use the same option (and same value) for subsequent hot + restarts. + +.. option:: --max-stats + + *(optional)* The maximum number of stats that can be shared between hot-restarts. This setting + affects the output of :option:`--hot-restart-version`; the same value must be used to hot + restart. Defaults to 16384. + +.. option:: --disable-hot-restart + + *(optional)* This flag disables Envoy hot restart for builds that have it enabled. By default, hot + restart is enabled. diff --git a/docs/root/operations/fs_flags.rst b/docs/root/operations/fs_flags.rst new file mode 100644 index 000000000000..72aaca1f54d4 --- /dev/null +++ b/docs/root/operations/fs_flags.rst @@ -0,0 +1,13 @@ +.. _operations_file_system_flags: + +File system flags +================= + +Envoy supports file system "flags" that alter state at startup. This is used to persist changes +between restarts if necessary. The flag files should be placed in the directory specified in the +:ref:`flags_path ` configuration option. The currently supported +flag files are: + +drain + If this file exists, Envoy will start in HC failing mode, similar to after the + :http:post:`/healthcheck/fail` command has been executed. diff --git a/docs/root/operations/hot_restarter.rst b/docs/root/operations/hot_restarter.rst new file mode 100644 index 000000000000..a4b17c5ca75c --- /dev/null +++ b/docs/root/operations/hot_restarter.rst @@ -0,0 +1,37 @@ +.. _operations_hot_restarter: + +Hot restart Python wrapper +========================== + +Typically, Envoy will be :ref:`hot restarted ` for config changes and +binary updates. However, in many cases, users will wish to use a standard process manager such as +monit, runit, etc. We provide :repo:`/restarter/hot-restarter.py` to make this straightforward. + +The restarter is invoked like so: + +.. 
code-block:: console + + hot-restarter.py start_envoy.sh + +`start_envoy.sh` might be defined like so (using salt/jinja like syntax): + +.. code-block:: jinja + + #!/bin/bash + + ulimit -n {{ pillar.get('envoy_max_open_files', '102400') }} + exec /usr/sbin/envoy -c /etc/envoy/envoy.cfg --restart-epoch $RESTART_EPOCH --service-cluster {{ grains['cluster_name'] }} --service-node {{ grains['service_node'] }} --service-zone {{ grains.get('ec2_availability-zone', 'unknown') }} + +The *RESTART_EPOCH* environment variable is set by the restarter on each restart and can be passed +to the :option:`--restart-epoch` option. + +The restarter handles the following signals: + +* **SIGTERM**: Will cleanly terminate all child processes and exit. +* **SIGHUP**: Will hot restart by re-invoking whatever is passed as the first argument to the + hot restart script. +* **SIGCHLD**: If any of the child processes shut down unexpectedly, the restart script will shut + everything down and exit to avoid being in an unexpected state. The controlling process manager + should then restart the restarter script to start Envoy again. +* **SIGUSR1**: Will be forwarded to Envoy as a signal to reopen all access logs. This is used for + atomic move and reopen log rotation. diff --git a/docs/root/operations/operations.rst b/docs/root/operations/operations.rst new file mode 100644 index 000000000000..8f813eff3eb9 --- /dev/null +++ b/docs/root/operations/operations.rst @@ -0,0 +1,14 @@ +.. _operations: + +Operations and administration +============================= + +.. toctree:: + :maxdepth: 2 + + cli + hot_restarter + admin + stats_overview + runtime + fs_flags diff --git a/docs/root/operations/runtime.rst b/docs/root/operations/runtime.rst new file mode 100644 index 000000000000..4fdb15ddf70d --- /dev/null +++ b/docs/root/operations/runtime.rst @@ -0,0 +1,8 @@ +.. 
_operations_runtime: + +Runtime +======= + +:ref:`Runtime configuration ` can be used to modify various server settings +without restarting Envoy. The runtime settings that are available depend on how the server is +configured. They are documented in the relevant sections of the :ref:`configuration guide `. diff --git a/docs/root/operations/stats_overview.rst b/docs/root/operations/stats_overview.rst new file mode 100644 index 000000000000..84c94984eba5 --- /dev/null +++ b/docs/root/operations/stats_overview.rst @@ -0,0 +1,13 @@ +.. _operations_stats: + +Statistics overview +=================== + +Envoy outputs numerous statistics which depend on how the server is configured. They can be seen +locally via the :http:get:`/stats` command and are typically sent to a :ref:`statsd cluster +`. The statistics that are output are documented in the relevant +sections of the :ref:`configuration guide `. Some of the more important statistics that will +almost always be used can be found in the following sections: + +* :ref:`HTTP connection manager ` +* :ref:`Upstream cluster ` diff --git a/docs/root/start/distro/ambassador.rst b/docs/root/start/distro/ambassador.rst new file mode 100644 index 000000000000..54c19279c56d --- /dev/null +++ b/docs/root/start/distro/ambassador.rst @@ -0,0 +1,125 @@ +.. _install_ambassador: + +Envoy as an API Gateway in Kubernetes +===================================== + +A common scenario for using Envoy is deploying it as an edge service (API +Gateway) in Kubernetes. `Ambassador `_ is an open +source distribution of Envoy designed for Kubernetes. Ambassador uses Envoy for +all L4/L7 management and Kubernetes for reliability, availability, and +scalability. Ambassador operates as a specialized control plane to expose +Envoy's functionality as Kubernetes annotations. + +This example will walk through how you can deploy Envoy on Kubernetes via +Ambassador. 
+ +Deploying Ambassador +-------------------- + +Ambassador is configured via Kubernetes deployments. To install Ambassador/Envoy +on Kubernetes, run the following if you're using a cluster with RBAC enabled: + +.. code-block:: console + + kubectl apply -f https://www.getambassador.io/yaml/ambassador/ambassador-rbac.yaml + +or this if you are not using RBAC: + +.. code-block:: console + + kubectl apply -f https://www.getambassador.io/yaml/ambassador/ambassador-no-rbac.yaml + +The above YAML will create a Kubernetes deployment for Ambassador that includes +readiness and liveness checks. By default, it will also create 3 instances of +Ambassador. Each Ambassador instance consists of an Envoy proxy along with the +Ambassador control plane. + +We'll now need to create a Kubernetes service to point to the Ambassador +deployment. In this example, we'll use a ``LoadBalancer`` service. If your +cluster doesn't support ``LoadBalancer`` services, you'll need to change to a +``NodePort`` or ``ClusterIP``. + +.. code-block:: yaml + + --- + apiVersion: v1 + kind: Service + metadata: + labels: + service: ambassador + name: ambassador + spec: + type: LoadBalancer + ports: + - port: 80 + targetPort: 80 + selector: + service: ambassador + +Save this YAML to a file ``ambassador-svc.yaml``. Then, deploy this service to +Kubernetes: + +.. code-block:: console + + kubectl apply -f ambassador-svc.yaml + +At this point, Envoy is now running on your cluster, along with the Ambassador +control plane. + +Configuring Ambassador +---------------------- + +Ambassador uses Kubernetes annotations to add or remove configuration. This +sample YAML will add a route to Google, similar to the basic configuration +example in the :ref:`Getting Started guide `. + +.. 
code-block:: yaml + + --- + apiVersion: v1 + kind: Service + metadata: + name: google + annotations: + getambassador.io/config: | + --- + apiVersion: ambassador/v0 + kind: Mapping + name: google_mapping + prefix: /google/ + service: https://google.com:443 + host_rewrite: www.google.com + spec: + type: ClusterIP + clusterIP: None + +Save the above into a file called ``google.yaml``. Then run: + +.. code-block:: console + + kubectl apply -f google.yaml + +Ambassador will detect the change to your Kubernetes annotation and add the +route to Envoy. Note that we used a dummy service in this example; typically, +you would associate the annotation with your real Kubernetes service. + +Testing the mapping +------------------- + +You can test this mapping by getting the external IP address for the Ambassador +service, and then sending a request via ``curl``. + +.. code-block:: console + + $ kubectl get svc ambassador + NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE + ambassador 10.19.241.98 35.225.154.81 80:32491/TCP 15m + $ curl -v 35.225.154.81/google/ + +More +---- + +Ambassador exposes multiple Envoy features on mappings, such as CORS, weighted +round robin, gRPC, TLS, and timeouts. For more information, read the +`configuration documentation +`_. diff --git a/docs/root/start/sandboxes/front_proxy.rst b/docs/root/start/sandboxes/front_proxy.rst new file mode 100644 index 000000000000..73510f0e65e8 --- /dev/null +++ b/docs/root/start/sandboxes/front_proxy.rst @@ -0,0 +1,228 @@ +.. _install_sandboxes_front_proxy: + +Front Proxy +=========== + +To get a flavor of what Envoy has to offer as a front proxy, we are releasing a +`docker compose `_ sandbox that deploys a front +envoy and a couple of services (simple flask apps) colocated with a running +service envoy. The three containers will be deployed inside a virtual network +called ``envoymesh``. + +Below you can see a graphic showing the docker compose deployment: + +.. 
image:: /_static/docker_compose_v0.1.svg + :width: 100% + +All incoming requests are routed via the front envoy, which is acting as a reverse proxy sitting on +the edge of the ``envoymesh`` network. Port ``80`` is mapped to port ``8000`` by docker compose +(see :repo:`/examples/front-proxy/docker-compose.yml`). Moreover, notice +that all traffic routed by the front envoy to the service containers is actually routed to the +service envoys (routes setup in :repo:`/examples/front-proxy/front-envoy.yaml`). In turn the service +envoys route the request to the flask app via the loopback address (routes setup in +:repo:`/examples/front-proxy/service-envoy.yaml`). This setup +illustrates the advantage of running service envoys collocated with your services: all requests are +handled by the service envoy, and efficiently routed to your services. + +Running the Sandbox +~~~~~~~~~~~~~~~~~~~ + +The following documentation runs through the setup of an envoy cluster organized +as is described in the image above. + +**Step 1: Install Docker** + +Ensure that you have a recent versions of ``docker, docker-compose`` and +``docker-machine`` installed. + +A simple way to achieve this is via the `Docker Toolbox `_. + +**Step 2: Docker Machine setup** + +First let's create a new machine which will hold the containers:: + + $ docker-machine create --driver virtualbox default + $ eval $(docker-machine env default) + +**Step 4: Clone the Envoy repo, and start all of our containers** + +If you have not cloned the envoy repo, clone it with ``git clone git@github.com:envoyproxy/envoy`` +or ``git clone https://github.com/envoyproxy/envoy.git``:: + + $ pwd + envoy/examples/front-proxy + $ docker-compose up --build -d + $ docker-compose ps + Name Command State Ports + ------------------------------------------------------------------------------------------------------------- + example_service1_1 /bin/sh -c /usr/local/bin/ ... Up 80/tcp + example_service2_1 /bin/sh -c /usr/local/bin/ ... 
Up 80/tcp + example_front-envoy_1 /bin/sh -c /usr/local/bin/ ... Up 0.0.0.0:8000->80/tcp, 0.0.0.0:8001->8001/tcp + +**Step 5: Test Envoy's routing capabilities** + +You can now send a request to both services via the front-envoy. + +For service1:: + + $ curl -v $(docker-machine ip default):8000/service/1 + * Trying 192.168.99.100... + * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) + > GET /service/1 HTTP/1.1 + > Host: 192.168.99.100:8000 + > User-Agent: curl/7.43.0 + > Accept: */* + > + < HTTP/1.1 200 OK + < content-type: text/html; charset=utf-8 + < content-length: 89 + < x-envoy-upstream-service-time: 1 + < server: envoy + < date: Fri, 26 Aug 2016 19:39:19 GMT + < x-envoy-protocol-version: HTTP/1.1 + < + Hello from behind Envoy (service 1)! hostname: f26027f1ce28 resolvedhostname: 172.19.0.6 + * Connection #0 to host 192.168.99.100 left intact + +For service2:: + + $ curl -v $(docker-machine ip default):8000/service/2 + * Trying 192.168.99.100... + * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) + > GET /service/2 HTTP/1.1 + > Host: 192.168.99.100:8000 + > User-Agent: curl/7.43.0 + > Accept: */* + > + < HTTP/1.1 200 OK + < content-type: text/html; charset=utf-8 + < content-length: 89 + < x-envoy-upstream-service-time: 2 + < server: envoy + < date: Fri, 26 Aug 2016 19:39:23 GMT + < x-envoy-protocol-version: HTTP/1.1 + < + Hello from behind Envoy (service 2)! hostname: 92f4a3737bbc resolvedhostname: 172.19.0.2 + * Connection #0 to host 192.168.99.100 left intact + +Notice that each request, while sent to the front envoy, was correctly routed +to the respective application. + +**Step 6: Test Envoy's load balancing capabilities** + +Now let's scale up our service1 nodes to demonstrate the clustering abilities +of envoy.:: + + $ docker-compose scale service1=3 + Creating and starting example_service1_2 ... done + Creating and starting example_service1_3 ... 
done + +Now if we send a request to service1 multiple times, the front envoy will load balance the +requests by doing a round robin of the three service1 machines:: + + $ curl -v $(docker-machine ip default):8000/service/1 + * Trying 192.168.99.100... + * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) + > GET /service/1 HTTP/1.1 + > Host: 192.168.99.100:8000 + > User-Agent: curl/7.43.0 + > Accept: */* + > + < HTTP/1.1 200 OK + < content-type: text/html; charset=utf-8 + < content-length: 89 + < x-envoy-upstream-service-time: 1 + < server: envoy + < date: Fri, 26 Aug 2016 19:40:21 GMT + < x-envoy-protocol-version: HTTP/1.1 + < + Hello from behind Envoy (service 1)! hostname: 85ac151715c6 resolvedhostname: 172.19.0.3 + * Connection #0 to host 192.168.99.100 left intact + $ curl -v $(docker-machine ip default):8000/service/1 + * Trying 192.168.99.100... + * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) + > GET /service/1 HTTP/1.1 + > Host: 192.168.99.100:8000 + > User-Agent: curl/7.43.0 + > Accept: */* + > + < HTTP/1.1 200 OK + < content-type: text/html; charset=utf-8 + < content-length: 89 + < x-envoy-upstream-service-time: 1 + < server: envoy + < date: Fri, 26 Aug 2016 19:40:22 GMT + < x-envoy-protocol-version: HTTP/1.1 + < + Hello from behind Envoy (service 1)! hostname: 20da22cfc955 resolvedhostname: 172.19.0.5 + * Connection #0 to host 192.168.99.100 left intact + $ curl -v $(docker-machine ip default):8000/service/1 + * Trying 192.168.99.100... + * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) + > GET /service/1 HTTP/1.1 + > Host: 192.168.99.100:8000 + > User-Agent: curl/7.43.0 + > Accept: */* + > + < HTTP/1.1 200 OK + < content-type: text/html; charset=utf-8 + < content-length: 89 + < x-envoy-upstream-service-time: 1 + < server: envoy + < date: Fri, 26 Aug 2016 19:40:24 GMT + < x-envoy-protocol-version: HTTP/1.1 + < + Hello from behind Envoy (service 1)! 
hostname: f26027f1ce28 resolvedhostname: 172.19.0.6 + * Connection #0 to host 192.168.99.100 left intact + +**Step 7: enter containers and curl services** + +In addition to using ``curl`` from your host machine, you can also enter the +containers themselves and ``curl`` from inside them. To enter a container you +can use ``docker-compose exec <container_name> /bin/bash``. For example we can +enter the ``front-envoy`` container, and ``curl`` for services locally:: + + $ docker-compose exec front-envoy /bin/bash + root@81288499f9d7:/# curl localhost:80/service/1 + Hello from behind Envoy (service 1)! hostname: 85ac151715c6 resolvedhostname: 172.19.0.3 + root@81288499f9d7:/# curl localhost:80/service/1 + Hello from behind Envoy (service 1)! hostname: 20da22cfc955 resolvedhostname: 172.19.0.5 + root@81288499f9d7:/# curl localhost:80/service/1 + Hello from behind Envoy (service 1)! hostname: f26027f1ce28 resolvedhostname: 172.19.0.6 + root@81288499f9d7:/# curl localhost:80/service/2 + Hello from behind Envoy (service 2)! hostname: 92f4a3737bbc resolvedhostname: 172.19.0.2 + +**Step 8: enter containers and curl admin** + +When envoy runs it also attaches an ``admin`` to your desired port. In the example +configs the admin is bound to port ``8001``. We can ``curl`` it to gain useful information. +For example you can ``curl`` ``/server_info`` to get information about the +envoy version you are running. Additionally you can ``curl`` ``/stats`` to get +statistics. For example inside ``frontenvoy`` we can get:: + + $ docker-compose exec front-envoy /bin/bash + root@e654c2c83277:/# curl localhost:8001/server_info + envoy 10e00b/RELEASE live 142 142 0 + root@e654c2c83277:/# curl localhost:8001/stats + cluster.service1.external.upstream_rq_200: 7 + ... + cluster.service1.membership_change: 2 + cluster.service1.membership_total: 3 + ... + cluster.service1.upstream_cx_http2_total: 3 + ... + cluster.service1.upstream_rq_total: 7 + ... + cluster.service2.external.upstream_rq_200: 2 + ... 
+ cluster.service2.membership_change: 1 + cluster.service2.membership_total: 1 + ... + cluster.service2.upstream_cx_http2_total: 1 + ... + cluster.service2.upstream_rq_total: 2 + ... + +Notice that we can get the number of members of upstream clusters, number of requests +fulfilled by them, information about http ingress, and a plethora of other useful +stats. diff --git a/docs/root/start/sandboxes/grpc_bridge.rst b/docs/root/start/sandboxes/grpc_bridge.rst new file mode 100644 index 000000000000..09798b3e1291 --- /dev/null +++ b/docs/root/start/sandboxes/grpc_bridge.rst @@ -0,0 +1,68 @@ +.. _install_sandboxes_grpc_bridge: + +gRPC Bridge +=========== + +Envoy gRPC +~~~~~~~~~~ + +The gRPC bridge sandbox is an example usage of Envoy's +:ref:`gRPC bridge filter `. +Included in the sandbox is a gRPC in-memory Key/Value store with a Python HTTP +client. The Python client makes HTTP/1 requests through the Envoy sidecar +process which are upgraded into HTTP/2 gRPC requests. Response trailers are then +buffered and sent back to the client as an HTTP/1 header payload. + +Another Envoy feature demonstrated in this example is Envoy's ability to do authority- +based routing via its route configuration. + +Building the Go service +~~~~~~~~~~~~~~~~~~~~~~~ + +To build the Go gRPC service run:: + + $ pwd + envoy/examples/grpc-bridge + $ script/bootstrap + $ script/build + +Note: ``build`` requires that your Envoy codebase (or a working copy thereof) is in ``$GOPATH/src/github.com/envoyproxy/envoy``. 
+ +Docker compose +~~~~~~~~~~~~~~ + +To run the docker compose file, and set up both the Python and the gRPC containers +run:: + + $ pwd + envoy/examples/grpc-bridge + $ docker-compose up --build + +Sending requests to the Key/Value store +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To use the Python service and send gRPC requests:: + + $ pwd + envoy/examples/grpc-bridge + # set a key + $ docker-compose exec python /client/client.py set foo bar + setf foo to bar + + # get a key + $ docker-compose exec python /client/client.py get foo + bar + + # modify an existing key + $ docker-compose exec python /client/client.py set foo baz + setf foo to baz + + # get the modified key + $ docker-compose exec python /client/client.py get foo + baz + +In the running docker-compose container, you should see the gRPC service printing a record of its activity:: + + grpc_1 | 2017/05/30 12:05:09 set: foo = bar + grpc_1 | 2017/05/30 12:05:12 get: foo + grpc_1 | 2017/05/30 12:05:18 set: foo = baz diff --git a/docs/root/start/sandboxes/jaeger_tracing.rst b/docs/root/start/sandboxes/jaeger_tracing.rst new file mode 100644 index 000000000000..0c17b8181438 --- /dev/null +++ b/docs/root/start/sandboxes/jaeger_tracing.rst @@ -0,0 +1,81 @@ +.. _install_sandboxes_jaeger_tracing: + +Jaeger Tracing +============== + +The Jaeger tracing sandbox demonstrates Envoy's :ref:`request tracing ` +capabilities using `Jaeger `_ as the tracing provider. This sandbox +is very similar to the front proxy architecture described above, with one difference: +service1 makes an API call to service2 before returning a response. +The three containers will be deployed inside a virtual network called ``envoymesh``. + +All incoming requests are routed via the front envoy, which is acting as a reverse proxy +sitting on the edge of the ``envoymesh`` network. Port ``80`` is mapped to port ``8000`` +by docker compose (see :repo:`/examples/jaeger-tracing/docker-compose.yml`). 
Notice that +all envoys are configured to collect request traces (e.g., http_connection_manager/config/tracing setup in +:repo:`/examples/jaeger-tracing/front-envoy-jaeger.yaml`) and setup to propagate the spans generated +by the Jaeger tracer to a Jaeger cluster (trace driver setup +in :repo:`/examples/jaeger-tracing/front-envoy-jaeger.yaml`). + +Before routing a request to the appropriate service envoy or the application, Envoy will take +care of generating the appropriate spans for tracing (parent/child context spans). +At a high-level, each span records the latency of upstream API calls as well as information +needed to correlate the span with other related spans (e.g., the trace ID). + +One of the most important benefits of tracing from Envoy is that it will take care of +propagating the traces to the Jaeger service cluster. However, in order to fully take advantage +of tracing, the application has to propagate trace headers that Envoy generates, while making +calls to other services. In the sandbox we have provided, the simple flask app +(see trace function in :repo:`/examples/front-proxy/service.py`) acting as service1 propagates +the trace headers while making an outbound call to service2. + + +Running the Sandbox +~~~~~~~~~~~~~~~~~~~ + +The following documentation runs through the setup of an envoy cluster organized +as is described in the image above. + +**Step 1: Build the sandbox** + +To build this sandbox example, and start the example apps run the following commands:: + + $ pwd + envoy/examples/jaeger-tracing + $ docker-compose up --build -d + $ docker-compose ps + Name Command State Ports + ------------------------------------------------------------------------------------------------------------- + jaegertracing_service1_1 /bin/sh -c /usr/local/bin/ ... Up 80/tcp + jaegertracing_service2_1 /bin/sh -c /usr/local/bin/ ... Up 80/tcp + jaegertracing_front-envoy_1 /bin/sh -c /usr/local/bin/ ... 
Up 0.0.0.0:8000->80/tcp, 0.0.0.0:8001->8001/tcp + +**Step 2: Generate some load** + +You can now send a request to service1 via the front-envoy as follows:: + + $ curl -v $(docker-machine ip default):8000/trace/1 + * Trying 192.168.99.100... + * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) + > GET /trace/1 HTTP/1.1 + > Host: 192.168.99.100:8000 + > User-Agent: curl/7.43.0 + > Accept: */* + > + < HTTP/1.1 200 OK + < content-type: text/html; charset=utf-8 + < content-length: 89 + < x-envoy-upstream-service-time: 1 + < server: envoy + < date: Fri, 26 Aug 2016 19:39:19 GMT + < x-envoy-protocol-version: HTTP/1.1 + < + Hello from behind Envoy (service 1)! hostname: f26027f1ce28 resolvedhostname: 172.19.0.6 + * Connection #0 to host 192.168.99.100 left intact + +**Step 3: View the traces in Jaeger UI** + +Point your browser to http://localhost:16686 . You should see the Jaeger dashboard. +Set the service to "front-proxy" and hit 'Find Traces'. You should see traces from the front-proxy. +Click on a trace to explore the path taken by the request from front-proxy to service1 +to service2, as well as the latency incurred at each hop. diff --git a/docs/root/start/sandboxes/zipkin_tracing.rst b/docs/root/start/sandboxes/zipkin_tracing.rst new file mode 100644 index 000000000000..53a97dcc8f3a --- /dev/null +++ b/docs/root/start/sandboxes/zipkin_tracing.rst @@ -0,0 +1,83 @@ +.. _install_sandboxes_zipkin_tracing: + +Zipkin Tracing +============== + +The Zipkin tracing sandbox demonstrates Envoy's :ref:`request tracing ` +capabilities using `Zipkin `_ as the tracing provider. This sandbox +is very similar to the front proxy architecture described above, with one difference: +service1 makes an API call to service2 before returning a response. +The three containers will be deployed inside a virtual network called ``envoymesh``. + +All incoming requests are routed via the front envoy, which is acting as a reverse proxy +sitting on the edge of the ``envoymesh`` network. 
Port ``80`` is mapped to port ``8000`` +by docker compose (see :repo:`/examples/zipkin-tracing/docker-compose.yml`). Notice that +all envoys are configured to collect request traces (e.g., http_connection_manager/config/tracing setup in +:repo:`/examples/zipkin-tracing/front-envoy-zipkin.yaml`) and setup to propagate the spans generated +by the Zipkin tracer to a Zipkin cluster (trace driver setup +in :repo:`/examples/zipkin-tracing/front-envoy-zipkin.yaml`). + +Before routing a request to the appropriate service envoy or the application, Envoy will take +care of generating the appropriate spans for tracing (parent/child/shared context spans). +At a high-level, each span records the latency of upstream API calls as well as information +needed to correlate the span with other related spans (e.g., the trace ID). + +One of the most important benefits of tracing from Envoy is that it will take care of +propagating the traces to the Zipkin service cluster. However, in order to fully take advantage +of tracing, the application has to propagate trace headers that Envoy generates, while making +calls to other services. In the sandbox we have provided, the simple flask app +(see trace function in :repo:`/examples/front-proxy/service.py`) acting as service1 propagates +the trace headers while making an outbound call to service2. + + +Running the Sandbox +~~~~~~~~~~~~~~~~~~~ + +The following documentation runs through the setup of an envoy cluster organized +as is described in the image above. + +**Step 1: Build the sandbox** + +To build this sandbox example, and start the example apps run the following commands:: + + $ pwd + envoy/examples/zipkin-tracing + $ docker-compose up --build -d + $ docker-compose ps + Name Command State Ports + ------------------------------------------------------------------------------------------------------------- + zipkintracing_service1_1 /bin/sh -c /usr/local/bin/ ... Up 80/tcp + zipkintracing_service2_1 /bin/sh -c /usr/local/bin/ ... 
Up 80/tcp + zipkintracing_front-envoy_1 /bin/sh -c /usr/local/bin/ ... Up 0.0.0.0:8000->80/tcp, 0.0.0.0:8001->8001/tcp + +**Step 2: Generate some load** + +You can now send a request to service1 via the front-envoy as follows:: + + $ curl -v $(docker-machine ip default):8000/trace/1 + * Trying 192.168.99.100... + * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) + > GET /trace/1 HTTP/1.1 + > Host: 192.168.99.100:8000 + > User-Agent: curl/7.43.0 + > Accept: */* + > + < HTTP/1.1 200 OK + < content-type: text/html; charset=utf-8 + < content-length: 89 + < x-envoy-upstream-service-time: 1 + < server: envoy + < date: Fri, 26 Aug 2016 19:39:19 GMT + < x-envoy-protocol-version: HTTP/1.1 + < + Hello from behind Envoy (service 1)! hostname: f26027f1ce28 resolvedhostname: 172.19.0.6 + * Connection #0 to host 192.168.99.100 left intact + +**Step 3: View the traces in Zipkin UI** + +Point your browser to http://localhost:9411 . You should see the Zipkin dashboard. +If this ip address is incorrect, you can find the correct one by running: ``$ docker-machine ip default``. +Set the service to "front-proxy" and set the start time to a few minutes before +the start of the test (step 2) and hit enter. You should see traces from the front-proxy. +Click on a trace to explore the path taken by the request from front-proxy to service1 +to service2, as well as the latency incurred at each hop. diff --git a/docs/root/start/start.rst b/docs/root/start/start.rst new file mode 100644 index 000000000000..05c67d0a069b --- /dev/null +++ b/docs/root/start/start.rst @@ -0,0 +1,163 @@ +.. _start: + +Getting Started +=============== + +This section gets you started with a very simple configuration and provides some example configurations. + +Envoy does not currently provide separate pre-built binaries, but does provide Docker images. This is +the fastest way to get started using Envoy. Should you wish to use Envoy outside of a +Docker container, you will need to :ref:`build it `. 
+ +These examples use the :ref:`v2 Envoy API `, but use only the static configuration +feature of the API, which is most useful for simple requirements. For more complex requirements +:ref:`Dynamic Configuration ` is supported. + +Quick Start to Run Simple Example +--------------------------------- + +These instructions run from files in the Envoy repo. The sections below give a +more detailed explanation of the configuration file and execution steps for +the same configuration. + +A very minimal Envoy configuration that can be used to validate basic plain HTTP +proxying is available in :repo:`configs/google_com_proxy.v2.yaml`. This is not +intended to represent a realistic Envoy deployment. + + $ docker pull envoyproxy/envoy:latest + $ docker run --rm -d -p 10000:10000 envoyproxy/envoy:latest + $ curl -v localhost:10000 + +The Docker image used will contain the latest version of Envoy +and a basic Envoy configuration. This basic configuration tells +Envoy to route incoming requests to \*.google.com. + +Simple Configuration +-------------------- + +Envoy can be configured using a single YAML file passed in as an argument on the command line. + +The :ref:`admin message ` is required to configure +the administration server. The `address` key specifies the +listening :ref:`address ` +which in this case is simply `0.0.0.0:9901`. + +.. code-block:: yaml + + admin: + access_log_path: /tmp/admin_access.log + address: + socket_address: { address: 0.0.0.0, port_value: 9901 } + +The :ref:`static_resources ` contains everything that is configured statically when Envoy starts, +as opposed to the means of configuring resources dynamically when Envoy is running. +The :ref:`v2 API Overview ` describes this. + +.. code-block:: yaml + + static_resources: + +The specification of the :ref:`listeners `. + +.. 
code-block:: yaml + + listeners: + - name: listener_0 + address: + socket_address: { address: 0.0.0.0, port_value: 10000 } + filter_chains: + - filters: + - name: envoy.http_connection_manager + config: + stat_prefix: ingress_http + codec_type: AUTO + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: ["*"] + routes: + - match: { prefix: "/" } + route: { host_rewrite: www.google.com, cluster: service_google } + http_filters: + - name: envoy.router + +The specification of the :ref:`clusters `. + +.. code-block:: yaml + + clusters: + - name: service_google + connect_timeout: 0.25s + type: LOGICAL_DNS + # Comment out the following line to test on v6 networks + dns_lookup_family: V4_ONLY + lb_policy: ROUND_ROBIN + hosts: [{ socket_address: { address: google.com, port_value: 443 }}] + tls_context: { sni: www.google.com } + + +Using the Envoy Docker Image +---------------------------- + +Create a simple Dockerfile to execute Envoy, which assumes that envoy.yaml (described above) is in your local directory. +You can refer to the :ref:`Command line options `. + +.. code-block:: none + + FROM envoyproxy/envoy:latest + COPY envoy.yaml /etc/envoy/envoy.yaml + +Build the Docker image that runs your configuration using:: + + $ docker build -t envoy:v1 . + +And now you can execute it with:: + + $ docker run -d --name envoy -p 9901:9901 -p 10000:10000 envoy:v1 + +And finally test it using:: + + $ curl -v localhost:10000 + +If you would like to use envoy with docker-compose you can overwrite the provided configuration file +by using a volume. + +.. code-block:: yaml + + version: '3' + services: + envoy: + image: envoyproxy/envoy:latest + ports: + - "10000:10000" + volumes: + - ./envoy.yaml:/etc/envoy/envoy.yaml + + +Sandboxes +--------- + +We've created a number of sandboxes using Docker Compose that set up different +environments to test out Envoy's features and show sample configurations. 
As we +gauge peoples' interests we will add more sandboxes demonstrating different +features. The following sandboxes are available: + +.. toctree:: + :maxdepth: 1 + + sandboxes/front_proxy + sandboxes/zipkin_tracing + sandboxes/jaeger_tracing + sandboxes/grpc_bridge + +Other use cases +--------------- + +In addition to the proxy itself, Envoy is also bundled as part of several open +source distributions that target specific use cases. + +.. toctree:: + :maxdepth: 1 + + distro/ambassador diff --git a/source/common/access_log/access_log_formatter.cc b/source/common/access_log/access_log_formatter.cc index f8114df1772b..cc0818994860 100644 --- a/source/common/access_log/access_log_formatter.cc +++ b/source/common/access_log/access_log_formatter.cc @@ -75,7 +75,7 @@ void AccessLogFormatParser::parseCommandHeader(const std::string& token, const s if (subs.size() > 1) { throw EnvoyException( // Header format rules support only one alternative header. - // https://github.com/envoyproxy/data-plane-api/blob/master/docs/root/configuration/access_log.rst#format-rules + // docs/root/configuration/access_log.rst#format-rules fmt::format("More than 1 alternative header specified in token: {}", token)); } if (subs.size() == 1) { diff --git a/source/common/access_log/access_log_formatter.h b/source/common/access_log/access_log_formatter.h index 95a508aebb7c..2fc5d7826db7 100644 --- a/source/common/access_log/access_log_formatter.h +++ b/source/common/access_log/access_log_formatter.h @@ -27,7 +27,7 @@ class AccessLogFormatParser { * Parse a header format rule of the form: %REQ(X?Y):Z% . * Will populate a main_header and an optional alternative header if specified. 
* See doc: - * https://github.com/envoyproxy/data-plane-api/blob/master/docs/root/configuration/access_log.rst#format-rules + * docs/root/configuration/access_log.rst#format-rules */ static void parseCommandHeader(const std::string& token, const size_t start, std::string& main_header, std::string& alternative_header, diff --git a/source/common/common/BUILD b/source/common/common/BUILD index 31e504d56df8..ed18ecb0ef82 100644 --- a/source/common/common/BUILD +++ b/source/common/common/BUILD @@ -140,7 +140,7 @@ envoy_cc_library( genrule( name = "generate_version_number", - srcs = ["@envoy_api//:VERSION"], + srcs = ["//:VERSION"], outs = ["version_number.h"], cmd = """echo "#define BUILD_VERSION_NUMBER \\"$$(cat $<)\\"" >$@""", ) diff --git a/source/extensions/access_loggers/file/BUILD b/source/extensions/access_loggers/file/BUILD index d5063ec1fa2f..afbcaaa2920f 100644 --- a/source/extensions/access_loggers/file/BUILD +++ b/source/extensions/access_loggers/file/BUILD @@ -1,6 +1,6 @@ licenses(["notice"]) # Apache 2 # Access log implementation that writes to a file. -# Public docs: https://github.com/envoyproxy/data-plane-api/blob/master/docs/root/configuration/access_log.rst +# Public docs: docs/root/configuration/access_log.rst load( "//bazel:envoy_build_system.bzl", diff --git a/source/extensions/filters/common/ext_authz/ext_authz_impl.h b/source/extensions/filters/common/ext_authz/ext_authz_impl.h index c771ad5fd56d..f3266c4875b2 100644 --- a/source/extensions/filters/common/ext_authz/ext_authz_impl.h +++ b/source/extensions/filters/common/ext_authz/ext_authz_impl.h @@ -72,7 +72,7 @@ class GrpcClientImpl : public Client, public ExtAuthzAsyncCallbacks { * and fill out the details in the authorization protobuf that is sent to authorization * service. * The specific information in the request is as per the specification in the - * data-plane-api. + * data plane API. 
*/ class CheckRequestUtils { public: diff --git a/source/extensions/filters/http/buffer/BUILD b/source/extensions/filters/http/buffer/BUILD index 9b7af18c3990..88dc1a0b75c4 100644 --- a/source/extensions/filters/http/buffer/BUILD +++ b/source/extensions/filters/http/buffer/BUILD @@ -1,6 +1,6 @@ licenses(["notice"]) # Apache 2 # Request buffering and timeout L7 HTTP filter -# Public docs: https://github.com/envoyproxy/data-plane-api/blob/master/docs/root/configuration/http_filters/buffer_filter.rst +# Public docs: docs/root/configuration/http_filters/buffer_filter.rst load( "//bazel:envoy_build_system.bzl", diff --git a/source/extensions/filters/http/cors/BUILD b/source/extensions/filters/http/cors/BUILD index 5a8bdfcec6ae..8399cfe3f408 100644 --- a/source/extensions/filters/http/cors/BUILD +++ b/source/extensions/filters/http/cors/BUILD @@ -1,6 +1,6 @@ licenses(["notice"]) # Apache 2 # L7 HTTP filter which implements CORS processing (https://en.wikipedia.org/wiki/Cross-origin_resource_sharing) -# Public docs: https://github.com/envoyproxy/data-plane-api/blob/master/docs/root/configuration/http_filters/cors_filter.rst +# Public docs: docs/root/configuration/http_filters/cors_filter.rst load( "//bazel:envoy_build_system.bzl", diff --git a/source/extensions/filters/http/dynamo/BUILD b/source/extensions/filters/http/dynamo/BUILD index 9ae21e7c0fdc..0e93b18a0317 100644 --- a/source/extensions/filters/http/dynamo/BUILD +++ b/source/extensions/filters/http/dynamo/BUILD @@ -1,6 +1,6 @@ licenses(["notice"]) # Apache 2 # AWS DynamoDB L7 HTTP filter (observability): https://aws.amazon.com/dynamodb/ -# Public docs: https://github.com/envoyproxy/data-plane-api/blob/master/docs/root/configuration/http_filters/dynamodb_filter.rst +# Public docs: docs/root/configuration/http_filters/dynamodb_filter.rst load( "//bazel:envoy_build_system.bzl", diff --git a/source/extensions/filters/http/ext_authz/BUILD b/source/extensions/filters/http/ext_authz/BUILD index 05748ed7887a..9f08df77d899 
100644 --- a/source/extensions/filters/http/ext_authz/BUILD +++ b/source/extensions/filters/http/ext_authz/BUILD @@ -1,6 +1,6 @@ licenses(["notice"]) # Apache 2 # External authorization L7 HTTP filter -# Public docs: TODO(saumoh): Docs needed in https://github.com/envoyproxy/data-plane-api/tree/master/docs/root/configuration/http_filters +# Public docs: TODO(saumoh): Docs needed in docs/root/configuration/http_filters load( "//bazel:envoy_build_system.bzl", diff --git a/source/extensions/filters/http/fault/BUILD b/source/extensions/filters/http/fault/BUILD index 694a185d5625..f0207cbd3a99 100644 --- a/source/extensions/filters/http/fault/BUILD +++ b/source/extensions/filters/http/fault/BUILD @@ -1,6 +1,6 @@ licenses(["notice"]) # Apache 2 # HTTP L7 filter that injects faults into the request flow -# Public docs: https://github.com/envoyproxy/data-plane-api/blob/master/docs/root/configuration/http_filters/fault_filter.rst +# Public docs: docs/root/configuration/http_filters/fault_filter.rst load( "//bazel:envoy_build_system.bzl", diff --git a/source/extensions/filters/http/grpc_http1_bridge/BUILD b/source/extensions/filters/http/grpc_http1_bridge/BUILD index 09c9688cad17..91325b4143da 100644 --- a/source/extensions/filters/http/grpc_http1_bridge/BUILD +++ b/source/extensions/filters/http/grpc_http1_bridge/BUILD @@ -1,6 +1,6 @@ licenses(["notice"]) # Apache 2 # L7 HTTP filter that bridges HTTP/1.1 unary "gRPC" to compliant HTTP/2 gRPC. 
-# Public docs: https://github.com/envoyproxy/data-plane-api/blob/master/docs/root/configuration/http_filters/grpc_http1_bridge_filter.rst +# Public docs: docs/root/configuration/http_filters/grpc_http1_bridge_filter.rst load( "//bazel:envoy_build_system.bzl", diff --git a/source/extensions/filters/http/grpc_json_transcoder/BUILD b/source/extensions/filters/http/grpc_json_transcoder/BUILD index 310c252c1323..0ae5c042f056 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/BUILD +++ b/source/extensions/filters/http/grpc_json_transcoder/BUILD @@ -1,6 +1,6 @@ licenses(["notice"]) # Apache 2 # L7 HTTP filter that implements binary gRPC to JSON transcoding -# Public docs: https://github.com/envoyproxy/data-plane-api/blob/master/docs/root/configuration/http_filters/grpc_json_transcoder_filter.rst +# Public docs: docs/root/configuration/http_filters/grpc_json_transcoder_filter.rst load( "//bazel:envoy_build_system.bzl", diff --git a/source/extensions/filters/http/grpc_web/BUILD b/source/extensions/filters/http/grpc_web/BUILD index 40ed02a595a2..e9552b2bc70f 100644 --- a/source/extensions/filters/http/grpc_web/BUILD +++ b/source/extensions/filters/http/grpc_web/BUILD @@ -1,6 +1,6 @@ licenses(["notice"]) # Apache 2 # L7 HTTP filter that implements the grpc-web protocol (https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-WEB.md) -# Public docs: https://github.com/envoyproxy/data-plane-api/blob/master/docs/root/configuration/http_filters/grpc_web_filter.rst +# Public docs: docs/root/configuration/http_filters/grpc_web_filter.rst load( "//bazel:envoy_build_system.bzl", diff --git a/source/extensions/filters/http/gzip/BUILD b/source/extensions/filters/http/gzip/BUILD index f14339bf0bf1..32598914f180 100644 --- a/source/extensions/filters/http/gzip/BUILD +++ b/source/extensions/filters/http/gzip/BUILD @@ -1,6 +1,6 @@ licenses(["notice"]) # Apache 2 # HTTP L7 filter that performs gzip compression -# Public docs: 
https://github.com/envoyproxy/data-plane-api/blob/master/docs/root/configuration/http_filters/gzip_filter.rst +# Public docs: docs/root/configuration/http_filters/gzip_filter.rst load( "//bazel:envoy_build_system.bzl", diff --git a/source/extensions/filters/http/health_check/BUILD b/source/extensions/filters/http/health_check/BUILD index 18f5b94901d5..82ae8ce467e8 100644 --- a/source/extensions/filters/http/health_check/BUILD +++ b/source/extensions/filters/http/health_check/BUILD @@ -1,6 +1,6 @@ licenses(["notice"]) # Apache 2 # L7 HTTP filter that implements health check responses -# Public docs: https://github.com/envoyproxy/data-plane-api/blob/master/docs/root/configuration/http_filters/health_check_filter.rst +# Public docs: docs/root/configuration/http_filters/health_check_filter.rst load( "//bazel:envoy_build_system.bzl", diff --git a/source/extensions/filters/http/ip_tagging/BUILD b/source/extensions/filters/http/ip_tagging/BUILD index 916f5af94ead..b05c15facda8 100644 --- a/source/extensions/filters/http/ip_tagging/BUILD +++ b/source/extensions/filters/http/ip_tagging/BUILD @@ -1,6 +1,6 @@ licenses(["notice"]) # Apache 2 # HTTP L7 filter that writes an IP tagging header based on IP trie data -# Public docs: https://github.com/envoyproxy/data-plane-api/blob/master/docs/root/configuration/http_filters/ip_tagging_filter.rst +# Public docs: docs/root/configuration/http_filters/ip_tagging_filter.rst load( "//bazel:envoy_build_system.bzl", diff --git a/source/extensions/filters/http/lua/BUILD b/source/extensions/filters/http/lua/BUILD index e86cc27fbf79..f7ef05d8fa0f 100644 --- a/source/extensions/filters/http/lua/BUILD +++ b/source/extensions/filters/http/lua/BUILD @@ -1,6 +1,6 @@ licenses(["notice"]) # Apache 2 # Lua scripting L7 HTTP filter (https://www.lua.org/, http://luajit.org/) -# Public docs: https://github.com/envoyproxy/data-plane-api/blob/master/docs/root/configuration/http_filters/lua_filter.rst +# Public docs: 
docs/root/configuration/http_filters/lua_filter.rst load( "//bazel:envoy_build_system.bzl", diff --git a/source/extensions/filters/http/ratelimit/BUILD b/source/extensions/filters/http/ratelimit/BUILD index c57b96fde4be..2274220ecbbd 100644 --- a/source/extensions/filters/http/ratelimit/BUILD +++ b/source/extensions/filters/http/ratelimit/BUILD @@ -1,6 +1,6 @@ licenses(["notice"]) # Apache 2 # Ratelimit L7 HTTP filter -# Public docs: https://github.com/envoyproxy/data-plane-api/blob/master/docs/root/configuration/http_filters/rate_limit_filter.rst +# Public docs: docs/root/configuration/http_filters/rate_limit_filter.rst load( "//bazel:envoy_build_system.bzl", diff --git a/source/extensions/filters/http/router/BUILD b/source/extensions/filters/http/router/BUILD index a873af22bee6..dcf365fa8c32 100644 --- a/source/extensions/filters/http/router/BUILD +++ b/source/extensions/filters/http/router/BUILD @@ -1,6 +1,6 @@ licenses(["notice"]) # Apache 2 # HTTP L7 filter responsible for routing to upstream connection pools -# Public docs: https://github.com/envoyproxy/data-plane-api/blob/master/docs/root/configuration/http_filters/router_filter.rst +# Public docs: docs/root/configuration/http_filters/router_filter.rst load( "//bazel:envoy_build_system.bzl", diff --git a/source/extensions/filters/http/squash/BUILD b/source/extensions/filters/http/squash/BUILD index 1b01a99e7c77..d001dec67e6a 100644 --- a/source/extensions/filters/http/squash/BUILD +++ b/source/extensions/filters/http/squash/BUILD @@ -1,6 +1,6 @@ licenses(["notice"]) # Apache 2 # L7 HTTP filter that implements the Squash microservice debugger -# Public docs: https://github.com/envoyproxy/data-plane-api/blob/master/docs/root/configuration/http_filters/squash_filter.rst +# Public docs: docs/root/configuration/http_filters/squash_filter.rst load( "//bazel:envoy_build_system.bzl", diff --git a/source/extensions/filters/http/squash/squash_filter.cc b/source/extensions/filters/http/squash/squash_filter.cc index 
83a417c383ac..6237ea4ef61a 100644 --- a/source/extensions/filters/http/squash/squash_filter.cc +++ b/source/extensions/filters/http/squash/squash_filter.cc @@ -85,8 +85,7 @@ void SquashFilterConfig::updateTemplateInValue(ProtobufWkt::Value& curvalue) { To interpolate an environment variable named ENV, add '{{ ENV }}' (without the quotes, with the spaces) to the template string. - See https://github.com/envoyproxy/data-plane-api/blob/master/api/filter/http/squash.proto#L36 for - the motivation on why this is needed. + See api/envoy/config/filter/http/squash/v2/squash.proto for the motivation on why this is needed. */ std::string SquashFilterConfig::replaceEnv(const std::string& attachment_template) { std::string s; diff --git a/source/extensions/filters/listener/original_dst/BUILD b/source/extensions/filters/listener/original_dst/BUILD index f898bb4f615b..c1fd7812ecbf 100644 --- a/source/extensions/filters/listener/original_dst/BUILD +++ b/source/extensions/filters/listener/original_dst/BUILD @@ -1,6 +1,6 @@ licenses(["notice"]) # Apache 2 # ORIGINAL_DST iptables redirection listener filter -# Public docs: https://github.com/envoyproxy/data-plane-api/blob/master/docs/root/configuration/listener_filters/original_dst_filter.rst +# Public docs: docs/root/configuration/listener_filters/original_dst_filter.rst load( "//bazel:envoy_build_system.bzl", diff --git a/source/extensions/filters/network/client_ssl_auth/BUILD b/source/extensions/filters/network/client_ssl_auth/BUILD index e105a6b168bd..61bfabb428aa 100644 --- a/source/extensions/filters/network/client_ssl_auth/BUILD +++ b/source/extensions/filters/network/client_ssl_auth/BUILD @@ -1,6 +1,6 @@ licenses(["notice"]) # Apache 2 # Client SSL authorization L4 network filter -# Public docs: https://github.com/envoyproxy/data-plane-api/blob/master/docs/root/configuration/network_filters/client_ssl_auth_filter.rst +# Public docs: docs/root/configuration/network_filters/client_ssl_auth_filter.rst load( 
"//bazel:envoy_build_system.bzl", diff --git a/source/extensions/filters/network/echo/BUILD b/source/extensions/filters/network/echo/BUILD index 6a91128007f7..4f40cfbfe463 100644 --- a/source/extensions/filters/network/echo/BUILD +++ b/source/extensions/filters/network/echo/BUILD @@ -1,6 +1,6 @@ licenses(["notice"]) # Apache 2 # Echo L4 network filter. This is primarily a simplistic example. -# Public docs: https://github.com/envoyproxy/data-plane-api/blob/master/docs/root/configuration/network_filters/echo_filter.rst +# Public docs: docs/root/configuration/network_filters/echo_filter.rst load( "//bazel:envoy_build_system.bzl", diff --git a/source/extensions/filters/network/ext_authz/BUILD b/source/extensions/filters/network/ext_authz/BUILD index 8b1ea882bc83..39b3cbc0a475 100644 --- a/source/extensions/filters/network/ext_authz/BUILD +++ b/source/extensions/filters/network/ext_authz/BUILD @@ -1,6 +1,6 @@ licenses(["notice"]) # Apache 2 # External authorization L4 network filter -# Public docs: TODO(saumoh): Docs needed in https://github.com/envoyproxy/data-plane-api/tree/master/docs/root/configuration/network_filters +# Public docs: TODO(saumoh): Docs needed in docs/root/configuration/network_filters load( "//bazel:envoy_build_system.bzl", diff --git a/source/extensions/filters/network/http_connection_manager/BUILD b/source/extensions/filters/network/http_connection_manager/BUILD index 7a55e57af592..f2168fde3ba2 100644 --- a/source/extensions/filters/network/http_connection_manager/BUILD +++ b/source/extensions/filters/network/http_connection_manager/BUILD @@ -1,7 +1,7 @@ licenses(["notice"]) # Apache 2 # L4 network filter that implements HTTP protocol handling and filtering. This filter internally # drives all of the L7 HTTP filters. 
-# Public docs: https://github.com/envoyproxy/data-plane-api/blob/master/docs/root/configuration/http_conn_man/http_conn_man.rst +# Public docs: docs/root/configuration/http_conn_man/http_conn_man.rst load( "//bazel:envoy_build_system.bzl", diff --git a/source/extensions/filters/network/mongo_proxy/BUILD b/source/extensions/filters/network/mongo_proxy/BUILD index 6062c2f926b4..4c01aa2c2402 100644 --- a/source/extensions/filters/network/mongo_proxy/BUILD +++ b/source/extensions/filters/network/mongo_proxy/BUILD @@ -1,6 +1,6 @@ licenses(["notice"]) # Apache 2 # Mongo proxy L4 network filter (observability and fault injection). -# Public docs: https://github.com/envoyproxy/data-plane-api/blob/master/docs/root/configuration/network_filters/mongo_proxy_filter.rst +# Public docs: docs/root/configuration/network_filters/mongo_proxy_filter.rst load( "//bazel:envoy_build_system.bzl", diff --git a/source/extensions/filters/network/ratelimit/BUILD b/source/extensions/filters/network/ratelimit/BUILD index 6f003235a2a7..36a7a83240f7 100644 --- a/source/extensions/filters/network/ratelimit/BUILD +++ b/source/extensions/filters/network/ratelimit/BUILD @@ -1,6 +1,6 @@ licenses(["notice"]) # Apache 2 # Ratelimit L4 network filter -# Public docs: https://github.com/envoyproxy/data-plane-api/blob/master/docs/root/configuration/network_filters/rate_limit_filter.rst +# Public docs: docs/root/configuration/network_filters/rate_limit_filter.rst load( "//bazel:envoy_build_system.bzl", diff --git a/source/extensions/filters/network/redis_proxy/BUILD b/source/extensions/filters/network/redis_proxy/BUILD index e1c284ed84a3..8173c08444ce 100644 --- a/source/extensions/filters/network/redis_proxy/BUILD +++ b/source/extensions/filters/network/redis_proxy/BUILD @@ -1,7 +1,7 @@ licenses(["notice"]) # Apache 2 # Redis proxy L4 network filter. Implements consistent hashing and observability for large redis # clusters. 
-# Public docs: https://github.com/envoyproxy/data-plane-api/blob/master/docs/root/configuration/network_filters/redis_proxy_filter.rst +# Public docs: docs/root/configuration/network_filters/redis_proxy_filter.rst load( "//bazel:envoy_build_system.bzl", diff --git a/source/extensions/filters/network/tcp_proxy/BUILD b/source/extensions/filters/network/tcp_proxy/BUILD index 2c3c4c5f9e21..aa8518611413 100644 --- a/source/extensions/filters/network/tcp_proxy/BUILD +++ b/source/extensions/filters/network/tcp_proxy/BUILD @@ -1,6 +1,6 @@ licenses(["notice"]) # Apache 2 # TCP proxy L4 network filter. -# Public docs: https://github.com/envoyproxy/data-plane-api/blob/master/docs/root/configuration/network_filters/tcp_proxy_filter.rst +# Public docs: docs/root/configuration/network_filters/tcp_proxy_filter.rst load( "//bazel:envoy_build_system.bzl", diff --git a/source/extensions/stat_sinks/metrics_service/BUILD b/source/extensions/stat_sinks/metrics_service/BUILD index a38323dc115a..4f7e60d83b6c 100644 --- a/source/extensions/stat_sinks/metrics_service/BUILD +++ b/source/extensions/stat_sinks/metrics_service/BUILD @@ -1,5 +1,5 @@ licenses(["notice"]) # Apache 2 -# Stats sink for the gRPC metrics service: https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/metrics/v2/metrics_service.proto +# Stats sink for the gRPC metrics service: api/envoy/service/metrics/v2/metrics_service.proto load( "//bazel:envoy_build_system.bzl", diff --git a/sync.sh b/sync.sh new file mode 100755 index 000000000000..25f2b30e44ec --- /dev/null +++ b/sync.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +# Temporary script for synchronizing repos + +rsync -av ../envoy-api/envoy ./api/ +rsync -av ../envoy-api/bazel ./api/ +rsync -av ../envoy-api/VERSION ./ +rsync -av ../envoy-api/BUILD ./ +rsync -av ../envoy-api/docs/{root,conf.py,requirements.txt} ./docs +rsync -av ../envoy-api/docs/BUILD ./api/docs +rsync -av ../envoy-api/tools/protodoc ./tools +rsync -av ../envoy-api/diagrams ./api +rsync -av 
def isApiFile(file_path):
  """Return True when file_path lives under the merged API tree (./api/).

  API files are exempt from the Envoy-specific BUILD fixer until an
  API-specific fixer script exists.
  """
  api_prefix = './api/'
  return file_path.startswith(api_prefix)
# Borrowed from
# https://github.com/bazelbuild/rules_go/blob/master/proto/toolchain.bzl. This
# does some magic munging to remove workspace prefixes from output paths to
# convert a path as understood by Bazel into a path as understood by protoc.
def _proto_path(proto):
    """Return the protoc-relative import path for a proto File.

    The proto path is not really a file path: it is the path to the proto as
    seen when the descriptor file was generated, i.e. the Bazel path with the
    generated-files root and workspace root prefixes stripped.
    """
    path = proto.path
    root = proto.root.path
    ws = proto.owner.workspace_root
    if path.startswith(root):
        path = path[len(root):]
    if path.startswith("/"):
        path = path[1:]
    if path.startswith(ws):
        path = path[len(ws):]
    if path.startswith("/"):
        path = path[1:]
    return path

# Bazel aspect (https://docs.bazel.build/versions/master/skylark/aspects.html)
# that can be invoked from the CLI to produce docs via //tools/protodoc for
# proto_library targets. Example use:
#
#   bazel build //api --aspects tools/protodoc/protodoc.bzl%proto_doc_aspect \
#       --output_groups=rst
#
# The aspect builds the transitive docs, so any .proto in the dependency graph
# gets docs created.
def _proto_doc_aspect_impl(target, ctx):
    # Accumulate RST outputs from this proto_library node's dependencies.
    transitive_outputs = depset()
    for dep in ctx.rule.attr.deps:
        transitive_outputs = transitive_outputs | dep.output_groups["rst"]

    # If this proto_library doesn't actually name any sources, e.g. //api:api,
    # but just glues together other libs, we only need to follow the graph.
    proto_sources = target.proto.direct_sources
    if not proto_sources:
        return [OutputGroupInfo(rst = transitive_outputs)]

    # Figure out the set of import paths. Ideally we would use descriptor sets
    # built by proto_library, which avoid having to do nasty path mangling, but
    # these don't include source_code_info, which we need for comment
    # extraction. See https://github.com/bazelbuild/bazel/issues/3971.
    import_paths = []
    for src in target.proto.transitive_sources:
        if src.root.path:
            candidate = src.root.path + "/" + src.owner.workspace_root
        else:
            candidate = src.owner.workspace_root
        if candidate:
            import_paths += [candidate]

    # The outputs live in the ctx.label's package root. Extra path components
    # are added to match protoc's notion of path-relative output locations.
    outputs = [
        ctx.actions.declare_file(ctx.label.name + "/" + _proto_path(src) + ".rst")
        for src in proto_sources
    ]

    # Assemble the protoc command line.
    ctx_path = ctx.label.package + "/" + ctx.label.name
    output_path = outputs[0].root.path + "/" + outputs[0].owner.workspace_root + "/" + ctx_path
    args = ["-I./" + ctx.label.workspace_root]
    args += ["-I" + import_path for import_path in import_paths]
    args += [
        "--plugin=protoc-gen-protodoc=" + ctx.executable._protodoc.path,
        "--protodoc_out=" + output_path,
    ]
    args += [_proto_path(src) for src in target.proto.direct_sources]
    ctx.action(
        executable = ctx.executable._protoc,
        arguments = args,
        inputs = [ctx.executable._protodoc] + target.proto.transitive_sources.to_list(),
        outputs = outputs,
        mnemonic = "ProtoDoc",
        use_default_shell_env = True,
    )
    transitive_outputs = depset(outputs) | transitive_outputs
    return [OutputGroupInfo(rst = transitive_outputs)]

proto_doc_aspect = aspect(
    implementation = _proto_doc_aspect_impl,
    attr_aspects = ["deps"],
    attrs = {
        "_protoc": attr.label(
            default = Label("@com_google_protobuf//:protoc"),
            executable = True,
            cfg = "host",
        ),
        "_protodoc": attr.label(
            default = Label("//tools/protodoc"),
            executable = True,
            cfg = "host",
        ),
    },
)
# Namespace prefix for Envoy core APIs. (Restated with the constant group
# below; must match the module-top declaration.)
ENVOY_API_NAMESPACE_PREFIX = '.envoy.api.v2.'

# Namespace prefix for Envoy top-level APIs.
ENVOY_PREFIX = '.envoy.'

# Namespace prefix for WKTs.
WKT_NAMESPACE_PREFIX = '.google.protobuf.'

# Namespace prefix for RPCs.
RPC_NAMESPACE_PREFIX = '.google.rpc.'

# http://www.fileformat.info/info/unicode/char/2063/index.htm
UNICODE_INVISIBLE_SEPARATOR = u'\u2063'

# Key-value annotation regex, e.g. [#comment: ...]. Raw string so backslash
# escapes reach the regex engine unmangled.
ANNOTATION_REGEX = re.compile(r'\[#([\w-]+?):(.*?)\]\s?', re.DOTALL)

# Page/section titles with special prefixes in the proto comments.
DOC_TITLE_ANNOTATION = 'protodoc-title'

# Not-implemented-yet annotation on leading comments, leading to insertion of
# a warning on the field.
NOT_IMPLEMENTED_WARN_ANNOTATION = 'not-implemented-warn'

# Not-implemented-yet annotation on leading comments, leading to hiding of
# the field.
NOT_IMPLEMENTED_HIDE_ANNOTATION = 'not-implemented-hide'

# Comment. Just used for adding text that will not go into the docs at all.
COMMENT_ANNOTATION = 'comment'

# Proto compatibility status (frozen/draft/experimental).
PROTO_STATUS_ANNOTATION = 'proto-status'

# Where v2 differs from v1.
V2_API_DIFF_ANNOTATION = 'v2-api-diff'

VALID_ANNOTATIONS = set([
    DOC_TITLE_ANNOTATION,
    NOT_IMPLEMENTED_WARN_ANNOTATION,
    NOT_IMPLEMENTED_HIDE_ANNOTATION,
    V2_API_DIFF_ANNOTATION,
    COMMENT_ANNOTATION,
    PROTO_STATUS_ANNOTATION,
])

# These can propagate from file scope to message/enum scope (and be overridden).
INHERITED_ANNOTATIONS = set([
    PROTO_STATUS_ANNOTATION,
])

# Template for data plane API URLs.
# TODO(htuch): Add the ability to build a permalink by feeding a hash
# to the tool or inferring from local tree (only really makes sense in CI).
DATA_PLANE_API_URL_FMT = 'https://github.com/envoyproxy/envoy/blob/master/api/%s#L%d'


class ProtodocError(Exception):
  """Base error class for the protodoc module."""


def FormatCommentWithAnnotations(s, annotations, type_name):
  """Append RST warnings/notes implied by annotations to a comment string.

  Args:
    s: comment string (annotations already stripped).
    annotations: map from annotation name to annotation value.
    type_name: name of the type the comment belongs to, e.g. 'message'.
  Returns:
    Comment string with any implied RST directives appended.
  Raises:
    ProtodocError: on an unknown proto-status value.
  """
  if NOT_IMPLEMENTED_WARN_ANNOTATION in annotations:
    s += '\n.. WARNING::\n  Not implemented yet\n'
  if V2_API_DIFF_ANNOTATION in annotations:
    s += '\n.. NOTE::\n  **v2 API difference**: ' + annotations[V2_API_DIFF_ANNOTATION] + '\n'
  if type_name == 'message' or type_name == 'enum':
    if PROTO_STATUS_ANNOTATION in annotations:
      status = annotations[PROTO_STATUS_ANNOTATION]
      if status not in ['frozen', 'draft', 'experimental']:
        raise ProtodocError('Unknown proto status: %s' % status)
      if status == 'draft' or status == 'experimental':
        # NOTE(review): the ref label was garbled in the source under review;
        # confirm the anchor name against the published docs.
        s += ('\n.. WARNING::\n   This %s type has :ref:`%s '
              '<config_overview_v2_status>` status.\n' % (type_name, status))
  return s


def ExtractAnnotations(s, inherited_annotations=None, type_name='file'):
  """Extract annotations from a given comment string.

  Args:
    s: string that may contain annotations.
    inherited_annotations: annotation map from file-level inherited annotations
      (or None if this is a file-level comment).
    type_name: name of the type the comment belongs to.
  Returns:
    Pair of the string with annotations stripped (and any implied RST
    directives appended) and the annotation map.
  Raises:
    ProtodocError: on an unknown annotation key.
  """
  annotations = {
      k: v
      for k, v in (inherited_annotations or {}).items()
      if k in INHERITED_ANNOTATIONS
  }
  # Extract annotations.
  groups = re.findall(ANNOTATION_REGEX, s)
  # Remove annotations.
  without_annotations = re.sub(ANNOTATION_REGEX, '', s)
  for group in groups:
    annotation = group[0]
    if annotation not in VALID_ANNOTATIONS:
      raise ProtodocError('Unknown annotation: %s' % annotation)
    annotations[group[0]] = group[1].lstrip()
  return FormatCommentWithAnnotations(without_annotations, annotations,
                                      type_name), annotations


class SourceCodeInfo(object):
  """Wrapper for a FileDescriptorProto's SourceCodeInfo proto."""

  def __init__(self, name, source_code_info):
    self._name = name
    self._proto = source_code_info
    # Lookup table from stringified location path to leading comment.
    self._leading_comments = {
        str(location.path): location.leading_comments
        for location in self._proto.location
    }
    self._file_level_comment = None

  @property
  def file_level_comment(self):
    """Obtain inferred file level comment."""
    if self._file_level_comment:
      return self._file_level_comment
    comment = ''
    # The earliest detached comment in the file is taken as the file-level
    # comment; seed the search past the last span in the file.
    earliest_detached_comment = max(
        max(location.span) for location in self._proto.location)
    for location in self._proto.location:
      if location.leading_detached_comments and location.span[0] < earliest_detached_comment:
        comment = StripLeadingSpace(''.join(
            location.leading_detached_comments)) + '\n'
        earliest_detached_comment = location.span[0]
    self._file_level_comment = comment
    return comment

  def LeadingCommentPathLookup(self, path, type_name):
    """Lookup leading comment by path in SourceCodeInfo.

    Args:
      path: a list of path indexes as per SourceCodeInfo in
        https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto.
      type_name: name of the type the comment belongs to.
    Returns:
      Pair of attached leading comment and annotation map when there is a
      leading comment, ('', {}) otherwise.
    """
    leading_comment = self._leading_comments.get(str(path), None)
    if leading_comment is not None:
      _, file_annotations = ExtractAnnotations(self.file_level_comment)
      return ExtractAnnotations(
          StripLeadingSpace(leading_comment) + '\n', file_annotations,
          type_name)
    # Empty annotation *map* for consistency with the populated case (the
    # previous version returned a list here).
    return '', {}

  def GithubUrl(self, path):
    """Obtain data plane API GitHub URL by path from SourceCodeInfo.

    Args:
      path: a list of path indexes as per SourceCodeInfo in descriptor.proto.
    Returns:
      A string with the corresponding data plane API GitHub URL, or '' when
      the path is not found.
    """
    for location in self._proto.location:
      if location.path == path:
        return DATA_PLANE_API_URL_FMT % (self._name, location.span[0])
    return ''


class TypeContext(object):
  """Contextual information for a message/field.

  Provides information around namespaces and enclosing types for fields and
  nested messages/enums.
  """

  def __init__(self, source_code_info, name):
    # SourceCodeInfo as per
    # https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto.
    self.source_code_info = source_code_info
    # path: a list of path indexes as per SourceCodeInfo in descriptor.proto.
    # Extended as nested objects are traversed.
    self.path = []
    # Message/enum/field name. Extended as nested objects are traversed.
    self.name = name
    # Map from type name to the correct type annotation string, e.g. from
    # ".envoy.api.v2.Foo.Bar" to "map<...>". This is lost during proto
    # synthesis and is dynamically recovered in FormatMessage.
    self.map_typenames = {}
    # Map from a message's oneof index to the fields sharing a oneof.
    self.oneof_fields = {}
    # Map from a message's oneof index to the "required" bool property.
    self.oneof_required = {}
    self.type_name = 'file'

  def _Extend(self, path, type_name, name):
    # Internal helper: clone this context with path/type/name extended.
    if not self.name:
      extended_name = name
    else:
      extended_name = '%s.%s' % (self.name, name)
    extended = TypeContext(self.source_code_info, extended_name)
    extended.path = self.path + path
    extended.type_name = type_name
    extended.map_typenames = self.map_typenames.copy()
    extended.oneof_fields = self.oneof_fields.copy()
    extended.oneof_required = self.oneof_required.copy()
    return extended

  def ExtendMessage(self, index, name):
    """Extend type context with a message.

    Args:
      index: message index in file.
      name: message name.
    """
    return self._Extend([4, index], 'message', name)

  def ExtendNestedMessage(self, index, name):
    """Extend type context with a nested message.

    Args:
      index: nested message index in message.
      name: message name.
    """
    return self._Extend([3, index], 'message', name)

  def ExtendField(self, index, name):
    """Extend type context with a field.

    Args:
      index: field index in message.
      name: field name.
    """
    return self._Extend([2, index], 'field', name)

  def ExtendEnum(self, index, name):
    """Extend type context with an enum.

    Args:
      index: enum index in file.
      name: enum name.
    """
    return self._Extend([5, index], 'enum', name)

  def ExtendNestedEnum(self, index, name):
    """Extend type context with a nested enum.

    Args:
      index: enum index in message.
      name: enum name.
    """
    return self._Extend([4, index], 'enum', name)

  def ExtendEnumValue(self, index, name):
    """Extend type context with an enum value.

    Args:
      index: enum value index in enum.
      name: value name.
    """
    return self._Extend([2, index], 'enum_value', name)

  def LeadingCommentPathLookup(self):
    """Return (leading comment, annotations) for this context's path."""
    return self.source_code_info.LeadingCommentPathLookup(
        self.path, self.type_name)

  def GithubUrl(self):
    """Return the data plane API GitHub URL for this context's path."""
    return self.source_code_info.GithubUrl(self.path)


def MapLines(f, s):
  """Apply a function across each line in a flat string.

  Args:
    f: a string transform function for a line.
    s: a string consisting of potentially multiple lines.
  Returns:
    A flat string with f applied to each line.
  """
  return '\n'.join(f(line) for line in s.split('\n'))


def Indent(spaces, line):
  """Indent a string."""
  return ' ' * spaces + line


def IndentLines(spaces, lines):
  """Indent a list of strings."""
  return map(functools.partial(Indent, spaces), lines)


def FormatInternalLink(text, ref):
  """RST internal cross-reference link."""
  return ':ref:`%s <%s>`' % (text, ref)


def FormatExternalLink(text, ref):
  """RST external hyperlink."""
  return '`%s <%s>`_' % (text, ref)


def FormatHeader(style, text):
  """Format RST header.

  Args:
    style: underline style, e.g. '=', '-'.
    text: header text.
  Returns:
    RST formatted header.
  """
  return '%s\n%s\n\n' % (text, style * len(text))


def FormatHeaderFromFile(style, file_level_comment, alt):
  """Format RST header based on special file level title.

  Args:
    style: underline style, e.g. '=', '-'.
    file_level_comment: detached comment at top of file.
    alt: if the file_level_comment does not contain a user-specified title,
      use the alt text as page title.
  Returns:
    RST formatted header, and file level comment without page title strings.
  """
  anchor = FormatAnchor(FileCrossRefLabel(alt))
  stripped_comment, annotations = ExtractAnnotations(file_level_comment)
  if DOC_TITLE_ANNOTATION in annotations:
    return anchor + FormatHeader(
        style, annotations[DOC_TITLE_ANNOTATION]), stripped_comment
  return anchor + FormatHeader(style, alt), stripped_comment


def FormatFieldTypeAsJson(type_context, field):
  """Format FieldDescriptorProto.Type as a pseudo-JSON string.

  Args:
    type_context: contextual information for message/enum/field.
    field: FieldDescriptor proto.
  Returns:
    RST formatted pseudo-JSON string representation of field type.
  """
  if NormalizeFQN(field.type_name) in type_context.map_typenames:
    return '"{...}"'
  if field.label == field.LABEL_REPEATED:
    return '[]'
  if field.type == field.TYPE_MESSAGE:
    return '"{...}"'
  return '"..."'


def FormatMessageAsJson(type_context, msg):
  """Format a message definition DescriptorProto as a pseudo-JSON block.

  Args:
    type_context: contextual information for message/enum/field.
    msg: message definition DescriptorProto.
  Returns:
    RST formatted pseudo-JSON string representation of message definition.
  """
  lines = []
  for index, field in enumerate(msg.field):
    field_type_context = type_context.ExtendField(index, field.name)
    leading_comment, comment_annotations = field_type_context.LeadingCommentPathLookup(
    )
    # Hidden fields are omitted from the pseudo-JSON skeleton.
    if NOT_IMPLEMENTED_HIDE_ANNOTATION in comment_annotations:
      continue
    lines.append('"%s": %s' % (field.name,
                               FormatFieldTypeAsJson(type_context, field)))

  if lines:
    return '.. code-block:: json\n\n  {\n' + ',\n'.join(IndentLines(
        4, lines)) + '\n  }\n\n'
  else:
    return '.. code-block:: json\n\n  {}\n\n'


def NormalizeFQN(fqn):
  """Normalize a fully qualified field type name.

  Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX.

  Args:
    fqn: a fully qualified type name from FieldDescriptorProto.type_name.
  Returns:
    Normalized type name.
  """
  if fqn.startswith(ENVOY_API_NAMESPACE_PREFIX):
    return fqn[len(ENVOY_API_NAMESPACE_PREFIX):]
  if fqn.startswith(ENVOY_PREFIX):
    return fqn[len(ENVOY_PREFIX):]
  return fqn


def FormatEmph(s):
  """RST format a string for emphasis."""
  return '*%s*' % s


def FormatFieldType(type_context, field):
  """Format a FieldDescriptorProto type description.

  Adds cross-refs for message types.
  TODO(htuch): Add cross-refs for enums as well.

  Args:
    type_context: contextual information for message/enum/field.
    field: FieldDescriptor proto.
  Returns:
    RST formatted field type.
  Raises:
    ProtodocError: on an unknown scalar field type.
  """
  if field.type_name.startswith(ENVOY_API_NAMESPACE_PREFIX) or field.type_name.startswith(
      ENVOY_PREFIX):
    type_name = NormalizeFQN(field.type_name)
    if field.type == field.TYPE_MESSAGE:
      if type_context.map_typenames and type_name in type_context.map_typenames:
        return type_context.map_typenames[type_name]
      return FormatInternalLink(type_name, MessageCrossRefLabel(type_name))
    if field.type == field.TYPE_ENUM:
      return FormatInternalLink(type_name, EnumCrossRefLabel(type_name))
  elif field.type_name.startswith(WKT_NAMESPACE_PREFIX):
    wkt = field.type_name[len(WKT_NAMESPACE_PREFIX):]
    return FormatExternalLink(
        wkt,
        'https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#%s'
        % wkt.lower())
  elif field.type_name.startswith(RPC_NAMESPACE_PREFIX):
    rpc = field.type_name[len(RPC_NAMESPACE_PREFIX):]
    return FormatExternalLink(
        rpc,
        'https://cloud.google.com/natural-language/docs/reference/rpc/google.rpc#%s'
        % rpc.lower())
  elif field.type_name:
    return field.type_name

  pretty_type_names = {
      field.TYPE_DOUBLE: 'double',
      field.TYPE_FLOAT: 'float',
      field.TYPE_INT32: 'int32',
      field.TYPE_SFIXED32: 'int32',
      field.TYPE_SINT32: 'int32',
      field.TYPE_FIXED32: 'uint32',
      field.TYPE_UINT32: 'uint32',
      field.TYPE_INT64: 'int64',
      field.TYPE_SFIXED64: 'int64',
      field.TYPE_SINT64: 'int64',
      field.TYPE_FIXED64: 'uint64',
      field.TYPE_UINT64: 'uint64',
      field.TYPE_BOOL: 'bool',
      field.TYPE_STRING: 'string',
      field.TYPE_BYTES: 'bytes',
  }
  if field.type in pretty_type_names:
    return FormatExternalLink(
        pretty_type_names[field.type],
        'https://developers.google.com/protocol-buffers/docs/proto#scalar')
  raise ProtodocError('Unknown field type ' + str(field.type))


def StripLeadingSpace(s):
  """Remove one leading space from each line of a flat comment string."""
  return MapLines(lambda s: s[1:], s)


def FileCrossRefLabel(msg_name):
  """File cross reference label."""
  return 'envoy_api_file_%s' % msg_name


def MessageCrossRefLabel(msg_name):
  """Message cross reference label."""
  return 'envoy_api_msg_%s' % msg_name


def EnumCrossRefLabel(enum_name):
  """Enum cross reference label."""
  return 'envoy_api_enum_%s' % enum_name


def FieldCrossRefLabel(field_name):
  """Field cross reference label."""
  return 'envoy_api_field_%s' % field_name


def EnumValueCrossRefLabel(enum_value_name):
  """Enum value cross reference label."""
  return 'envoy_api_enum_value_%s' % enum_value_name


def FormatAnchor(label):
  """Format a label as an Envoy API RST anchor."""
  return '.. _%s:\n\n' % label


def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field):
  """Format a FieldDescriptorProto as an RST definition list item.

  Args:
    outer_type_context: contextual information for enclosing message.
    type_context: contextual information for message/enum/field.
    field: FieldDescriptorProto.
  Returns:
    RST formatted definition list item, or '' when the field is hidden.
  """
  if field.HasField('oneof_index'):
    oneof_template = '\nPrecisely one of %s must be set.\n' if type_context.oneof_required[
        field.oneof_index] else '\nOnly one of %s may be set.\n'
    oneof_comment = oneof_template % ', '.join(
        FormatInternalLink(
            f, FieldCrossRefLabel(outer_type_context.ExtendField(0, f).name))
        for f in type_context.oneof_fields[field.oneof_index])
  else:
    oneof_comment = ''
  anchor = FormatAnchor(FieldCrossRefLabel(type_context.name))
  annotations = []
  # PGV validation rules that imply the field is required are surfaced in docs.
  if field.options.HasExtension(validate_pb2.rules):
    rule = field.options.Extensions[validate_pb2.rules]
    if ((rule.HasField('message') and rule.message.required) or
        (rule.HasField('string') and rule.string.min_bytes > 0) or
        (rule.HasField('repeated') and rule.repeated.min_items > 0)):
      annotations.append('*REQUIRED*')
  leading_comment, comment_annotations = type_context.LeadingCommentPathLookup()
  if NOT_IMPLEMENTED_HIDE_ANNOTATION in comment_annotations:
    return ''
  comment = '(%s) ' % ', '.join(
      [FormatFieldType(type_context, field)] + annotations) + leading_comment
  return anchor + field.name + '\n' + MapLines(
      functools.partial(Indent, 2), comment + oneof_comment)


def FormatMessageAsDefinitionList(type_context, msg):
  """Format a DescriptorProto as an RST definition list.

  Args:
    type_context: contextual information for message/enum/field.
    msg: DescriptorProto.
  Returns:
    RST formatted definition list.
  """
  type_context.oneof_fields = defaultdict(list)
  type_context.oneof_required = defaultdict(bool)
  for index, field in enumerate(msg.field):
    if field.HasField('oneof_index'):
      _, comment_annotations = type_context.ExtendField(
          index, field.name).LeadingCommentPathLookup()
      if NOT_IMPLEMENTED_HIDE_ANNOTATION in comment_annotations:
        continue
      type_context.oneof_fields[field.oneof_index].append(field.name)
  for index, oneof_decl in enumerate(msg.oneof_decl):
    if oneof_decl.options.HasExtension(validate_pb2.required):
      type_context.oneof_required[index] = oneof_decl.options.Extensions[
          validate_pb2.required]
  return '\n'.join(
      FormatFieldAsDefinitionListItem(
          type_context, type_context.ExtendField(index, field.name), field)
      for index, field in enumerate(msg.field)) + '\n'
+ type_context.map_typenames = { + '%s.%s' % (type_context.name, nested_msg.name): 'map<%s, %s>' % tuple( + map( + functools.partial(FormatFieldType, type_context), + nested_msg.field)) + for nested_msg in msg.nested_type + if nested_msg.options.map_entry + } + nested_msgs = '\n'.join( + FormatMessage( + type_context.ExtendNestedMessage(index, nested_msg.name), nested_msg) + for index, nested_msg in enumerate(msg.nested_type)) + nested_enums = '\n'.join( + FormatEnum( + type_context.ExtendNestedEnum(index, nested_enum.name), nested_enum) + for index, nested_enum in enumerate(msg.enum_type)) + anchor = FormatAnchor(MessageCrossRefLabel(type_context.name)) + header = FormatHeader('-', type_context.name) + proto_link = FormatExternalLink('[%s proto]' % type_context.name, + type_context.GithubUrl()) + '\n\n' + leading_comment, annotations = type_context.LeadingCommentPathLookup() + if NOT_IMPLEMENTED_HIDE_ANNOTATION in annotations: + return '' + return anchor + header + proto_link + leading_comment + FormatMessageAsJson( + type_context, msg) + FormatMessageAsDefinitionList( + type_context, msg) + nested_msgs + '\n' + nested_enums + + +def FormatEnumValueAsDefinitionListItem(type_context, enum_value): + """Format a EnumValueDescriptorProto as RST definition list item. + + Args: + type_context: contextual information for message/enum/field. + enum_value: EnumValueDescriptorProto. + Returns: + RST formatted definition list item. 
+ """ + anchor = FormatAnchor(EnumValueCrossRefLabel(type_context.name)) + default_comment = '*(DEFAULT)* ' if enum_value.number == 0 else '' + leading_comment, annotations = type_context.LeadingCommentPathLookup() + if NOT_IMPLEMENTED_HIDE_ANNOTATION in annotations: + return '' + comment = default_comment + UNICODE_INVISIBLE_SEPARATOR + leading_comment + return anchor + enum_value.name + '\n' + MapLines( + functools.partial(Indent, 2), comment) + + +def FormatEnumAsDefinitionList(type_context, enum): + """Format a EnumDescriptorProto as RST definition list. + + Args: + type_context: contextual information for message/enum/field. + enum: DescriptorProto. + Returns: + RST formatted definition list item. + """ + return '\n'.join( + FormatEnumValueAsDefinitionListItem( + type_context.ExtendEnumValue(index, enum_value.name), enum_value) + for index, enum_value in enumerate(enum.value)) + '\n' + + +def FormatEnum(type_context, enum): + """Format an EnumDescriptorProto as RST section. + + Args: + type_context: contextual information for message/enum/field. + enum: EnumDescriptorProto. + Returns: + RST formatted section. + """ + anchor = FormatAnchor(EnumCrossRefLabel(type_context.name)) + header = FormatHeader('-', 'Enum %s' % type_context.name) + proto_link = FormatExternalLink('[%s proto]' % type_context.name, + type_context.GithubUrl()) + '\n\n' + leading_comment, annotations = type_context.LeadingCommentPathLookup() + if NOT_IMPLEMENTED_HIDE_ANNOTATION in annotations: + return '' + return anchor + header + proto_link + leading_comment + FormatEnumAsDefinitionList( + type_context, enum) + + +def FormatProtoAsBlockComment(proto): + """Format as RST a proto as a block comment. + + Useful in debugging, not usually referenced. 
+ """ + return '\n\nproto::\n\n' + MapLines(functools.partial(Indent, 2), + str(proto)) + '\n' + + +def GenerateRst(proto_file): + """Generate a RST representation from a FileDescriptor proto.""" + source_code_info = SourceCodeInfo(proto_file.name, + proto_file.source_code_info) + # Find the earliest detached comment, attribute it to file level. + # Also extract file level titles if any. + header, comment = FormatHeaderFromFile( + '=', source_code_info.file_level_comment, proto_file.name) + package_prefix = NormalizeFQN('.' + proto_file.package + '.')[:-1] + package_type_context = TypeContext(source_code_info, package_prefix) + msgs = '\n'.join( + FormatMessage(package_type_context.ExtendMessage(index, msg.name), msg) + for index, msg in enumerate(proto_file.message_type)) + enums = '\n'.join( + FormatEnum(package_type_context.ExtendEnum(index, enum.name), enum) + for index, enum in enumerate(proto_file.enum_type)) + debug_proto = FormatProtoAsBlockComment(proto_file) + return header + comment + msgs + enums # + debug_proto + +def Main(): + # http://www.expobrain.net/2015/09/13/create-a-plugin-for-google-protocol-buffer/ + request = plugin_pb2.CodeGeneratorRequest() + request.ParseFromString(sys.stdin.read()) + response = plugin_pb2.CodeGeneratorResponse() + cprofile_enabled = os.getenv('CPROFILE_ENABLED') + + for proto_file in request.proto_file: + f = response.file.add() + f.name = proto_file.name + '.rst' + if cprofile_enabled: + pr = cProfile.Profile() + pr.enable() + # We don't actually generate any RST right now, we just string dump the + # input proto file descriptor into the output file. 
+ f.content = GenerateRst(proto_file) + if cprofile_enabled: + pr.disable() + stats_stream = StringIO.StringIO() + ps = pstats.Stats(pr, stream=stats_stream).sort_stats(os.getenv('CPROFILE_SORTBY', 'cumulative')) + stats_file = response.file.add() + stats_file.name = proto_file.name + '.rst.profile' + ps.print_stats() + stats_file.content = stats_stream.getvalue() + sys.stdout.write(response.SerializeToString()) + +if __name__ == '__main__': + Main()