diff --git a/.circleci/config.templ.yml b/.circleci/config.templ.yml index ea6d3c9cb39..7beaabf9791 100644 --- a/.circleci/config.templ.yml +++ b/.circleci/config.templ.yml @@ -4,7 +4,7 @@ default_resource_class: &default_resource_class medium ubuntu_base_image: &ubuntu_base_img ubuntu-2004:2023.04.2 cimg_base_image: &cimg_base_image cimg/base:2022.08 python310_image: &python310_image cimg/python:3.10.12 -ddtrace_dev_image: &ddtrace_dev_image ghcr.io/datadog/dd-trace-py/testrunner@sha256:4c8afd048321e702f3605b4ae4d206fcd00e74bac708089cfe7f9c24383dc53b +ddtrace_dev_image: &ddtrace_dev_image ghcr.io/datadog/dd-trace-py/testrunner@sha256:8ca43d46ff34e078bd7bc0662e74e6be38547a98140a5cd4203805f6b214b583 redis_image: &redis_image redis:4.0-alpine@sha256:3e99741f293147ff406657dda7644c2b88564b80a498cd00da8f905743449c9f memcached_image: &memcached_image memcached:1.5-alpine@sha256:48cb7207e3d34871893fa1628f3a4984375153e9942facf82e25935b0a633c8a cassandra_image: &cassandra_image cassandra:3.11.7@sha256:495e5752526f7e75d3ad85b6a6bbf3b79714321b17a44255a216c341e3baae11 diff --git a/.github/workflows/django-overhead-profile.yml b/.github/workflows/django-overhead-profile.yml index 3d5add5d559..55f023f0c8f 100644 --- a/.github/workflows/django-overhead-profile.yml +++ b/.github/workflows/django-overhead-profile.yml @@ -13,12 +13,19 @@ on: jobs: django-overhead-profile: runs-on: ubuntu-latest + strategy: + matrix: + include: + - suffix: "-v1" + stack_v2: "0" + - suffix: "-v2" + stack_v2: "1" env: PREFIX: ${{ github.workspace }}/prefix DD_CODE_ORIGIN_FOR_SPANS_ENABLED: "1" DD_PROFILING_ENABLED: "1" - DD_PROFILING_STACK_V2_ENABLED: "1" - DD_PROFILING_OUTPUT_PPROF: ${{ github.workspace }}/prefix/artifacts/ddtrace_profile.pprof + DD_PROFILING_STACK_V2_ENABLED: ${{ matrix.stack_v2 }} + DD_PROFILING_OUTPUT_PPROF: ${{ github.workspace }}/prefix/artifacts/ddtrace_profile defaults: run: working-directory: ddtrace @@ -41,6 +48,6 @@ jobs: - uses: actions/upload-artifact@v4 with: - name: django-overhead-profile + name: django-overhead-profile${{ matrix.suffix }} path: ${{ github.workspace }}/prefix/artifacts diff --git a/.github/workflows/requirements-locks.yml b/.github/workflows/requirements-locks.yml index b1703e9c27e..f64c17bc413 100644 --- a/.github/workflows/requirements-locks.yml +++ b/.github/workflows/requirements-locks.yml @@ -11,7 +11,7 @@ jobs: validate: name: Check requirements lockfiles runs-on: ubuntu-latest - container: ghcr.io/datadog/dd-trace-py/testrunner@sha256:4c8afd048321e702f3605b4ae4d206fcd00e74bac708089cfe7f9c24383dc53b + container: ghcr.io/datadog/dd-trace-py/testrunner@sha256:8ca43d46ff34e078bd7bc0662e74e6be38547a98140a5cd4203805f6b214b583 steps: - uses: actions/checkout@v4 with: diff --git a/.gitignore b/.gitignore index 92f055b78ce..72b95218d1c 100644 --- a/.gitignore +++ b/.gitignore @@ -162,6 +162,8 @@ template_venv/ cloned_venvs/ # CircleCI generated config .circleci/config.gen.yml +# GitLab CI generated config +.gitlab/*-gen.yml # Automatically generated fixtures tests/appsec/iast/fixtures/aspects/callers.py diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index fa04948a8e3..c3e44cc8e75 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,6 +1,7 @@ stages: - package - - tests + - tests-gen + - tests-trigger - shared-pipeline - dogfood - benchmarks @@ -10,27 +11,37 @@ stages: variables: REPO_LANG: python # "python" is used everywhere rather than "py" - TESTRUNNER_IMAGE: registry.ddbuild.io/images/mirror/dd-trace-py/testrunner:ecc5741ff3e7c8a30363fcd9cca79a371dcea5b4 # CI_DEBUG_SERVICES: 
"true" -.testrunner: - image: $TESTRUNNER_IMAGE - # DEV: we have a larger pool of amd64 runners, prefer that over arm64 - tags: [ "arch:amd64" ] - timeout: 20m - before_script: - - pyenv global 3.12 3.7 3.8 3.9 3.10 3.11 3.13-dev - - export _CI_DD_AGENT_URL=http://${HOST_IP}:8126/ - include: - remote: https://gitlab-templates.ddbuild.io/libdatadog/include/one-pipeline.yml - local: ".gitlab/services.yml" # Include early so others can use the definitions - local: ".gitlab/benchmarks.yml" - local: ".gitlab/package.yml" - - local: ".gitlab/tests.yml" - local: ".gitlab/macrobenchmarks.yml" - local: ".gitlab/dogfood.yml" - local: ".gitlab/release.yml" + - local: ".gitlab/testrunner.yml" + +tests-gen: + stage: tests-gen + extends: .testrunner + script: + - pip install riot==0.20.0 + - riot -v run --pass-env -s gitlab-gen-config -v + needs: [] + artifacts: + paths: + - .gitlab/tests-gen.yml + +run-tests-trigger: + stage: tests-trigger + needs: [ tests-gen ] + trigger: + include: + - artifact: .gitlab/tests-gen.yml + job: tests-gen + strategy: depend requirements_json_test: rules: diff --git a/.gitlab/download-wheels-from-gh-actions.sh b/.gitlab/download-wheels-from-gh-actions.sh index 2803558d784..2ffe403cd14 100755 --- a/.gitlab/download-wheels-from-gh-actions.sh +++ b/.gitlab/download-wheels-from-gh-actions.sh @@ -6,20 +6,26 @@ if [ -z "$CI_COMMIT_SHA" ]; then exit 1 fi -echo "Querying for RUN_ID" - -timeout=600 # 10 minutes -start_time=$(date +%s) -end_time=$((start_time + timeout)) -# Loop for 10 minutes waiting for run to appear in github -while [ $(date +%s) -lt $end_time ]; do - RUN_ID=$(gh run ls --repo DataDog/dd-trace-py --commit=$CI_COMMIT_SHA --workflow=build_deploy.yml --json databaseId --jq "first (.[]) | .databaseId") - if [ -n "$RUN_ID" ]; then - break; - fi - echo "Waiting for RUN_ID" - sleep 20 -done +RUN_ID=$(gh run ls --repo DataDog/dd-trace-py --commit=$CI_COMMIT_SHA --workflow=build_deploy.yml --json databaseId --jq "first (.[]) | .databaseId") +if [ -n "$RUN_ID" ]; then + # The job has not started yet. Give it time to start + sleep 180 # 3 minutes + + echo "Querying for RUN_ID" + + timeout=600 # 10 minutes + start_time=$(date +%s) + end_time=$((start_time + timeout)) + # Loop for 10 minutes waiting for run to appear in github + while [ $(date +%s) -lt $end_time ]; do + RUN_ID=$(gh run ls --repo DataDog/dd-trace-py --commit=$CI_COMMIT_SHA --workflow=build_deploy.yml --json databaseId --jq "first (.[]) | .databaseId") + if [ -n "$RUN_ID" ]; then + break; + fi + echo "Waiting for RUN_ID" + sleep 60 + done +fi if [ -z "$RUN_ID" ]; then echo "RUN_ID not found" @@ -27,16 +33,21 @@ if [ -z "$RUN_ID" ]; then fi echo "Found RUN_ID: $RUN_ID" -echo "Waiting for workflow to finish" - -# wait for run to finish -gh run watch $RUN_ID --interval 45 --exit-status 1 --repo DataDog/dd-trace-py mkdir pywheels cd pywheels -echo "Github workflow finished. Downloading wheels" +if [[ $(gh run view $RUN_ID --exit-status --json status --jq .status) != "completed" ]]; then + echo "Waiting for workflow to finish" + + # Give time to the job to finish + sleep 300 # 5 minutes + # wait for run to finish + gh run watch $RUN_ID --interval 60 --exit-status 1 --repo DataDog/dd-trace-py +fi + +echo "Github workflow finished. 
Downloading wheels" # download all wheels gh run download $RUN_ID --repo DataDog/dd-trace-py diff --git a/.gitlab/package.yml b/.gitlab/package.yml index 2f896637599..74d76bc0ae4 100644 --- a/.gitlab/package.yml +++ b/.gitlab/package.yml @@ -1,24 +1,3 @@ -build_base_venvs: - extends: .testrunner - stage: package - parallel: - matrix: - - PYTHON_VERSION: ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] - variables: - CMAKE_BUILD_PARALLEL_LEVEL: 12 - PIP_VERBOSE: 1 - DD_PROFILING_NATIVE_TESTS: 1 - script: - - pip install riot==0.20.0 - - riot -P -v generate --python=$PYTHON_VERSION - artifacts: - name: venv_$PYTHON_VERSION - paths: - - .riot/venv_* - - ddtrace/**/*.so* - - ddtrace/internal/datadog/profiling/crashtracker/crashtracker_exe* - - ddtrace/internal/datadog/profiling/test/test_* - download_ddtrace_artifacts: image: registry.ddbuild.io/github-cli:v27480869-eafb11d-2.43.0 tags: [ "arch:amd64" ] diff --git a/.gitlab/testrunner.yml b/.gitlab/testrunner.yml new file mode 100644 index 00000000000..38e2111ec78 --- /dev/null +++ b/.gitlab/testrunner.yml @@ -0,0 +1,8 @@ +.testrunner: + image: registry.ddbuild.io/images/mirror/dd-trace-py/testrunner:7a2e802af76051f82d698919d2837eff18dbb48e + # DEV: we have a larger pool of amd64 runners, prefer that over arm64 + tags: [ "arch:amd64" ] + timeout: 20m + before_script: + - pyenv global 3.12 3.7 3.8 3.9 3.10 3.11 3.13-dev + - export _CI_DD_AGENT_URL=http://${HOST_IP}:8126/ diff --git a/.gitlab/tests.yml b/.gitlab/tests.yml index 8d1b3d16d54..45b01d02f47 100644 --- a/.gitlab/tests.yml +++ b/.gitlab/tests.yml @@ -1,5 +1,23 @@ +stages: + - tests + variables: RIOT_RUN_CMD: riot -P -v run --exitfirst --pass-env -s + REPO_LANG: python # "python" is used everywhere rather than "py" + # CI_DEBUG_SERVICES: "true" + + +.testrunner: + image: registry.ddbuild.io/images/mirror/dd-trace-py/testrunner:7a2e802af76051f82d698919d2837eff18dbb48e + # DEV: we have a larger pool of amd64 runners, prefer that over arm64 + tags: [ "arch:amd64" ] + timeout: 20m + before_script: + - pyenv global 3.12 3.7 3.8 3.9 3.10 3.11 3.13-dev + - export _CI_DD_AGENT_URL=http://${HOST_IP}:8126/ + + +{{services.yml}} .test_base_hatch: extends: .testrunner @@ -7,6 +25,8 @@ variables: # Hatch doesn't use pre-built wheels or venvs so we can start them right away needs: [] parallel: 4 + # DEV: This is the max retries that GitLab currently allows for + retry: 2 script: - export PYTEST_ADDOPTS="${PYTEST_ADDOPTS} --ddtrace" - export _DD_CIVISIBILITY_USE_CI_CONTEXT_PROVIDER=true @@ -22,6 +42,7 @@ variables: hatch run ${env}:test done + .test_base_hatch_snapshot: extends: .test_base_hatch services: @@ -32,6 +53,28 @@ variables: # agent at that host. 
Therefore setting this as a variable will cause recursive requests to the testagent - export DD_TRACE_AGENT_URL="http://testagent:9126" + +build_base_venvs: + extends: .testrunner + stage: tests + parallel: + matrix: + - PYTHON_VERSION: ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] + variables: + CMAKE_BUILD_PARALLEL_LEVEL: 12 + PIP_VERBOSE: 1 + DD_PROFILING_NATIVE_TESTS: 1 + script: + - pip install riot==0.20.0 + - riot -P -v generate --python=$PYTHON_VERSION + artifacts: + name: venv_$PYTHON_VERSION + paths: + - .riot/venv_* + - ddtrace/**/*.so* + - ddtrace/internal/datadog/profiling/crashtracker/crashtracker_exe* + - ddtrace/internal/datadog/profiling/test/test_* + .test_base_riot: extends: .testrunner stage: tests @@ -39,6 +82,8 @@ variables: parallel: 4 services: - !reference [.services, ddagent] + # DEV: This is the max retries that GitLab currently allows for + retry: 2 script: - pip install riot==0.20.0 - unset DD_SERVICE @@ -71,13 +116,20 @@ variables: # DEV: All job variables get shared with services, setting `DD_TRACE_AGENT_URL` on the testagent will tell it to forward all requests to the # agent at that host. Therefore setting this as a variable will cause recursive requests to the testagent - export DD_TRACE_AGENT_URL="http://testagent:9126" + - ln -s "${CI_PROJECT_DIR}" "/root/project" + +slotscheck: + extends: .testrunner + stage: tests + needs: [] + script: + - hatch run slotscheck:_ + +conftests: + extends: .testrunner + stage: tests + needs: [] + script: + - hatch run meta-testing:meta-testing -include: - - local: ".gitlab/tests/appsec.yml" - - local: ".gitlab/tests/ci_visibility.yml" - - local: ".gitlab/tests/contrib.yml" - - local: ".gitlab/tests/core.yml" - - local: ".gitlab/tests/debugging.yml" - - local: ".gitlab/tests/llmobs.yml" - - local: ".gitlab/tests/tracer.yml" - - local: ".gitlab/tests/profiling.yml" +# Required jobs will appear here diff --git a/.gitlab/tests/appsec.yml b/.gitlab/tests/appsec.yml deleted file mode 100644 index 3cb1f360192..00000000000 --- a/.gitlab/tests/appsec.yml +++ /dev/null @@ -1,70 +0,0 @@ -appsec: - extends: .test_base_riot_snapshot - parallel: 6 - variables: - SUITE_NAME: "appsec$" - retry: 2 - -appsec iast: - extends: .test_base_riot_snapshot - parallel: 6 - services: - - !reference [.test_base_riot_snapshot, services] - - !reference [.services, postgres] - variables: - SUITE_NAME: "appsec_iast$" - TEST_POSTGRES_HOST: "postgres" - retry: 2 - timeout: 25m - -appsec iast tdd_propagation: - extends: .test_base_riot_snapshot - parallel: 2 - variables: - SUITE_NAME: "appsec_iast_tdd_propagation" - retry: 2 - -appsec iast memcheck: - extends: .test_base_riot_snapshot - parallel: 4 - variables: - SUITE_NAME: "appsec_iast_memcheck" - CI_DEBUG_TRACE: "true" - PYTEST_ADDOPTS: "-v -s" - retry: 2 - -appsec threats django: - extends: .test_base_hatch - parallel: 12 - variables: - SUITE_NAME: "appsec_threats_django" - retry: 2 - -appsec threats flask: - extends: .test_base_hatch - parallel: 10 - variables: - SUITE_NAME: "appsec_threats_flask" - retry: 2 - -appsec threats fastapi: - extends: .test_base_hatch - parallel: 9 - variables: - SUITE_NAME: "appsec_threats_fastapi" - retry: 2 - -appsec aggregated leak testing: - extends: .test_base_hatch - parallel: 3 - variables: - SUITE_NAME: "appsec_aggregated_leak_testing" - retry: 2 - timeout: 35m - -appsec iast native: - extends: .test_base_hatch - parallel: 6 - variables: - SUITE_NAME: "appsec_iast_native" - retry: 2 diff --git a/.gitlab/tests/ci_visibility.yml b/.gitlab/tests/ci_visibility.yml 
deleted file mode 100644 index ef3791bad93..00000000000 --- a/.gitlab/tests/ci_visibility.yml +++ /dev/null @@ -1,28 +0,0 @@ -ci_visibility: - extends: .test_base_riot_snapshot - parallel: 6 - variables: - SUITE_NAME: "ci_visibility" - -dd_coverage: - extends: .test_base_hatch_snapshot - parallel: 6 - variables: - SUITE_NAME: "dd_coverage" - -pytest: - extends: .test_base_riot_snapshot - parallel: 12 - variables: - SUITE_NAME: "pytest" - -pytest_v2: - extends: .test_base_hatch_snapshot - parallel: 10 - variables: - SUITE_NAME: "pytest" - -unittest: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "unittest" diff --git a/.gitlab/tests/contrib.yml b/.gitlab/tests/contrib.yml deleted file mode 100644 index b81feaa41d9..00000000000 --- a/.gitlab/tests/contrib.yml +++ /dev/null @@ -1,394 +0,0 @@ -algoliasearch: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "algoliasearch" - -aredis: - extends: .test_base_riot_snapshot - parallel: 3 - services: - - !reference [.test_base_riot_snapshot, services] - - !reference [.services, redis] - variables: - SUITE_NAME: "aredis$" - -asgi: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "asgi$" - -asynctest: - extends: .test_base_riot - parallel: 3 - variables: - SUITE_NAME: 'asynctest$' - -avro: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "avro" - -bottle: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "bottle" - -celery: - extends: .test_base_riot_snapshot - services: - - !reference [.test_base_riot_snapshot, services] - - !reference [.services, rabbitmq] - - !reference [.services, redis] - variables: - SUITE_NAME: "celery" - LOG_LEVEL: DEBUG - SNAPSHOT_DIR: /snapshots - PORT: 9126 - SNAPSHOT_CI: 1 - DD_POOL_TRACE_CHECK_FAILURES: true - DD_DISABLE_ERROR_RESPONSES: true - ENABLED_CHECKS: trace_stall,meta_tracer_version_header,trace_content_length,trace_peer_service,trace_dd_service # disable flaky content length check - - -cherrypy: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "cherrypy" - -django: - extends: .test_base_riot_snapshot - services: - - !reference [.test_base_riot_snapshot, services] - - !reference [.services, memcached] - - !reference [.services, redis] - - !reference [.services, postgres] - variables: - SUITE_NAME: "django($|_celery)" - TEST_POSTGRES_HOST: "postgres" - TEST_MEMCACHED_HOST: "memcached" - TEST_REDIS_HOST: "redis" - -django_hosts: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: 'django_hosts$' - -djangorestframework: - extends: .test_base_riot_snapshot - services: - - !reference [.test_base_riot_snapshot, services] - - !reference [.services, memcached] - - !reference [.services, redis] - variables: - SUITE_NAME: "djangorestframework" - TEST_MEMCACHED_HOST: "memcached" - TEST_REDIS_HOST: "redis" - -dogpile_cache: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "dogpile_cache" - -dramatiq: - parallel: 2 - extends: .test_base_riot_snapshot - services: - - !reference [.test_base_riot_snapshot, services] - - !reference [.services, redis] - variables: - SUITE_NAME: "dramatiq" - TEST_REDIS_HOST: "redis" - -elasticsearch: - extends: .test_base_riot_snapshot - parallel: 17 - services: - - !reference [.test_base_riot_snapshot, services] - - !reference [.services, elasticsearch] - - !reference [.services, opensearch] - variables: - SUITE_NAME: "elasticsearch" - TEST_ELASTICSEARCH_HOST: "elasticsearch" - TEST_OPENSEARCH_HOST: "opensearch" - -falcon: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: 
"falcon" - -fastapi: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "fastapi" - -flask: - extends: .test_base_riot_snapshot - parallel: 14 - services: - - !reference [.test_base_riot_snapshot, services] - - !reference [.services, memcached] - - !reference [.services, redis] - variables: - SUITE_NAME: "flask" - TEST_MEMCACHED_HOST: "memcached" - TEST_REDIS_HOST: "redis" - -gevent: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "gevent" - -graphene: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "graphene" - -graphql: - extends: .test_base_riot_snapshot - parallel: 6 - variables: - SUITE_NAME: "graphql" - -grpc: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "grpc" - -gunicorn: - extends: .test_base_riot_snapshot - parallel: 12 - variables: - SUITE_NAME: "gunicorn" - -jinja2: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "jinja2" - -kafka: - extends: .test_base_riot_snapshot - parallel: 4 - services: - - !reference [.test_base_riot_snapshot, services] - - !reference [.services, kafka] - variables: - SUITE_NAME: "kafka" - TEST_KAFKA_HOST: "kafka" - TEST_KAFKA_PORT: "29092" - -kombu: - extends: .test_base_riot_snapshot - services: - - !reference [.test_base_riot_snapshot, services] - - !reference [.services, rabbitmq] - variables: - SUITE_NAME: "kombu" - -logbook: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "logbook" - -loguru: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "loguru" - -mako: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "mako" - -molten: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "molten" - -protobuf: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "protobuf" - -opentracer: - extends: .test_base_riot - variables: - SUITE_NAME: "opentracer" - -pylibmc: - extends: .test_base_riot_snapshot - services: - - !reference [.test_base_riot_snapshot, services] - - !reference [.services, memcached] - variables: - SUITE_NAME: "pylibmc" - -pymemcache: - extends: .test_base_riot_snapshot - services: - - !reference [.test_base_riot_snapshot, services] - - !reference [.services, memcached] - variables: - SUITE_NAME: "pymemcache" - -pymongo: - extends: .test_base_riot_snapshot - services: - - !reference [.test_base_riot_snapshot, services] - - !reference [.services, mongo] - variables: - SUITE_NAME: "pymongo" - -pynamodb: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "pynamodb" - -pyodbc: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "pyodbc" - -pyramid: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "pyramid" - before_script: - - !reference [.test_base_riot_snapshot, before_script] - # This is because in the riotfile we specify a path to tests/contrib/pyramid/pserve_app - # in the lockfile it gets replaced with the absolute path of /root/project/tests/.../pserve_app - - ln -s "${CI_PROJECT_DIR}" "/root/project" - -redis: - extends: .test_base_riot_snapshot - services: - - !reference [.test_base_riot_snapshot, services] - - !reference [.services, redis] - - !reference [.services, rediscluster] - parallel: 5 - variables: - SUITE_NAME: "^redis$" - -rediscluster: - extends: .test_base_riot_snapshot - services: - - !reference [.test_base_riot_snapshot, services] - - !reference [.services, redis] - - !reference [.services, rediscluster] - variables: - SUITE_NAME: "rediscluster" - -rq: - extends: .test_base_riot_snapshot - services: - - !reference [.test_base_riot_snapshot, services] - 
- !reference [.services, redis] - variables: - SUITE_NAME: "rq" - parallel: 2 - -sanic: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "sanic" - -snowflake: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "snowflake" - -sourcecode: - extends: .test_base_riot - variables: - SUITE_NAME: "sourcecode" - -starlette: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "starlette" - -stdlib: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: 'asyncio$|sqlite3$|futures$|dbapi$|dbapi_async$' - -structlog: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "structlog" - -sourcecode: - extends: .test_base_riot - variables: - SUITE_NAME: "sourcecode" - -subprocess: - extends: .test_base_riot - variables: - SUITE_NAME: "subprocess" - -test_logging: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: 'test_logging' - -tornado: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "tornado" - -aiohttp: - extends: .test_base_riot_snapshot - parallel: 3 - variables: - SUITE_NAME: "aiohttp" - services: - - !reference [.test_base_riot_snapshot, services] - - !reference [.services, httpbin_local] - -requests: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "requests" - services: - - !reference [.test_base_riot_snapshot, services] - - !reference [.services, httpbin_local] - -httplib: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "httplib" - services: - - !reference [.test_base_riot_snapshot, services] - - !reference [.services, httpbin_local] - -httpx: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "httpx" - services: - - !reference [.test_base_riot_snapshot, services] - - !reference [.services, httpbin_local] - parallel: 2 - -urllib3: - extends: .test_base_riot_snapshot - services: - - !reference [.test_base_riot_snapshot, services] - - !reference [.services, httpbin_local] - variables: - SUITE_NAME: "urllib3" - TEST_HTTPBIN_HOST: "httpbin-local" - TEST_HTTPBIN_PORT: "8001" - -wsgi: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "wsgi" - -yaaredis: - extends: .test_base_riot_snapshot - services: - - !reference [.test_base_riot_snapshot, services] - - !reference [.services, redis] - variables: - SUITE_NAME: "yaaredis$" diff --git a/.gitlab/tests/core.yml b/.gitlab/tests/core.yml deleted file mode 100644 index d283aa25981..00000000000 --- a/.gitlab/tests/core.yml +++ /dev/null @@ -1,49 +0,0 @@ -internal: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "internal" - -telemetry: - extends: .test_base_riot_snapshot - parallel: 4 - variables: - SUITE_NAME: "telemetry" - -integration-testagent: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "integration-snapshot*" - -integration-agent: - extends: .test_base_riot - variables: - SUITE_NAME: "integration-latest*" - -vendor: - extends: .test_base_riot - variables: - SUITE_NAME: "vendor" - -ddtracerun: - extends: .test_base_riot - parallel: 6 - services: - - !reference [.test_base_riot, services] - - !reference [.services, redis] - variables: - SUITE_NAME: "ddtracerun" - TEST_REDIS_HOST: "redis" - -slotscheck: - extends: .testrunner - stage: tests - needs: [] - script: - - hatch run slotscheck:_ - -conftests: - extends: .testrunner - stage: tests - needs: [] - script: - - hatch run meta-testing:meta-testing diff --git a/.gitlab/tests/debugging.yml b/.gitlab/tests/debugging.yml deleted file mode 100644 index 644073eb50e..00000000000 --- a/.gitlab/tests/debugging.yml +++ /dev/null @@ -1,5 +0,0 @@ 
-debugger: - extends: .test_base_riot - variables: - SUITE_NAME: "debugger" - diff --git a/.gitlab/tests/llmobs.yml b/.gitlab/tests/llmobs.yml deleted file mode 100644 index 7abae87537c..00000000000 --- a/.gitlab/tests/llmobs.yml +++ /dev/null @@ -1,28 +0,0 @@ - -llmobs: - extends: .test_base_riot - variables: - SUITE_NAME: "llmobs" - -openai: - extends: .test_base_riot_snapshot - parallel: 10 - variables: - SUITE_NAME: "openai" - -langchain: - extends: .test_base_riot_snapshot - parallel: 6 - variables: - SUITE_NAME: "langchain" - -anthropic: - extends: .test_base_riot_snapshot - parallel: 3 - variables: - SUITE_NAME: "anthropic" - -google_generativeai: - extends: .test_base_riot_snapshot - variables: - SUITE_NAME: "google_generativeai" diff --git a/.gitlab/tests/profiling.yml b/.gitlab/tests/profiling.yml deleted file mode 100644 index 9e7a6c8e2a1..00000000000 --- a/.gitlab/tests/profiling.yml +++ /dev/null @@ -1,7 +0,0 @@ -profiling: - extends: .test_base_riot - parallel: 20 - variables: - DD_TRACE_AGENT_URL: "" - SUITE_NAME: "profile$|profile-v2" - retry: 2 diff --git a/.gitlab/tests/tracer.yml b/.gitlab/tests/tracer.yml deleted file mode 100644 index 85898157c04..00000000000 --- a/.gitlab/tests/tracer.yml +++ /dev/null @@ -1,7 +0,0 @@ -tracer: - extends: .test_base_riot - parallel: 9 - variables: - DD_TRACE_AGENT_URL: "http://localhost:8126" - SUITE_NAME: "tracer" - retry: 2 \ No newline at end of file diff --git a/.riot/requirements/12616cb.txt b/.riot/requirements/12616cb.txt index 31f15cfe47c..3c82298cb9b 100644 --- a/.riot/requirements/12616cb.txt +++ b/.riot/requirements/12616cb.txt @@ -5,7 +5,7 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/12616cb.in # attrs==24.2.0 -coverage[toml]==7.6.1 +coverage[toml]==7.6.4 exceptiongroup==1.2.2 hypothesis==6.45.0 importlib-metadata==8.5.0 @@ -13,13 +13,13 @@ iniconfig==2.0.0 mock==5.1.0 mongoengine==0.29.1 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 pymongo==3.9.0 pytest==8.3.3 -pytest-cov==5.0.0 +pytest-cov==6.0.0 pytest-mock==3.14.0 -pytest-randomly==3.15.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 -tomli==2.0.1 -zipp==3.20.2 +tomli==2.1.0 +zipp==3.21.0 diff --git a/.riot/requirements/13015fd.txt b/.riot/requirements/13015fd.txt index 91e0fd2f268..8c2770377f7 100644 --- a/.riot/requirements/13015fd.txt +++ b/.riot/requirements/13015fd.txt @@ -14,13 +14,13 @@ iniconfig==2.0.0 mock==5.1.0 mongoengine==0.29.1 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 -pymongo==4.10.0 +pymongo==4.10.1 pytest==8.3.3 pytest-cov==5.0.0 pytest-mock==3.14.0 pytest-randomly==3.15.0 sortedcontainers==2.4.0 -tomli==2.0.1 +tomli==2.1.0 zipp==3.20.2 diff --git a/.riot/requirements/13fe884.txt b/.riot/requirements/13fe884.txt index 01112f7f592..30a7765ba28 100644 --- a/.riot/requirements/13fe884.txt +++ b/.riot/requirements/13fe884.txt @@ -5,17 +5,17 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/13fe884.in # attrs==24.2.0 -coverage[toml]==7.6.1 +coverage[toml]==7.6.4 hypothesis==6.45.0 iniconfig==2.0.0 mock==5.1.0 mongoengine==0.29.1 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 pymongo==3.12.3 pytest==8.3.3 -pytest-cov==5.0.0 +pytest-cov==6.0.0 pytest-mock==3.14.0 -pytest-randomly==3.15.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 diff --git a/.riot/requirements/167d6de.txt b/.riot/requirements/167d6de.txt index db7cabb99b9..1ee4a67ee64 100644 --- a/.riot/requirements/167d6de.txt +++ b/.riot/requirements/167d6de.txt @@ -13,7 +13,7 @@ iniconfig==2.0.0 
mock==5.1.0 mongoengine==0.29.1 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 pymongo==3.8.0 pytest==8.3.3 @@ -21,5 +21,5 @@ pytest-cov==5.0.0 pytest-mock==3.14.0 pytest-randomly==3.15.0 sortedcontainers==2.4.0 -tomli==2.0.1 +tomli==2.1.0 zipp==3.20.2 diff --git a/.riot/requirements/185fc1c.txt b/.riot/requirements/185fc1c.txt index 7878cf85378..8f051b3b5aa 100644 --- a/.riot/requirements/185fc1c.txt +++ b/.riot/requirements/185fc1c.txt @@ -13,7 +13,7 @@ iniconfig==2.0.0 mock==5.1.0 mongoengine==0.29.1 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 pymongo==3.13.0 pytest==8.3.3 @@ -21,5 +21,5 @@ pytest-cov==5.0.0 pytest-mock==3.14.0 pytest-randomly==3.15.0 sortedcontainers==2.4.0 -tomli==2.0.1 +tomli==2.1.0 zipp==3.20.2 diff --git a/.riot/requirements/196755b.txt b/.riot/requirements/196755b.txt new file mode 100644 index 00000000000..250298e3848 --- /dev/null +++ b/.riot/requirements/196755b.txt @@ -0,0 +1,20 @@ +# +# This file is autogenerated by pip-compile with Python 3.11 +# by the following command: +# +# pip-compile --no-annotate .riot/requirements/196755b.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +ruamel-yaml==0.18.6 +ruamel-yaml-clib==0.2.8 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/19aba18.txt b/.riot/requirements/19aba18.txt index 36dde507408..33c5569110a 100644 --- a/.riot/requirements/19aba18.txt +++ b/.riot/requirements/19aba18.txt @@ -14,13 +14,13 @@ iniconfig==2.0.0 mock==5.1.0 mongoengine==0.29.1 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 -pymongo==4.10.0 +pymongo==4.10.1 pytest==8.3.3 pytest-cov==5.0.0 pytest-mock==3.14.0 pytest-randomly==3.15.0 sortedcontainers==2.4.0 -tomli==2.0.1 +tomli==2.1.0 zipp==3.20.2 diff --git a/.riot/requirements/1dbb110.txt b/.riot/requirements/1dbb110.txt index 0c1bd066ca7..1e6155c399f 100644 --- a/.riot/requirements/1dbb110.txt +++ b/.riot/requirements/1dbb110.txt @@ -5,8 +5,8 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/1dbb110.in # attrs==24.2.0 -coverage[toml]==7.6.1 -dnspython==2.6.1 +coverage[toml]==7.6.4 +dnspython==2.7.0 exceptiongroup==1.2.2 hypothesis==6.45.0 importlib-metadata==8.5.0 @@ -14,13 +14,13 @@ iniconfig==2.0.0 mock==5.1.0 mongoengine==0.29.1 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 -pymongo==4.10.0 +pymongo==4.10.1 pytest==8.3.3 -pytest-cov==5.0.0 +pytest-cov==6.0.0 pytest-mock==3.14.0 -pytest-randomly==3.15.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 -tomli==2.0.1 -zipp==3.20.2 +tomli==2.1.0 +zipp==3.21.0 diff --git a/.riot/requirements/1e60db0.txt b/.riot/requirements/1e60db0.txt index d6296d91205..87566f6065c 100644 --- a/.riot/requirements/1e60db0.txt +++ b/.riot/requirements/1e60db0.txt @@ -5,17 +5,17 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/1e60db0.in # attrs==24.2.0 -coverage[toml]==7.6.1 +coverage[toml]==7.6.4 hypothesis==6.45.0 iniconfig==2.0.0 mock==5.1.0 mongoengine==0.29.1 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 pymongo==3.12.3 pytest==8.3.3 -pytest-cov==5.0.0 +pytest-cov==6.0.0 pytest-mock==3.14.0 -pytest-randomly==3.15.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 diff --git a/.riot/requirements/1eb9abd.txt b/.riot/requirements/1eb9abd.txt index 68c39ed9da2..f7ad0322c2f 100644 --- a/.riot/requirements/1eb9abd.txt +++ b/.riot/requirements/1eb9abd.txt @@ -5,20 +5,20 @@ 
# pip-compile --allow-unsafe --no-annotate .riot/requirements/1eb9abd.in # attrs==24.2.0 -coverage[toml]==7.6.1 -dnspython==2.6.1 +coverage[toml]==7.6.4 +dnspython==2.7.0 exceptiongroup==1.2.2 hypothesis==6.45.0 iniconfig==2.0.0 mock==5.1.0 mongoengine==0.29.1 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 -pymongo==4.10.0 +pymongo==4.10.1 pytest==8.3.3 -pytest-cov==5.0.0 +pytest-cov==6.0.0 pytest-mock==3.14.0 -pytest-randomly==3.15.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 -tomli==2.0.1 +tomli==2.1.0 diff --git a/.riot/requirements/1fd0884.txt b/.riot/requirements/1fd0884.txt index e938b48c6a7..d529a0f1ec7 100644 --- a/.riot/requirements/1fd0884.txt +++ b/.riot/requirements/1fd0884.txt @@ -5,7 +5,7 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/1fd0884.in # attrs==24.2.0 -coverage[toml]==7.6.1 +coverage[toml]==7.6.4 exceptiongroup==1.2.2 hypothesis==6.45.0 importlib-metadata==8.5.0 @@ -13,13 +13,13 @@ iniconfig==2.0.0 mock==5.1.0 mongoengine==0.29.1 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 pymongo==3.8.0 pytest==8.3.3 -pytest-cov==5.0.0 +pytest-cov==6.0.0 pytest-mock==3.14.0 -pytest-randomly==3.15.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 -tomli==2.0.1 -zipp==3.20.2 +tomli==2.1.0 +zipp==3.21.0 diff --git a/.riot/requirements/5a48bdf.txt b/.riot/requirements/5a48bdf.txt index 766ff8d17fd..0bcb9027eee 100644 --- a/.riot/requirements/5a48bdf.txt +++ b/.riot/requirements/5a48bdf.txt @@ -5,20 +5,20 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/5a48bdf.in # attrs==24.2.0 -coverage[toml]==7.6.1 -dnspython==2.6.1 +coverage[toml]==7.6.4 +dnspython==2.7.0 exceptiongroup==1.2.2 hypothesis==6.45.0 iniconfig==2.0.0 mock==5.1.0 mongoengine==0.29.1 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 -pymongo==4.10.0 +pymongo==4.10.1 pytest==8.3.3 -pytest-cov==5.0.0 +pytest-cov==6.0.0 pytest-mock==3.14.0 -pytest-randomly==3.15.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 -tomli==2.0.1 +tomli==2.1.0 diff --git a/.riot/requirements/6cb445e.txt b/.riot/requirements/6cb445e.txt index a5d1b34beda..743b7ed0241 100644 --- a/.riot/requirements/6cb445e.txt +++ b/.riot/requirements/6cb445e.txt @@ -5,8 +5,8 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/6cb445e.in # attrs==24.2.0 -coverage[toml]==7.6.1 -dnspython==2.6.1 +coverage[toml]==7.6.4 +dnspython==2.7.0 exceptiongroup==1.2.2 hypothesis==6.45.0 importlib-metadata==8.5.0 @@ -14,13 +14,13 @@ iniconfig==2.0.0 mock==5.1.0 mongoengine==0.29.1 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 -pymongo==4.10.0 +pymongo==4.10.1 pytest==8.3.3 -pytest-cov==5.0.0 +pytest-cov==6.0.0 pytest-mock==3.14.0 -pytest-randomly==3.15.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 -tomli==2.0.1 -zipp==3.20.2 +tomli==2.1.0 +zipp==3.21.0 diff --git a/.riot/requirements/8f46789.txt b/.riot/requirements/8f46789.txt index f2fb80a6bee..6284e327733 100644 --- a/.riot/requirements/8f46789.txt +++ b/.riot/requirements/8f46789.txt @@ -5,18 +5,18 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/8f46789.in # attrs==24.2.0 -coverage[toml]==7.6.1 -dnspython==2.6.1 +coverage[toml]==7.6.4 +dnspython==2.7.0 hypothesis==6.45.0 iniconfig==2.0.0 mock==5.1.0 mongoengine==0.29.1 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 -pymongo==4.10.0 +pymongo==4.10.1 pytest==8.3.3 -pytest-cov==5.0.0 +pytest-cov==6.0.0 pytest-mock==3.14.0 -pytest-randomly==3.15.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 diff 
--git a/.riot/requirements/a0454b7.txt b/.riot/requirements/a0454b7.txt index 16e7d2d1138..9713d010afb 100644 --- a/.riot/requirements/a0454b7.txt +++ b/.riot/requirements/a0454b7.txt @@ -5,18 +5,18 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/a0454b7.in # attrs==24.2.0 -coverage[toml]==7.6.1 -dnspython==2.6.1 +coverage[toml]==7.6.4 +dnspython==2.7.0 hypothesis==6.45.0 iniconfig==2.0.0 mock==5.1.0 mongoengine==0.29.1 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 -pymongo==4.10.0 +pymongo==4.10.1 pytest==8.3.3 -pytest-cov==5.0.0 +pytest-cov==6.0.0 pytest-mock==3.14.0 -pytest-randomly==3.15.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 diff --git a/.riot/requirements/a98b986.txt b/.riot/requirements/a98b986.txt index e82b9df5f0a..f3aa18dbe71 100644 --- a/.riot/requirements/a98b986.txt +++ b/.riot/requirements/a98b986.txt @@ -5,18 +5,18 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/a98b986.in # attrs==24.2.0 -coverage[toml]==7.6.1 -dnspython==2.6.1 +coverage[toml]==7.6.4 +dnspython==2.7.0 hypothesis==6.45.0 iniconfig==2.0.0 mock==5.1.0 mongoengine==0.29.1 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 -pymongo==4.10.0 +pymongo==4.10.1 pytest==8.3.3 -pytest-cov==5.0.0 +pytest-cov==6.0.0 pytest-mock==3.14.0 -pytest-randomly==3.15.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 diff --git a/.riot/requirements/b1f6b59.txt b/.riot/requirements/b1f6b59.txt index c5ef539715f..bd4ef3538c7 100644 --- a/.riot/requirements/b1f6b59.txt +++ b/.riot/requirements/b1f6b59.txt @@ -5,19 +5,19 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/b1f6b59.in # attrs==24.2.0 -coverage[toml]==7.6.1 +coverage[toml]==7.6.4 exceptiongroup==1.2.2 hypothesis==6.45.0 iniconfig==2.0.0 mock==5.1.0 mongoengine==0.29.1 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 pymongo==3.12.3 pytest==8.3.3 -pytest-cov==5.0.0 +pytest-cov==6.0.0 pytest-mock==3.14.0 -pytest-randomly==3.15.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 -tomli==2.0.1 +tomli==2.1.0 diff --git a/.riot/requirements/d0fc014.txt b/.riot/requirements/d0fc014.txt index 90ab9109e43..9c4444b6b15 100644 --- a/.riot/requirements/d0fc014.txt +++ b/.riot/requirements/d0fc014.txt @@ -5,7 +5,7 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/d0fc014.in # attrs==24.2.0 -coverage[toml]==7.6.1 +coverage[toml]==7.6.4 exceptiongroup==1.2.2 hypothesis==6.45.0 importlib-metadata==8.5.0 @@ -13,13 +13,13 @@ iniconfig==2.0.0 mock==5.1.0 mongoengine==0.29.1 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 pymongo==3.13.0 pytest==8.3.3 -pytest-cov==5.0.0 +pytest-cov==6.0.0 pytest-mock==3.14.0 -pytest-randomly==3.15.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 -tomli==2.0.1 -zipp==3.20.2 +tomli==2.1.0 +zipp==3.21.0 diff --git a/.riot/requirements/d66afaf.txt b/.riot/requirements/d66afaf.txt index 2646a4200b9..8468d6fd083 100644 --- a/.riot/requirements/d66afaf.txt +++ b/.riot/requirements/d66afaf.txt @@ -13,7 +13,7 @@ iniconfig==2.0.0 mock==5.1.0 mongoengine==0.29.1 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 pymongo==3.9.0 pytest==8.3.3 @@ -21,5 +21,5 @@ pytest-cov==5.0.0 pytest-mock==3.14.0 pytest-randomly==3.15.0 sortedcontainers==2.4.0 -tomli==2.0.1 +tomli==2.1.0 zipp==3.20.2 diff --git a/.riot/requirements/de7d3ce.txt b/.riot/requirements/de7d3ce.txt index c4a7640ed31..d74fc75f0c9 100644 --- a/.riot/requirements/de7d3ce.txt +++ b/.riot/requirements/de7d3ce.txt @@ -5,18 +5,18 @@ # pip-compile 
--allow-unsafe --no-annotate .riot/requirements/de7d3ce.in # attrs==24.2.0 -coverage[toml]==7.6.1 -dnspython==2.6.1 +coverage[toml]==7.6.4 +dnspython==2.7.0 hypothesis==6.45.0 iniconfig==2.0.0 mock==5.1.0 mongoengine==0.29.1 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 -pymongo==4.10.0 +pymongo==4.10.1 pytest==8.3.3 -pytest-cov==5.0.0 +pytest-cov==6.0.0 pytest-mock==3.14.0 -pytest-randomly==3.15.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 diff --git a/CHANGELOG.md b/CHANGELOG.md index 17541db9926..97b9e1a0bc1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1493,7 +1493,7 @@ tracing: This release adds support for lazy sampling, essentially moving when we - profiler: Fixes a sigabrt when shutdown occurs during an upload -- otel: Ensures all otel sampling decisions are consistent with Datadog Spans. This prevents otel spans in a distrbuted trace from being sampled differently than Datadog spans in the same trace. +- otel: Ensures all otel sampling decisions are consistent with Datadog Spans. This prevents otel spans in a distributed trace from being sampled differently than Datadog spans in the same trace. - tracing: Fix an issue where remote configuration values would not be reverted when unset in the UI. @@ -5589,7 +5589,7 @@ If you are using the profiler, please note that `ddtrace.profile` has been renam - Check profiler accuracy (#1260) - chore(ci): fix flake8 and pin pytest and readme_renderer (#1278) - fix(tests,profile): do not test the number of locking events (#1282) -- fix: do not build wheels on Python 3.4 + run test buildin wheels in the CI (#1287) +- fix: do not build wheels on Python 3.4 + run test building wheels in the CI (#1287) - fix(tests,profiling): correct number of frames handling (#1290) - fix(tests, opentracer): flaky threading test (#1293) - build: use latest manylinux images (#1305) diff --git a/ddtrace/_trace/utils_botocore/aws_payload_tagging.py b/ddtrace/_trace/utils_botocore/aws_payload_tagging.py new file mode 100644 index 00000000000..dadb6749a12 --- /dev/null +++ b/ddtrace/_trace/utils_botocore/aws_payload_tagging.py @@ -0,0 +1,242 @@ +import copy +from decimal import Decimal +import json +from typing import Any +from typing import Dict +from typing import Optional + +from ddtrace import Span +from ddtrace import config +from ddtrace.vendor.jsonpath_ng import parse + + +_MAX_TAG_VALUE_LENGTH = 5000 + + +class AWSPayloadTagging: + _INCOMPLETE_TAG = "_dd.payload_tags_incomplete" # Set to True if MAX_TAGS is reached + + _REDACTION_PATHS_DEFAULTS = [ + # SNS + "$..Attributes.KmsMasterKeyId", + "$..Attributes.Token", + # EventBridge + "$..AuthParameters.OAuthParameters.OAuthHttpParameters.HeaderParameters[*].Value", + "$..AuthParameters.OAuthParameters.OAuthHttpParameters.QueryStringParameters[*].Value", + "$..AuthParameters.OAuthParameters.OAuthHttpParameters.BodyParameters[*].Value", + "$..AuthParameters.InvocationHttpParameters.HeaderParameters[*].Value", + "$..AuthParameters.InvocationHttpParameters.QueryStringParameters[*].Value", + "$..AuthParameters.InvocationHttpParameters.BodyParameters[*].Value", + "$..Targets[*].RedshiftDataParameters.Sql", + "$..Targets[*].RedshiftDataParameters.Sqls", + "$..Targets[*].AppSyncParameters.GraphQLOperation", + # // S3 + "$..SSEKMSKeyId", + "$..SSEKMSEncryptionContext", + ] + _REQUEST_REDACTION_PATHS_DEFAULTS = [ + # Sns + "$..Attributes.PlatformCredential", + "$..Attributes.PlatformPrincipal", + "$..AWSAccountId", + "$..Endpoint", + "$..Token", + "$..OneTimePassword", + "$..phoneNumber", + 
"$..PhoneNumber", + # EventBridge + "$..AuthParameters.BasicAuthParameters.Password", + "$..AuthParameters.OAuthParameters.ClientParameters.ClientSecret", + "$..AuthParameters.ApiKeyAuthParameters.ApiKeyValue", + # S3 + "$..SSECustomerKey", + "$..CopySourceSSECustomerKey", + "$..RestoreRequest.OutputLocation.S3.Encryption.KMSKeyId", + ] + + _RESPONSE_REDACTION_PATHS_DEFAULTS = [ + # // Sns + "$..Endpoints.*.Token", + "$..PlatformApplication.*.PlatformCredential", + "$..PlatformApplication.*.PlatformPrincipal", + "$..Subscriptions.*.Endpoint", + "$..PhoneNumbers[*].PhoneNumber", + "$..phoneNumbers[*]", + # // S3 + "$..Credentials.SecretAccessKey", + "$..Credentials.SessionToken", + ] + + def __init__(self): + self.current_tag_count = 0 + self.validated = False + self.request_redaction_paths = None + self.response_redaction_paths = None + + def expand_payload_as_tags(self, span: Span, result: Dict[str, Any], key: str) -> None: + """ + Expands the JSON payload from various AWS services into tags and sets them on the Span. + """ + if not self.validated: + self.request_redaction_paths = self._get_redaction_paths_request() + self.response_redaction_paths = self._get_redaction_paths_response() + self.validated = True + + if not self.request_redaction_paths and not self.response_redaction_paths: + return + + if not result: + return + + # we will be redacting at least one of request/response + redacted_dict = copy.deepcopy(result) + self.current_tag_count = 0 + if self.request_redaction_paths: + self._redact_json(redacted_dict, span, self.request_redaction_paths) + if self.response_redaction_paths: + self._redact_json(redacted_dict, span, self.response_redaction_paths) + + # flatten the payload into span tags + for key2, value in redacted_dict.items(): + escaped_sub_key = key2.replace(".", "\\.") + self._tag_object(span, f"{key}.{escaped_sub_key}", value) + if self.current_tag_count >= config.botocore.get("payload_tagging_max_tags"): + return + + def _should_json_parse(self, obj: Any) -> bool: + if isinstance(obj, (str, bytes)): + return True + return False + + def _validate_json_paths(self, paths: Optional[str]) -> bool: + """ + Checks whether paths is "all" or all valid JSONPaths + """ + if not paths: + return False # not enabled + + if paths == "all": + return True # enabled, use the defaults + + # otherwise validate that we have valid JSONPaths + for path in paths.split(","): + if path: + try: + parse(path) + except Exception: + return False + else: + return False + + return True + + def _redact_json(self, data: Dict[str, Any], span: Span, paths: list) -> None: + """ + Redact sensitive data in the JSON payload based on default and user-provided JSONPath expressions + """ + for path in paths: + expression = parse(path) + for match in expression.find(data): + match.context.value[match.path.fields[0]] = "redacted" + + def _get_redaction_paths_response(self) -> list: + """ + Get the list of redaction paths, combining defaults with any user-provided JSONPaths. 
+ """ + if not config.botocore.get("payload_tagging_response"): + return [] + + response_redaction = config.botocore.get("payload_tagging_response") + if self._validate_json_paths(response_redaction): + if response_redaction == "all": + return self._RESPONSE_REDACTION_PATHS_DEFAULTS + self._REDACTION_PATHS_DEFAULTS + return ( + self._RESPONSE_REDACTION_PATHS_DEFAULTS + self._REDACTION_PATHS_DEFAULTS + response_redaction.split(",") + ) + + return [] + + def _get_redaction_paths_request(self) -> list: + """ + Get the list of redaction paths, combining defaults with any user-provided JSONPaths. + """ + if not config.botocore.get("payload_tagging_request"): + return [] + + request_redaction = config.botocore.get("payload_tagging_request") + if self._validate_json_paths(request_redaction): + if request_redaction == "all": + return self._REQUEST_REDACTION_PATHS_DEFAULTS + self._REDACTION_PATHS_DEFAULTS + return ( + self._REQUEST_REDACTION_PATHS_DEFAULTS + self._REDACTION_PATHS_DEFAULTS + request_redaction.split(",") + ) + + return [] + + def _tag_object(self, span: Span, key: str, obj: Any, depth: int = 0) -> None: + """ + Recursively expands the given AWS payload object and adds the values as flattened Span tags. + It is not expected that AWS Payloads will be deeply nested so the number of recursive calls should be low. + For example, the following (shortened payload object) becomes: + { + "ResponseMetadata": { + "RequestId": "SOMEID", + "HTTPHeaders": { + "x-amz-request-id": "SOMEID", + "content-length": "5", + } + } + + => + + "aws.response.body.RequestId": "SOMEID" + "aws.response.body.HTTPHeaders.x-amz-request-id": "SOMEID" + "aws.response.body.HTTPHeaders.content-length": "5" + """ + # if we've hit the maximum allowed tags, mark the expansion as incomplete + if self.current_tag_count >= config.botocore.get("payload_tagging_max_tags"): + span.set_tag(self._INCOMPLETE_TAG, True) + return + if obj is None: + self.current_tag_count += 1 + span.set_tag(key, obj) + return + if depth >= config.botocore.get("payload_tagging_max_depth"): + self.current_tag_count += 1 + span.set_tag( + key, str(obj)[:_MAX_TAG_VALUE_LENGTH] + ) # at the maximum depth - set the tag without further expansion + return + depth += 1 + if self._should_json_parse(obj): + try: + parsed = json.loads(obj) + self._tag_object(span, key, parsed, depth) + except ValueError: + self.current_tag_count += 1 + span.set_tag(key, str(obj)[:_MAX_TAG_VALUE_LENGTH]) + return + if isinstance(obj, (int, float, Decimal)): + self.current_tag_count += 1 + span.set_tag(key, str(obj)) + return + if isinstance(obj, list): + for k, v in enumerate(obj): + self._tag_object(span, f"{key}.{k}", v, depth) + return + if hasattr(obj, "items"): + for k, v in obj.items(): + escaped_key = str(k).replace(".", "\\.") + self._tag_object(span, f"{key}.{escaped_key}", v, depth) + return + if hasattr(obj, "to_dict"): + for k, v in obj.to_dict().items(): + escaped_key = str(k).replace(".", "\\.") + self._tag_object(span, f"{key}.{escaped_key}", v, depth) + return + try: + value_as_str = str(obj) + except Exception: + value_as_str = "UNKNOWN" + self.current_tag_count += 1 + span.set_tag(key, value_as_str) diff --git a/ddtrace/_trace/utils_botocore/span_tags.py b/ddtrace/_trace/utils_botocore/span_tags.py index 1b40b5abcb9..5394c2b397a 100644 --- a/ddtrace/_trace/utils_botocore/span_tags.py +++ b/ddtrace/_trace/utils_botocore/span_tags.py @@ -5,6 +5,7 @@ from ddtrace import Span from ddtrace import config +from ddtrace._trace.utils_botocore.aws_payload_tagging import 
AWSPayloadTagging from ddtrace.constants import _ANALYTICS_SAMPLE_RATE_KEY from ddtrace.constants import SPAN_KIND from ddtrace.constants import SPAN_MEASURED_KEY @@ -15,6 +16,9 @@ from ddtrace.internal.utils.formats import deep_getattr +_PAYLOAD_TAGGER = AWSPayloadTagging() + + def set_botocore_patched_api_call_span_tags(span: Span, instance, args, params, endpoint_name, operation): span.set_tag_str(COMPONENT, config.botocore.integration_name) # set span.kind to the type of request being performed @@ -31,6 +35,11 @@ def set_botocore_patched_api_call_span_tags(span: Span, instance, args, params, if params and not config.botocore["tag_no_params"]: aws._add_api_param_span_tags(span, endpoint_name, params) + if config.botocore["payload_tagging_request"] and endpoint_name in config.botocore.get( + "payload_tagging_services" + ): + _PAYLOAD_TAGGER.expand_payload_as_tags(span, params, "aws.request.body") + else: span.resource = endpoint_name @@ -54,6 +63,11 @@ def set_botocore_response_metadata_tags( return response_meta = result["ResponseMetadata"] + if config.botocore["payload_tagging_response"] and span.get_tag("aws_service") in config.botocore.get( + "payload_tagging_services" + ): + _PAYLOAD_TAGGER.expand_payload_as_tags(span, response_meta, "aws.response.body") + if "HTTPStatusCode" in response_meta: status_code = response_meta["HTTPStatusCode"] span.set_tag(http.STATUS_CODE, status_code) diff --git a/ddtrace/contrib/internal/botocore/patch.py b/ddtrace/contrib/internal/botocore/patch.py index 20de2d8cf11..febad29f982 100644 --- a/ddtrace/contrib/internal/botocore/patch.py +++ b/ddtrace/contrib/internal/botocore/patch.py @@ -104,6 +104,17 @@ def _load_dynamodb_primary_key_names_for_tables() -> Dict[str, Set[str]]: "empty_poll_enabled": asbool(os.getenv("DD_BOTOCORE_EMPTY_POLL_ENABLED", default=True)), "dynamodb_primary_key_names_for_tables": _load_dynamodb_primary_key_names_for_tables(), "add_span_pointers": asbool(os.getenv("DD_BOTOCORE_ADD_SPAN_POINTERS", default=True)), + "payload_tagging_request": os.getenv("DD_TRACE_CLOUD_REQUEST_PAYLOAD_TAGGING", default=None), + "payload_tagging_response": os.getenv("DD_TRACE_CLOUD_RESPONSE_PAYLOAD_TAGGING", default=None), + "payload_tagging_max_depth": int( + os.getenv("DD_TRACE_CLOUD_PAYLOAD_TAGGING_MAX_DEPTH", 10) + ), # RFC defined 10 levels (1.2.3.4...10) as max tagging depth + "payload_tagging_max_tags": int( + os.getenv("DD_TRACE_CLOUD_PAYLOAD_TAGGING_MAX_TAGS", 758) + ), # RFC defined default limit - spans are limited past 1000 + "payload_tagging_services": set( + os.getenv("DD_TRACE_CLOUD_PAYLOAD_TAGGING_SERVICES", default={"s3", "sns", "sqs", "kinesis", "eventbridge"}) + ), }, ) diff --git a/ddtrace/contrib/trace_utils.py b/ddtrace/contrib/trace_utils.py index 00ea2a4382d..db8509d8c35 100644 --- a/ddtrace/contrib/trace_utils.py +++ b/ddtrace/contrib/trace_utils.py @@ -1,6 +1,7 @@ """ This module contains utility functions for writing ddtrace integrations. 
""" + from collections import deque import ipaddress import re @@ -66,7 +67,6 @@ "x-real-ip", "true-client-ip", "x-client-ip", - "x-forwarded", "forwarded-for", "x-cluster-client-ip", "fastly-client-ip", @@ -431,6 +431,7 @@ def set_http_meta( method=None, # type: Optional[str] url=None, # type: Optional[str] target_host=None, # type: Optional[str] + server_address=None, # type: Optional[str] status_code=None, # type: Optional[Union[int, str]] status_msg=None, # type: Optional[str] query=None, # type: Optional[str] @@ -474,6 +475,9 @@ def set_http_meta( if target_host is not None: span.set_tag_str(net.TARGET_HOST, target_host) + if server_address is not None: + span.set_tag_str(net.SERVER_ADDRESS, server_address) + if status_code is not None: try: int_status_code = int(status_code) diff --git a/ddtrace/debugging/_exception/replay.py b/ddtrace/debugging/_exception/replay.py index 15d03c678af..3a54bce6f51 100644 --- a/ddtrace/debugging/_exception/replay.py +++ b/ddtrace/debugging/_exception/replay.py @@ -20,6 +20,7 @@ from ddtrace.internal.packages import is_user_code from ddtrace.internal.rate_limiter import BudgetRateLimiterWithJitter as RateLimiter from ddtrace.internal.rate_limiter import RateLimitExceeded +from ddtrace.internal.utils.time import HourGlass log = get_logger(__name__) @@ -47,13 +48,70 @@ FRAME_FILE_TAG = "_dd.debug.error.%d.file" FRAME_LINE_TAG = "_dd.debug.error.%d.line" +EXCEPTION_IDENT_LIMIT = 3600.0 # 1 hour +EXCEPTION_IDENT_LIMITER: t.Dict[int, HourGlass] = {} + + +ExceptionChain = t.Deque[t.Tuple[BaseException, t.Optional[TracebackType]]] + + +def exception_ident(exc: BaseException, tb: t.Optional[TracebackType]) -> int: + """Compute the identity of an exception. + + We use the exception type and the traceback to generate a unique identifier + that we can use to identify the exception. This can be used to rate limit + the number of times we capture information of the same exception. + """ + h = 0 + _tb = tb + while _tb is not None: + frame = _tb.tb_frame + h = (h << 1) ^ (id(frame.f_code) << 4 | frame.f_lasti) + h &= 0xFFFFFFFFFFFFFFFF + _tb = _tb.tb_next + return (id(type(exc)) << 64) | h + + +def exception_chain_ident(chain: ExceptionChain) -> int: + h = 0 + for exc, tb in chain: + h = (h << 1) ^ exception_ident(exc, tb) + h &= 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF + return h + + +def limit_exception(chain: ExceptionChain) -> bool: + try: + exc_ident = exception_chain_ident(chain) + hg = EXCEPTION_IDENT_LIMITER.get(exc_ident) + if hg is None: + # We haven't seen this exception yet, or it's been evicted + hg = EXCEPTION_IDENT_LIMITER[exc_ident] = HourGlass(duration=EXCEPTION_IDENT_LIMIT) + hg.turn() + return False + + if hg.trickling(): + # We have seen this exception recently + return True + + # We haven't seen this exception in a while + hg.turn() + + return False + finally: + if len(EXCEPTION_IDENT_LIMITER) > 1024: + # We limit the number of exception identities we track to avoid + # memory leaks. 
+ sorted_keys = sorted(EXCEPTION_IDENT_LIMITER, key=lambda k: EXCEPTION_IDENT_LIMITER[k]) + for k in sorted_keys[:256]: + del EXCEPTION_IDENT_LIMITER[k] + def unwind_exception_chain( - exc: t.Optional[BaseException], - tb: t.Optional[TracebackType], -) -> t.Tuple[t.Deque[t.Tuple[BaseException, t.Optional[TracebackType]]], t.Optional[uuid.UUID]]: + exc: t.Optional[BaseException], tb: t.Optional[TracebackType] +) -> t.Tuple[ExceptionChain, t.Optional[uuid.UUID]]: """Unwind the exception chain and assign it an ID.""" - chain: t.Deque[t.Tuple[BaseException, t.Optional[TracebackType]]] = deque() + chain: ExceptionChain = deque() while exc is not None: chain.append((exc, tb)) @@ -160,6 +218,10 @@ def on_span_exception( # No exceptions to capture return + if limit_exception(chain): + # We have seen this exception recently + return + seq = count(1) # 1-based sequence number while chain: @@ -170,7 +232,7 @@ def on_span_exception( continue # DEV: We go from the handler up to the root exception - while _tb and _tb.tb_frame: + while _tb: frame = _tb.tb_frame code = frame.f_code seq_nr = next(seq) diff --git a/ddtrace/internal/ci_visibility/telemetry/events.py b/ddtrace/internal/ci_visibility/telemetry/events.py index fb586a4cc96..189c9a838c2 100644 --- a/ddtrace/internal/ci_visibility/telemetry/events.py +++ b/ddtrace/internal/ci_visibility/telemetry/events.py @@ -16,7 +16,7 @@ class EVENTS_TELEMETRY(str, Enum): CREATED = "event_created" FINISHED = "event_finished" - MANUAL_API_EVENT = "manual_api_event" + MANUAL_API_EVENT = "manual_api_events" ENQUEUED_FOR_SERIALIZATION = "events_enqueued_for_serialization" diff --git a/ddtrace/internal/ci_visibility/telemetry/git.py b/ddtrace/internal/ci_visibility/telemetry/git.py index 000ddfb2249..fa4b2e87285 100644 --- a/ddtrace/internal/ci_visibility/telemetry/git.py +++ b/ddtrace/internal/ci_visibility/telemetry/git.py @@ -69,7 +69,7 @@ def record_settings_response( if require_git: response_tags.append(("require_git", "1")) if itr_enabled: - response_tags.append(("itrskip_enabled", "1")) + response_tags.append(("itr_enabled", "1")) if early_flake_detection_enabled: response_tags.append(("early_flake_detection_enabled", "1")) diff --git a/ddtrace/internal/coverage/instrumentation_py3_10.py b/ddtrace/internal/coverage/instrumentation_py3_10.py index 7561ad24a9b..b328e8beb51 100644 --- a/ddtrace/internal/coverage/instrumentation_py3_10.py +++ b/ddtrace/internal/coverage/instrumentation_py3_10.py @@ -122,7 +122,7 @@ def instrument_all_lines_nonrecursive( previous_line = code.co_firstlineno previous_line_new_offset = 0 - previous_previous_line = 0 + previous_previous_line = code.co_firstlineno arg = 0 previous_arg = 0 diff --git a/ddtrace/internal/utils/time.py b/ddtrace/internal/utils/time.py index b31f3c3e4f1..1993b9dd21c 100644 --- a/ddtrace/internal/utils/time.py +++ b/ddtrace/internal/utils/time.py @@ -134,3 +134,9 @@ def __enter__(self): def __exit__(self, tp, value, traceback): pass + + def __lt__(self, other) -> bool: + return self._end_at < other._end_at + + def __eq__(self, other) -> bool: + return self._end_at == other._end_at diff --git a/ddtrace/llmobs/_writer.py b/ddtrace/llmobs/_writer.py index 0b79176b51a..6496de96cfe 100644 --- a/ddtrace/llmobs/_writer.py +++ b/ddtrace/llmobs/_writer.py @@ -11,6 +11,7 @@ from typing import TypedDict except ImportError: from typing_extensions import TypedDict +import ddtrace from ddtrace import config from ddtrace.internal import agent from ddtrace.internal import forksafe @@ -204,7 +205,7 @@ def encode(self): 
return None, 0 events = self._buffer self._init_buffer() - data = {"_dd.stage": "raw", "event_type": "span", "spans": events} + data = {"_dd.stage": "raw", "_dd.tracer_version": ddtrace.__version__, "event_type": "span", "spans": events} try: enc_llm_events = json.dumps(data) logger.debug("encode %d LLMObs span events to be sent", len(events)) diff --git a/ddtrace/profiling/collector/_lock.py b/ddtrace/profiling/collector/_lock.py index 74a94e380e2..f2d1289d73b 100644 --- a/ddtrace/profiling/collector/_lock.py +++ b/ddtrace/profiling/collector/_lock.py @@ -269,7 +269,7 @@ def _find_self_name(self, var_dict: typing.Dict): # Get lock acquire/release call location and variable name the lock is assigned to def _maybe_update_self_name(self): - if self._self_name: + if self._self_name is not None: return try: # We expect the call stack to be like this: @@ -298,7 +298,7 @@ def _maybe_update_self_name(self): if not self._self_name: self._self_name = "" - LOG.warning( + LOG.debug( "Failed to get lock variable name, we only support local/global variables and their attributes." ) diff --git a/ddtrace/vendor/__init__.py b/ddtrace/vendor/__init__.py index 2a1f73bb472..1b9596e82da 100644 --- a/ddtrace/vendor/__init__.py +++ b/ddtrace/vendor/__init__.py @@ -91,6 +91,33 @@ Notes: - We only vendor the packaging.version sub-module as this is all we currently need. + + +ply +--------- + +Source: https://github.com/dabeaz/ply +Version: 3.11 +License: BSD-3-Clause + +Notes: + - jsonpath-ng dependency + Did a "pip install jsonpath-ng" + Then went and looked at the contents of the ply packages + yacc.py and lex.py files here. + Didn't copy: cpp.py, ctokens.py, ygen.py (didn't see them used) + + +jsonpath-ng +--------- + +Source: https://github.com/h2non/jsonpath-ng +Version: 1.6.1 +License: Apache License 2.0 + +Notes: + - Copied ply into vendors as well. + Changed "-" to "_" as was causing errors when importing. """ # Initialize `ddtrace.vendor.datadog.base.log` logger with our custom rate limited logger diff --git a/ddtrace/vendor/jsonpath_ng/__init__.py b/ddtrace/vendor/jsonpath_ng/__init__.py new file mode 100644 index 00000000000..a17f881ae61 --- /dev/null +++ b/ddtrace/vendor/jsonpath_ng/__init__.py @@ -0,0 +1,6 @@ +from .jsonpath import * # noqa +from .parser import parse # noqa + + +# Current package version +__version__ = '1.6.1' \ No newline at end of file diff --git a/ddtrace/vendor/jsonpath_ng/exceptions.py b/ddtrace/vendor/jsonpath_ng/exceptions.py new file mode 100644 index 00000000000..7714802fe93 --- /dev/null +++ b/ddtrace/vendor/jsonpath_ng/exceptions.py @@ -0,0 +1,10 @@ +class JSONPathError(Exception): + pass + + +class JsonPathLexerError(JSONPathError): + pass + + +class JsonPathParserError(JSONPathError): + pass \ No newline at end of file diff --git a/ddtrace/vendor/jsonpath_ng/jsonpath.py b/ddtrace/vendor/jsonpath_ng/jsonpath.py new file mode 100644 index 00000000000..683ae65a230 --- /dev/null +++ b/ddtrace/vendor/jsonpath_ng/jsonpath.py @@ -0,0 +1,815 @@ +import logging +from itertools import * # noqa +from .lexer import JsonPathLexer + +# Get logger name +logger = logging.getLogger(__name__) + +# Turn on/off the automatic creation of id attributes +# ... could be a kwarg pervasively but uses are rare and simple today +auto_id_field = None + +NOT_SET = object() +LIST_KEY = object() + + +class JSONPath: + """ + The base class for JSONPath abstract syntax; those + methods stubbed here are the interface to supported + JSONPath semantics. 
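+
+    Concrete subclasses defined in this module include Root, This, Child,
+    Parent, Where, Descendants, Union, Intersect, Fields, Index and Slice.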
+ """ + + def find(self, data): + """ + All `JSONPath` types support `find()`, which returns an iterable of `DatumInContext`s. + They keep track of the path followed to the current location, so if the calling code + has some opinion about that, it can be passed in here as a starting point. + """ + raise NotImplementedError() + + def find_or_create(self, data): + return self.find(data) + + def update(self, data, val): + """ + Returns `data` with the specified path replaced by `val`. Only updates + if the specified path exists. + """ + + raise NotImplementedError() + + def update_or_create(self, data, val): + return self.update(data, val) + + def filter(self, fn, data): + """ + Returns `data` with the specified path filtering nodes according + the filter evaluation result returned by the filter function. + + Arguments: + fn (function): unary function that accepts one argument + and returns bool. + data (dict|list|tuple): JSON object to filter. + """ + + raise NotImplementedError() + + def child(self, child): + """ + Equivalent to Child(self, next) but with some canonicalization + """ + if isinstance(self, This) or isinstance(self, Root): + return child + elif isinstance(child, This): + return self + elif isinstance(child, Root): + return child + else: + return Child(self, child) + + def make_datum(self, value): + if isinstance(value, DatumInContext): + return value + else: + return DatumInContext(value, path=Root(), context=None) + + +class DatumInContext: + """ + Represents a datum along a path from a context. + + Essentially a zipper but with a structure represented by JsonPath, + and where the context is more of a parent pointer than a proper + representation of the context. + + For quick-and-dirty work, this proxies any non-special attributes + to the underlying datum, but the actual datum can (and usually should) + be retrieved via the `value` attribute. + + To place `datum` within another, use `datum.in_context(context=..., path=...)` + which extends the path. If the datum already has a context, it places the entire + context within that passed in, so an object can be built from the inside + out. 
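+
+    The `full_path` property walks the chain of contexts to rebuild the
+    complete path from the root down to this datum.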
+ """ + @classmethod + def wrap(cls, data): + if isinstance(data, cls): + return data + else: + return cls(data) + + def __init__(self, value, path=None, context=None): + self.value = value + self.path = path or This() + self.context = None if context is None else DatumInContext.wrap(context) + + def in_context(self, context, path): + context = DatumInContext.wrap(context) + + if self.context: + return DatumInContext(value=self.value, path=self.path, context=context.in_context(path=path, context=context)) + else: + return DatumInContext(value=self.value, path=path, context=context) + + @property + def full_path(self): + return self.path if self.context is None else self.context.full_path.child(self.path) + + @property + def id_pseudopath(self): + """ + Looks like a path, but with ids stuck in when available + """ + try: + pseudopath = Fields(str(self.value[auto_id_field])) + except (TypeError, AttributeError, KeyError): # This may not be all the interesting exceptions + pseudopath = self.path + + if self.context: + return self.context.id_pseudopath.child(pseudopath) + else: + return pseudopath + + def __repr__(self): + return '%s(value=%r, path=%r, context=%r)' % (self.__class__.__name__, self.value, self.path, self.context) + + def __eq__(self, other): + return isinstance(other, DatumInContext) and other.value == self.value and other.path == self.path and self.context == other.context + + +class AutoIdForDatum(DatumInContext): + """ + This behaves like a DatumInContext, but the value is + always the path leading up to it, not including the "id", + and with any "id" fields along the way replacing the prior + segment of the path + + For example, it will make "foo.bar.id" return a datum + that behaves like DatumInContext(value="foo.bar", path="foo.bar.id"). + + This is disabled by default; it can be turned on by + settings the `auto_id_field` global to a value other + than `None`. + """ + + def __init__(self, datum, id_field=None): + """ + Invariant is that datum.path is the path from context to datum. The auto id + will either be the id in the datum (if present) or the id of the context + followed by the path to the datum. + + The path to this datum is always the path to the context, the path to the + datum, and then the auto id field. + """ + self.datum = datum + self.id_field = id_field or auto_id_field + + @property + def value(self): + return str(self.datum.id_pseudopath) + + @property + def path(self): + return self.id_field + + @property + def context(self): + return self.datum + + def __repr__(self): + return '%s(%r)' % (self.__class__.__name__, self.datum) + + def in_context(self, context, path): + return AutoIdForDatum(self.datum.in_context(context=context, path=path)) + + def __eq__(self, other): + return isinstance(other, AutoIdForDatum) and other.datum == self.datum and self.id_field == other.id_field + + +class Root(JSONPath): + """ + The JSONPath referring to the "root" object. Concrete syntax is '$'. + The root is the topmost datum without any context attached. 
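+
+    `find()` wraps plain data in a `DatumInContext` rooted here; given a datum
+    that already has a context, it recurses up to the outermost context.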
+ """ + + def find(self, data): + if not isinstance(data, DatumInContext): + return [DatumInContext(data, path=Root(), context=None)] + else: + if data.context is None: + return [DatumInContext(data.value, context=None, path=Root())] + else: + return Root().find(data.context) + + def update(self, data, val): + return val + + def filter(self, fn, data): + return data if fn(data) else None + + def __str__(self): + return '$' + + def __repr__(self): + return 'Root()' + + def __eq__(self, other): + return isinstance(other, Root) + + def __hash__(self): + return hash('$') + + +class This(JSONPath): + """ + The JSONPath referring to the current datum. Concrete syntax is '@'. + """ + + def find(self, datum): + return [DatumInContext.wrap(datum)] + + def update(self, data, val): + return val + + def filter(self, fn, data): + return data if fn(data) else None + + def __str__(self): + return '`this`' + + def __repr__(self): + return 'This()' + + def __eq__(self, other): + return isinstance(other, This) + + def __hash__(self): + return hash('this') + + +class Child(JSONPath): + """ + JSONPath that first matches the left, then the right. + Concrete syntax is '.' + """ + + def __init__(self, left, right): + self.left = left + self.right = right + + def find(self, datum): + """ + Extra special case: auto ids do not have children, + so cut it off right now rather than auto id the auto id + """ + + return [submatch + for subdata in self.left.find(datum) + if not isinstance(subdata, AutoIdForDatum) + for submatch in self.right.find(subdata)] + + def update(self, data, val): + for datum in self.left.find(data): + self.right.update(datum.value, val) + return data + + def find_or_create(self, datum): + datum = DatumInContext.wrap(datum) + submatches = [] + for subdata in self.left.find_or_create(datum): + if isinstance(subdata, AutoIdForDatum): + # Extra special case: auto ids do not have children, + # so cut it off right now rather than auto id the auto id + continue + for submatch in self.right.find_or_create(subdata): + submatches.append(submatch) + return submatches + + def update_or_create(self, data, val): + for datum in self.left.find_or_create(data): + self.right.update_or_create(datum.value, val) + return _clean_list_keys(data) + + def filter(self, fn, data): + for datum in self.left.find(data): + self.right.filter(fn, datum.value) + return data + + def __eq__(self, other): + return isinstance(other, Child) and self.left == other.left and self.right == other.right + + def __str__(self): + return '%s.%s' % (self.left, self.right) + + def __repr__(self): + return '%s(%r, %r)' % (self.__class__.__name__, self.left, self.right) + + def __hash__(self): + return hash((self.left, self.right)) + + +class Parent(JSONPath): + """ + JSONPath that matches the parent node of the current match. + Will crash if no such parent exists. + Available via named operator `parent`. + """ + + def find(self, datum): + datum = DatumInContext.wrap(datum) + return [datum.context] + + def __eq__(self, other): + return isinstance(other, Parent) + + def __str__(self): + return '`parent`' + + def __repr__(self): + return 'Parent()' + + def __hash__(self): + return hash('parent') + + +class Where(JSONPath): + """ + JSONPath that first matches the left, and then + filters for only those nodes that have + a match on the right. + + WARNING: Subject to change. May want to have "contains" + or some other better word for it. 
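+
+    `find()` keeps only those left-hand matches for which the right-hand
+    expression yields at least one result.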
+ """ + + def __init__(self, left, right): + self.left = left + self.right = right + + def find(self, data): + return [subdata for subdata in self.left.find(data) if self.right.find(subdata)] + + def update(self, data, val): + for datum in self.find(data): + datum.path.update(data, val) + return data + + def filter(self, fn, data): + for datum in self.find(data): + datum.path.filter(fn, datum.value) + return data + + def __str__(self): + return '%s where %s' % (self.left, self.right) + + def __eq__(self, other): + return isinstance(other, Where) and other.left == self.left and other.right == self.right + + def __hash__(self): + return hash((self.left, self.right)) + +class Descendants(JSONPath): + """ + JSONPath that matches first the left expression then any descendant + of it which matches the right expression. + """ + + def __init__(self, left, right): + self.left = left + self.right = right + + def find(self, datum): + # .. ==> . ( | *.. | [*]..) + # + # With with a wonky caveat that since Slice() has funky coercions + # we cannot just delegate to that equivalence or we'll hit an + # infinite loop. So right here we implement the coercion-free version. + + # Get all left matches into a list + left_matches = self.left.find(datum) + if not isinstance(left_matches, list): + left_matches = [left_matches] + + def match_recursively(datum): + right_matches = self.right.find(datum) + + # Manually do the * or [*] to avoid coercion and recurse just the right-hand pattern + if isinstance(datum.value, list): + recursive_matches = [submatch + for i in range(0, len(datum.value)) + for submatch in match_recursively(DatumInContext(datum.value[i], context=datum, path=Index(i)))] + + elif isinstance(datum.value, dict): + recursive_matches = [submatch + for field in datum.value.keys() + for submatch in match_recursively(DatumInContext(datum.value[field], context=datum, path=Fields(field)))] + + else: + recursive_matches = [] + + return right_matches + list(recursive_matches) + + # TODO: repeatable iterator instead of list? 
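+        # Each left-hand match contributes its own right-hand matches followed
+        # by those found recursively among its descendants.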
+ return [submatch + for left_match in left_matches + for submatch in match_recursively(left_match)] + + def is_singular(self): + return False + + def update(self, data, val): + # Get all left matches into a list + left_matches = self.left.find(data) + if not isinstance(left_matches, list): + left_matches = [left_matches] + + def update_recursively(data): + # Update only mutable values corresponding to JSON types + if not (isinstance(data, list) or isinstance(data, dict)): + return + + self.right.update(data, val) + + # Manually do the * or [*] to avoid coercion and recurse just the right-hand pattern + if isinstance(data, list): + for i in range(0, len(data)): + update_recursively(data[i]) + + elif isinstance(data, dict): + for field in data.keys(): + update_recursively(data[field]) + + for submatch in left_matches: + update_recursively(submatch.value) + + return data + + def filter(self, fn, data): + # Get all left matches into a list + left_matches = self.left.find(data) + if not isinstance(left_matches, list): + left_matches = [left_matches] + + def filter_recursively(data): + # Update only mutable values corresponding to JSON types + if not (isinstance(data, list) or isinstance(data, dict)): + return + + self.right.filter(fn, data) + + # Manually do the * or [*] to avoid coercion and recurse just the right-hand pattern + if isinstance(data, list): + for i in range(0, len(data)): + filter_recursively(data[i]) + + elif isinstance(data, dict): + for field in data.keys(): + filter_recursively(data[field]) + + for submatch in left_matches: + filter_recursively(submatch.value) + + return data + + def __str__(self): + return '%s..%s' % (self.left, self.right) + + def __eq__(self, other): + return isinstance(other, Descendants) and self.left == other.left and self.right == other.right + + def __repr__(self): + return '%s(%r, %r)' % (self.__class__.__name__, self.left, self.right) + + def __hash__(self): + return hash((self.left, self.right)) + + +class Union(JSONPath): + """ + JSONPath that returns the union of the results of each match. + This is pretty shoddily implemented for now. The nicest semantics + in case of mismatched bits (list vs atomic) is to put + them all in a list, but I haven't done that yet. + + WARNING: Any appearance of this being the _concatenation_ is + coincidence. It may even be a bug! (or laziness) + """ + def __init__(self, left, right): + self.left = left + self.right = right + + def is_singular(self): + return False + + def find(self, data): + return self.left.find(data) + self.right.find(data) + + def __eq__(self, other): + return isinstance(other, Union) and self.left == other.left and self.right == other.right + + def __hash__(self): + return hash((self.left, self.right)) + +class Intersect(JSONPath): + """ + JSONPath for bits that match *both* patterns. + + This can be accomplished a couple of ways. The most + efficient is to actually build the intersected + AST as in building a state machine for matching the + intersection of regular languages. The next + idea is to build a filtered data and match against + that. 
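+
+    Neither approach is implemented yet; `find()` currently raises
+    NotImplementedError.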
+ """ + def __init__(self, left, right): + self.left = left + self.right = right + + def is_singular(self): + return False + + def find(self, data): + raise NotImplementedError() + + def __eq__(self, other): + return isinstance(other, Intersect) and self.left == other.left and self.right == other.right + + def __hash__(self): + return hash((self.left, self.right)) + + +class Fields(JSONPath): + """ + JSONPath referring to some field of the current object. + Concrete syntax ix comma-separated field names. + + WARNING: If '*' is any of the field names, then they will + all be returned. + """ + + def __init__(self, *fields): + self.fields = fields + + @staticmethod + def get_field_datum(datum, field, create): + if field == auto_id_field: + return AutoIdForDatum(datum) + try: + field_value = datum.value.get(field, NOT_SET) + if field_value is NOT_SET: + if create: + datum.value[field] = field_value = {} + else: + return None + return DatumInContext(field_value, path=Fields(field), context=datum) + except (TypeError, AttributeError): + return None + + def reified_fields(self, datum): + if '*' not in self.fields: + return self.fields + else: + try: + fields = tuple(datum.value.keys()) + return fields if auto_id_field is None else fields + (auto_id_field,) + except AttributeError: + return () + + def find(self, datum): + return self._find_base(datum, create=False) + + def find_or_create(self, datum): + return self._find_base(datum, create=True) + + def _find_base(self, datum, create): + datum = DatumInContext.wrap(datum) + field_data = [self.get_field_datum(datum, field, create) + for field in self.reified_fields(datum)] + return [fd for fd in field_data if fd is not None] + + def update(self, data, val): + return self._update_base(data, val, create=False) + + def update_or_create(self, data, val): + return self._update_base(data, val, create=True) + + def _update_base(self, data, val, create): + if data is not None: + for field in self.reified_fields(DatumInContext.wrap(data)): + if create and field not in data: + data[field] = {} + if type(data) is not bool and field in data: + if hasattr(val, '__call__'): + data[field] = val(data[field], data, field) + else: + data[field] = val + return data + + def filter(self, fn, data): + if data is not None: + for field in self.reified_fields(DatumInContext.wrap(data)): + if field in data: + if fn(data[field]): + data.pop(field) + return data + + def __str__(self): + # If any JsonPathLexer.literals are included in field name need quotes + # This avoids unnecessary quotes to keep strings short. + # Test each field whether it contains a literal and only then add quotes + # The test loops over all literals, could possibly optimize to short circuit if one found + fields_as_str = ("'" + str(f) + "'" if any([l in f for l in JsonPathLexer.literals]) else + str(f) for f in self.fields) + return ','.join(fields_as_str) + + + def __repr__(self): + return '%s(%s)' % (self.__class__.__name__, ','.join(map(repr, self.fields))) + + def __eq__(self, other): + return isinstance(other, Fields) and tuple(self.fields) == tuple(other.fields) + + def __hash__(self): + return hash(tuple(self.fields)) + + +class Index(JSONPath): + """ + JSONPath that matches indices of the current datum, or none if not large enough. + Concrete syntax is brackets. + + WARNING: If the datum is None or not long enough, it will not crash but will not match anything. 
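+    In create mode (`find_or_create` / `update_or_create`) the underlying list
+    is created from an empty dict if necessary and padded with empty dicts so
+    that the requested index exists.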
+ NOTE: For the concrete syntax of `[*]`, the abstract syntax is a Slice() with no parameters (equiv to `[:]` + """ + + def __init__(self, index): + self.index = index + + def find(self, datum): + return self._find_base(datum, create=False) + + def find_or_create(self, datum): + return self._find_base(datum, create=True) + + def _find_base(self, datum, create): + datum = DatumInContext.wrap(datum) + if create: + if datum.value == {}: + datum.value = _create_list_key(datum.value) + self._pad_value(datum.value) + if datum.value and len(datum.value) > self.index: + return [DatumInContext(datum.value[self.index], path=self, context=datum)] + else: + return [] + + def update(self, data, val): + return self._update_base(data, val, create=False) + + def update_or_create(self, data, val): + return self._update_base(data, val, create=True) + + def _update_base(self, data, val, create): + if create: + if data == {}: + data = _create_list_key(data) + self._pad_value(data) + if hasattr(val, '__call__'): + data[self.index] = val.__call__(data[self.index], data, self.index) + elif len(data) > self.index: + data[self.index] = val + return data + + def filter(self, fn, data): + if fn(data[self.index]): + data.pop(self.index) # relies on mutation :( + return data + + def __eq__(self, other): + return isinstance(other, Index) and self.index == other.index + + def __str__(self): + return '[%i]' % self.index + + def __repr__(self): + return '%s(index=%r)' % (self.__class__.__name__, self.index) + + def _pad_value(self, value): + if len(value) <= self.index: + pad = self.index - len(value) + 1 + value += [{} for __ in range(pad)] + + def __hash__(self): + return hash(self.index) + + +class Slice(JSONPath): + """ + JSONPath matching a slice of an array. + + Because of a mismatch between JSON and XML when schema-unaware, + this always returns an iterable; if the incoming data + was not a list, then it returns a one element list _containing_ that + data. + + Consider these two docs, and their schema-unaware translation to JSON: + + hello ==> {"a": {"b": "hello"}} + hellogoodbye ==> {"a": {"b": ["hello", "goodbye"]}} + + If there were a schema, it would be known that "b" should always be an + array (unless the schema were wonky, but that is too much to fix here) + so when querying with JSON if the one writing the JSON knows that it + should be an array, they can write a slice operator and it will coerce + a non-array value to an array. + + This may be a bit unfortunate because it would be nice to always have + an iterator, but dictionaries and other objects may also be iterable, + so this is the compromise. + """ + def __init__(self, start=None, end=None, step=None): + self.start = start + self.end = end + self.step = step + + def find(self, datum): + datum = DatumInContext.wrap(datum) + + # Used for catching null value instead of empty list in path + if not datum.value: + return [] + # Here's the hack. 
If it is a dictionary or some kind of constant, + # put it in a single-element list + if (isinstance(datum.value, dict) or isinstance(datum.value, int) or isinstance(datum.value, str)): + return self.find(DatumInContext([datum.value], path=datum.path, context=datum.context)) + + # Some iterators do not support slicing but we can still + # at least work for '*' + if self.start is None and self.end is None and self.step is None: + return [DatumInContext(datum.value[i], path=Index(i), context=datum) for i in range(0, len(datum.value))] + else: + return [DatumInContext(datum.value[i], path=Index(i), context=datum) for i in range(0, len(datum.value))[self.start:self.end:self.step]] + + def update(self, data, val): + for datum in self.find(data): + datum.path.update(data, val) + return data + + def filter(self, fn, data): + while True: + length = len(data) + for datum in self.find(data): + data = datum.path.filter(fn, data) + if len(data) < length: + break + + if length == len(data): + break + return data + + def __str__(self): + if self.start is None and self.end is None and self.step is None: + return '[*]' + else: + return '[%s%s%s]' % (self.start or '', + ':%d'%self.end if self.end else '', + ':%d'%self.step if self.step else '') + + def __repr__(self): + return '%s(start=%r,end=%r,step=%r)' % (self.__class__.__name__, self.start, self.end, self.step) + + def __eq__(self, other): + return isinstance(other, Slice) and other.start == self.start and self.end == other.end and other.step == self.step + + def __hash__(self): + return hash((self.start, self.end, self.step)) + + +def _create_list_key(dict_): + """ + Adds a list to a dictionary by reference and returns the list. + + See `_clean_list_keys()` + """ + dict_[LIST_KEY] = new_list = [{}] + return new_list + + +def _clean_list_keys(struct_): + """ + Replace {LIST_KEY: ['foo', 'bar']} with ['foo', 'bar']. + + >>> _clean_list_keys({LIST_KEY: ['foo', 'bar']}) + ['foo', 'bar'] + + """ + if(isinstance(struct_, list)): + for ind, value in enumerate(struct_): + struct_[ind] = _clean_list_keys(value) + elif(isinstance(struct_, dict)): + if(LIST_KEY in struct_): + return _clean_list_keys(struct_[LIST_KEY]) + else: + for key, value in struct_.items(): + struct_[key] = _clean_list_keys(value) + return struct_ \ No newline at end of file diff --git a/ddtrace/vendor/jsonpath_ng/lexer.py b/ddtrace/vendor/jsonpath_ng/lexer.py new file mode 100644 index 00000000000..055447a5bd9 --- /dev/null +++ b/ddtrace/vendor/jsonpath_ng/lexer.py @@ -0,0 +1,171 @@ +import sys +import logging + +from ..ply.lex import lex + +from .exceptions import JsonPathLexerError + +logger = logging.getLogger(__name__) + + +class JsonPathLexer: + ''' + A Lexical analyzer for JsonPath. + ''' + + def __init__(self, debug=False): + self.debug = debug + if self.__doc__ is None: + raise JsonPathLexerError('Docstrings have been removed! By design of PLY, jsonpath-rw requires docstrings. You must not use PYTHONOPTIMIZE=2 or python -OO.') + + def tokenize(self, string): + ''' + Maps a string to an iterator over tokens. 
In other words: [char] -> [token] + ''' + + new_lexer = lex(module=self, debug=self.debug, errorlog=logger) + new_lexer.latest_newline = 0 + new_lexer.string_value = None + new_lexer.input(string) + + while True: + t = new_lexer.token() + if t is None: + break + t.col = t.lexpos - new_lexer.latest_newline + yield t + + if new_lexer.string_value is not None: + raise JsonPathLexerError('Unexpected EOF in string literal or identifier') + + # ============== PLY Lexer specification ================== + # + # This probably should be private but: + # - the parser requires access to `tokens` (perhaps they should be defined in a third, shared dependency) + # - things like `literals` might be a legitimate part of the public interface. + # + # Anyhow, it is pythonic to give some rope to hang oneself with :-) + + literals = ['*', '.', '[', ']', '(', ')', '$', ',', ':', '|', '&', '~'] + + reserved_words = { 'where': 'WHERE' } + + tokens = ['DOUBLEDOT', 'NUMBER', 'ID', 'NAMED_OPERATOR'] + list(reserved_words.values()) + + states = [ ('singlequote', 'exclusive'), + ('doublequote', 'exclusive'), + ('backquote', 'exclusive') ] + + # Normal lexing, rather easy + t_DOUBLEDOT = r'\.\.' + t_ignore = ' \t' + + def t_ID(self, t): + r'[a-zA-Z_@][a-zA-Z0-9_@\-]*' + t.type = self.reserved_words.get(t.value, 'ID') + return t + + def t_NUMBER(self, t): + r'-?\d+' + t.value = int(t.value) + return t + + + # Single-quoted strings + t_singlequote_ignore = '' + def t_singlequote(self, t): + r"'" + t.lexer.string_start = t.lexer.lexpos + t.lexer.string_value = '' + t.lexer.push_state('singlequote') + + def t_singlequote_content(self, t): + r"[^'\\]+" + t.lexer.string_value += t.value + + def t_singlequote_escape(self, t): + r'\\.' + t.lexer.string_value += t.value[1] + + def t_singlequote_end(self, t): + r"'" + t.value = t.lexer.string_value + t.type = 'ID' + t.lexer.string_value = None + t.lexer.pop_state() + return t + + def t_singlequote_error(self, t): + raise JsonPathLexerError('Error on line %s, col %s while lexing singlequoted field: Unexpected character: %s ' % (t.lexer.lineno, t.lexpos - t.lexer.latest_newline, t.value[0])) + + + # Double-quoted strings + t_doublequote_ignore = '' + def t_doublequote(self, t): + r'"' + t.lexer.string_start = t.lexer.lexpos + t.lexer.string_value = '' + t.lexer.push_state('doublequote') + + def t_doublequote_content(self, t): + r'[^"\\]+' + t.lexer.string_value += t.value + + def t_doublequote_escape(self, t): + r'\\.' + t.lexer.string_value += t.value[1] + + def t_doublequote_end(self, t): + r'"' + t.value = t.lexer.string_value + t.type = 'ID' + t.lexer.string_value = None + t.lexer.pop_state() + return t + + def t_doublequote_error(self, t): + raise JsonPathLexerError('Error on line %s, col %s while lexing doublequoted field: Unexpected character: %s ' % (t.lexer.lineno, t.lexpos - t.lexer.latest_newline, t.value[0])) + + + # Back-quoted "magic" operators + t_backquote_ignore = '' + def t_backquote(self, t): + r'`' + t.lexer.string_start = t.lexer.lexpos + t.lexer.string_value = '' + t.lexer.push_state('backquote') + + def t_backquote_escape(self, t): + r'\\.' 
+ t.lexer.string_value += t.value[1] + + def t_backquote_content(self, t): + r"[^`\\]+" + t.lexer.string_value += t.value + + def t_backquote_end(self, t): + r'`' + t.value = t.lexer.string_value + t.type = 'NAMED_OPERATOR' + t.lexer.string_value = None + t.lexer.pop_state() + return t + + def t_backquote_error(self, t): + raise JsonPathLexerError('Error on line %s, col %s while lexing backquoted operator: Unexpected character: %s ' % (t.lexer.lineno, t.lexpos - t.lexer.latest_newline, t.value[0])) + + + # Counting lines, handling errors + def t_newline(self, t): + r'\n' + t.lexer.lineno += 1 + t.lexer.latest_newline = t.lexpos + + def t_error(self, t): + raise JsonPathLexerError('Error on line %s, col %s: Unexpected character: %s ' % (t.lexer.lineno, t.lexpos - t.lexer.latest_newline, t.value[0])) + +if __name__ == '__main__': + logging.basicConfig() + lexer = JsonPathLexer(debug=True) + for token in lexer.tokenize(sys.stdin.read()): + print('%-20s%s' % (token.value, token.type)) \ No newline at end of file diff --git a/ddtrace/vendor/jsonpath_ng/parser.py b/ddtrace/vendor/jsonpath_ng/parser.py new file mode 100644 index 00000000000..755916bda89 --- /dev/null +++ b/ddtrace/vendor/jsonpath_ng/parser.py @@ -0,0 +1,198 @@ +import logging +import sys +import os.path + +from ..ply.yacc import yacc + +from .exceptions import JsonPathParserError +from .jsonpath import * +from .lexer import JsonPathLexer + +logger = logging.getLogger(__name__) + + +def parse(string): + return JsonPathParser().parse(string) + + +class JsonPathParser: + ''' + An LALR-parser for JsonPath + ''' + + tokens = JsonPathLexer.tokens + + def __init__(self, debug=False, lexer_class=None): + if self.__doc__ is None: + raise JsonPathParserError( + 'Docstrings have been removed! By design of PLY, ' + 'jsonpath-rw requires docstrings. You must not use ' + 'PYTHONOPTIMIZE=2 or python -OO.' + ) + + self.debug = debug + self.lexer_class = lexer_class or JsonPathLexer # Crufty but works around statefulness in PLY + + # Since PLY has some crufty aspects and dumps files, we try to keep them local + # However, we need to derive the name of the output Python file :-/ + output_directory = os.path.dirname(__file__) + try: + module_name = os.path.splitext(os.path.split(__file__)[1])[0] + except: + module_name = __name__ + + start_symbol = 'jsonpath' + parsing_table_module = '_'.join([module_name, start_symbol, 'parsetab']) + + # Generate the parse table + self.parser = yacc(module=self, + debug=self.debug, + tabmodule = parsing_table_module, + outputdir = output_directory, + write_tables=0, + start = start_symbol, + errorlog = logger) + + def parse(self, string, lexer = None): + lexer = lexer or self.lexer_class() + return self.parse_token_stream(lexer.tokenize(string)) + + def parse_token_stream(self, token_iterator): + return self.parser.parse(lexer = IteratorToTokenStream(token_iterator)) + + # ===================== PLY Parser specification ===================== + + precedence = [ + ('left', ','), + ('left', 'DOUBLEDOT'), + ('left', '.'), + ('left', '|'), + ('left', '&'), + ('left', 'WHERE'), + ] + + def p_error(self, t): + if t is None: + raise JsonPathParserError('Parse error near the end of string!') + raise JsonPathParserError('Parse error at %s:%s near token %s (%s)' + % (t.lineno, t.col, t.value, t.type)) + + def p_jsonpath_binop(self, p): + """jsonpath : jsonpath '.' 
jsonpath + | jsonpath DOUBLEDOT jsonpath + | jsonpath WHERE jsonpath + | jsonpath '|' jsonpath + | jsonpath '&' jsonpath""" + op = p[2] + + if op == '.': + p[0] = Child(p[1], p[3]) + elif op == '..': + p[0] = Descendants(p[1], p[3]) + elif op == 'where': + p[0] = Where(p[1], p[3]) + elif op == '|': + p[0] = Union(p[1], p[3]) + elif op == '&': + p[0] = Intersect(p[1], p[3]) + + def p_jsonpath_fields(self, p): + "jsonpath : fields_or_any" + p[0] = Fields(*p[1]) + + def p_jsonpath_named_operator(self, p): + "jsonpath : NAMED_OPERATOR" + if p[1] == 'this': + p[0] = This() + elif p[1] == 'parent': + p[0] = Parent() + else: + raise JsonPathParserError('Unknown named operator `%s` at %s:%s' + % (p[1], p.lineno(1), p.lexpos(1))) + + def p_jsonpath_root(self, p): + "jsonpath : '$'" + p[0] = Root() + + def p_jsonpath_idx(self, p): + "jsonpath : '[' idx ']'" + p[0] = p[2] + + def p_jsonpath_slice(self, p): + "jsonpath : '[' slice ']'" + p[0] = p[2] + + def p_jsonpath_fieldbrackets(self, p): + "jsonpath : '[' fields ']'" + p[0] = Fields(*p[2]) + + def p_jsonpath_child_fieldbrackets(self, p): + "jsonpath : jsonpath '[' fields ']'" + p[0] = Child(p[1], Fields(*p[3])) + + def p_jsonpath_child_idxbrackets(self, p): + "jsonpath : jsonpath '[' idx ']'" + p[0] = Child(p[1], p[3]) + + def p_jsonpath_child_slicebrackets(self, p): + "jsonpath : jsonpath '[' slice ']'" + p[0] = Child(p[1], p[3]) + + def p_jsonpath_parens(self, p): + "jsonpath : '(' jsonpath ')'" + p[0] = p[2] + + # Because fields in brackets cannot be '*' - that is reserved for array indices + def p_fields_or_any(self, p): + """fields_or_any : fields + | '*' """ + if p[1] == '*': + p[0] = ['*'] + else: + p[0] = p[1] + + def p_fields_id(self, p): + "fields : ID" + p[0] = [p[1]] + + def p_fields_comma(self, p): + "fields : fields ',' fields" + p[0] = p[1] + p[3] + + def p_idx(self, p): + "idx : NUMBER" + p[0] = Index(p[1]) + + def p_slice_any(self, p): + "slice : '*'" + p[0] = Slice() + + def p_slice(self, p): # Currently does not support `step` + """slice : maybe_int ':' maybe_int + | maybe_int ':' maybe_int ':' maybe_int """ + p[0] = Slice(*p[1::2]) + + def p_maybe_int(self, p): + """maybe_int : NUMBER + | empty""" + p[0] = p[1] + + def p_empty(self, p): + 'empty :' + p[0] = None + +class IteratorToTokenStream: + def __init__(self, iterator): + self.iterator = iterator + + def token(self): + try: + return next(self.iterator) + except StopIteration: + return None + + +if __name__ == '__main__': + logging.basicConfig() + parser = JsonPathParser(debug=True) + print(parser.parse(sys.stdin.read())) \ No newline at end of file diff --git a/ddtrace/vendor/ply/__init__.py b/ddtrace/vendor/ply/__init__.py new file mode 100644 index 00000000000..23707c63541 --- /dev/null +++ b/ddtrace/vendor/ply/__init__.py @@ -0,0 +1,5 @@ +# PLY package +# Author: David Beazley (dave@dabeaz.com) + +__version__ = '3.11' +__all__ = ['lex','yacc'] diff --git a/ddtrace/vendor/ply/lex.py b/ddtrace/vendor/ply/lex.py new file mode 100644 index 00000000000..f95bcdbf1bb --- /dev/null +++ b/ddtrace/vendor/ply/lex.py @@ -0,0 +1,1098 @@ +# ----------------------------------------------------------------------------- +# ply: lex.py +# +# Copyright (C) 2001-2018 +# David M. Beazley (Dabeaz LLC) +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the David Beazley or Dabeaz LLC may be used to +# endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# ----------------------------------------------------------------------------- + +__version__ = '3.11' +__tabversion__ = '3.10' + +import re +import sys +import types +import copy +import os +import inspect + +# This tuple contains known string types +try: + # Python 2.6 + StringTypes = (types.StringType, types.UnicodeType) +except AttributeError: + # Python 3.0 + StringTypes = (str, bytes) + +# This regular expression is used to match valid token names +_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$') + +# Exception thrown when invalid token encountered and no default error +# handler is defined. +class LexError(Exception): + def __init__(self, message, s): + self.args = (message,) + self.text = s + + +# Token class. This class is used to represent the tokens produced. +class LexToken(object): + def __str__(self): + return 'LexToken(%s,%r,%d,%d)' % (self.type, self.value, self.lineno, self.lexpos) + + def __repr__(self): + return str(self) + + +# This object is a stand-in for a logging object created by the +# logging module. + +class PlyLogger(object): + def __init__(self, f): + self.f = f + + def critical(self, msg, *args, **kwargs): + self.f.write((msg % args) + '\n') + + def warning(self, msg, *args, **kwargs): + self.f.write('WARNING: ' + (msg % args) + '\n') + + def error(self, msg, *args, **kwargs): + self.f.write('ERROR: ' + (msg % args) + '\n') + + info = critical + debug = critical + + +# Null logger is used when no output is generated. Does nothing. +class NullLogger(object): + def __getattribute__(self, name): + return self + + def __call__(self, *args, **kwargs): + return self + + +# ----------------------------------------------------------------------------- +# === Lexing Engine === +# +# The following Lexer class implements the lexer runtime. 
There are only +# a few public methods and attributes: +# +# input() - Store a new string in the lexer +# token() - Get the next token +# clone() - Clone the lexer +# +# lineno - Current line number +# lexpos - Current position in the input string +# ----------------------------------------------------------------------------- + +class Lexer: + def __init__(self): + self.lexre = None # Master regular expression. This is a list of + # tuples (re, findex) where re is a compiled + # regular expression and findex is a list + # mapping regex group numbers to rules + self.lexretext = None # Current regular expression strings + self.lexstatere = {} # Dictionary mapping lexer states to master regexs + self.lexstateretext = {} # Dictionary mapping lexer states to regex strings + self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names + self.lexstate = 'INITIAL' # Current lexer state + self.lexstatestack = [] # Stack of lexer states + self.lexstateinfo = None # State information + self.lexstateignore = {} # Dictionary of ignored characters for each state + self.lexstateerrorf = {} # Dictionary of error functions for each state + self.lexstateeoff = {} # Dictionary of eof functions for each state + self.lexreflags = 0 # Optional re compile flags + self.lexdata = None # Actual input data (as a string) + self.lexpos = 0 # Current position in input text + self.lexlen = 0 # Length of the input text + self.lexerrorf = None # Error rule (if any) + self.lexeoff = None # EOF rule (if any) + self.lextokens = None # List of valid tokens + self.lexignore = '' # Ignored characters + self.lexliterals = '' # Literal characters that can be passed through + self.lexmodule = None # Module + self.lineno = 1 # Current line number + self.lexoptimize = False # Optimized mode + + def clone(self, object=None): + c = copy.copy(self) + + # If the object parameter has been supplied, it means we are attaching the + # lexer to a new object. In this case, we have to rebind all methods in + # the lexstatere and lexstateerrorf tables. + + if object: + newtab = {} + for key, ritem in self.lexstatere.items(): + newre = [] + for cre, findex in ritem: + newfindex = [] + for f in findex: + if not f or not f[0]: + newfindex.append(f) + continue + newfindex.append((getattr(object, f[0].__name__), f[1])) + newre.append((cre, newfindex)) + newtab[key] = newre + c.lexstatere = newtab + c.lexstateerrorf = {} + for key, ef in self.lexstateerrorf.items(): + c.lexstateerrorf[key] = getattr(object, ef.__name__) + c.lexmodule = object + return c + + # ------------------------------------------------------------ + # writetab() - Write lexer information to a table file + # ------------------------------------------------------------ + def writetab(self, lextab, outputdir=''): + if isinstance(lextab, types.ModuleType): + raise IOError("Won't overwrite existing lextab module") + basetabmodule = lextab.split('.')[-1] + filename = os.path.join(outputdir, basetabmodule) + '.py' + with open(filename, 'w') as tf: + tf.write('# %s.py. This file automatically created by PLY (version %s). 
Don\'t edit!\n' % (basetabmodule, __version__)) + tf.write('_tabversion = %s\n' % repr(__tabversion__)) + tf.write('_lextokens = set(%s)\n' % repr(tuple(sorted(self.lextokens)))) + tf.write('_lexreflags = %s\n' % repr(int(self.lexreflags))) + tf.write('_lexliterals = %s\n' % repr(self.lexliterals)) + tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo)) + + # Rewrite the lexstatere table, replacing function objects with function names + tabre = {} + for statename, lre in self.lexstatere.items(): + titem = [] + for (pat, func), retext, renames in zip(lre, self.lexstateretext[statename], self.lexstaterenames[statename]): + titem.append((retext, _funcs_to_names(func, renames))) + tabre[statename] = titem + + tf.write('_lexstatere = %s\n' % repr(tabre)) + tf.write('_lexstateignore = %s\n' % repr(self.lexstateignore)) + + taberr = {} + for statename, ef in self.lexstateerrorf.items(): + taberr[statename] = ef.__name__ if ef else None + tf.write('_lexstateerrorf = %s\n' % repr(taberr)) + + tabeof = {} + for statename, ef in self.lexstateeoff.items(): + tabeof[statename] = ef.__name__ if ef else None + tf.write('_lexstateeoff = %s\n' % repr(tabeof)) + + # ------------------------------------------------------------ + # readtab() - Read lexer information from a tab file + # ------------------------------------------------------------ + def readtab(self, tabfile, fdict): + if isinstance(tabfile, types.ModuleType): + lextab = tabfile + else: + exec('import %s' % tabfile) + lextab = sys.modules[tabfile] + + if getattr(lextab, '_tabversion', '0.0') != __tabversion__: + raise ImportError('Inconsistent PLY version') + + self.lextokens = lextab._lextokens + self.lexreflags = lextab._lexreflags + self.lexliterals = lextab._lexliterals + self.lextokens_all = self.lextokens | set(self.lexliterals) + self.lexstateinfo = lextab._lexstateinfo + self.lexstateignore = lextab._lexstateignore + self.lexstatere = {} + self.lexstateretext = {} + for statename, lre in lextab._lexstatere.items(): + titem = [] + txtitem = [] + for pat, func_name in lre: + titem.append((re.compile(pat, lextab._lexreflags), _names_to_funcs(func_name, fdict))) + + self.lexstatere[statename] = titem + self.lexstateretext[statename] = txtitem + + self.lexstateerrorf = {} + for statename, ef in lextab._lexstateerrorf.items(): + self.lexstateerrorf[statename] = fdict[ef] + + self.lexstateeoff = {} + for statename, ef in lextab._lexstateeoff.items(): + self.lexstateeoff[statename] = fdict[ef] + + self.begin('INITIAL') + + # ------------------------------------------------------------ + # input() - Push a new string into the lexer + # ------------------------------------------------------------ + def input(self, s): + # Pull off the first character to see if s looks like a string + c = s[:1] + if not isinstance(c, StringTypes): + raise ValueError('Expected a string') + self.lexdata = s + self.lexpos = 0 + self.lexlen = len(s) + + # ------------------------------------------------------------ + # begin() - Changes the lexing state + # ------------------------------------------------------------ + def begin(self, state): + if state not in self.lexstatere: + raise ValueError('Undefined state') + self.lexre = self.lexstatere[state] + self.lexretext = self.lexstateretext[state] + self.lexignore = self.lexstateignore.get(state, '') + self.lexerrorf = self.lexstateerrorf.get(state, None) + self.lexeoff = self.lexstateeoff.get(state, None) + self.lexstate = state + + # ------------------------------------------------------------ + # push_state() - 
Changes the lexing state and saves old on stack + # ------------------------------------------------------------ + def push_state(self, state): + self.lexstatestack.append(self.lexstate) + self.begin(state) + + # ------------------------------------------------------------ + # pop_state() - Restores the previous state + # ------------------------------------------------------------ + def pop_state(self): + self.begin(self.lexstatestack.pop()) + + # ------------------------------------------------------------ + # current_state() - Returns the current lexing state + # ------------------------------------------------------------ + def current_state(self): + return self.lexstate + + # ------------------------------------------------------------ + # skip() - Skip ahead n characters + # ------------------------------------------------------------ + def skip(self, n): + self.lexpos += n + + # ------------------------------------------------------------ + # opttoken() - Return the next token from the Lexer + # + # Note: This function has been carefully implemented to be as fast + # as possible. Don't make changes unless you really know what + # you are doing + # ------------------------------------------------------------ + def token(self): + # Make local copies of frequently referenced attributes + lexpos = self.lexpos + lexlen = self.lexlen + lexignore = self.lexignore + lexdata = self.lexdata + + while lexpos < lexlen: + # This code provides some short-circuit code for whitespace, tabs, and other ignored characters + if lexdata[lexpos] in lexignore: + lexpos += 1 + continue + + # Look for a regular expression match + for lexre, lexindexfunc in self.lexre: + m = lexre.match(lexdata, lexpos) + if not m: + continue + + # Create a token for return + tok = LexToken() + tok.value = m.group() + tok.lineno = self.lineno + tok.lexpos = lexpos + + i = m.lastindex + func, tok.type = lexindexfunc[i] + + if not func: + # If no token type was set, it's an ignored token + if tok.type: + self.lexpos = m.end() + return tok + else: + lexpos = m.end() + break + + lexpos = m.end() + + # If token is processed by a function, call it + + tok.lexer = self # Set additional attributes useful in token rules + self.lexmatch = m + self.lexpos = lexpos + + newtok = func(tok) + + # Every function must return a token, if nothing, we just move to next token + if not newtok: + lexpos = self.lexpos # This is here in case user has updated lexpos. + lexignore = self.lexignore # This is here in case there was a state change + break + + # Verify type of the token. If not in the token map, raise an error + if not self.lexoptimize: + if newtok.type not in self.lextokens_all: + raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % ( + func.__code__.co_filename, func.__code__.co_firstlineno, + func.__name__, newtok.type), lexdata[lexpos:]) + + return newtok + else: + # No match, see if in literals + if lexdata[lexpos] in self.lexliterals: + tok = LexToken() + tok.value = lexdata[lexpos] + tok.lineno = self.lineno + tok.type = tok.value + tok.lexpos = lexpos + self.lexpos = lexpos + 1 + return tok + + # No match. Call t_error() if defined. + if self.lexerrorf: + tok = LexToken() + tok.value = self.lexdata[lexpos:] + tok.lineno = self.lineno + tok.type = 'error' + tok.lexer = self + tok.lexpos = lexpos + self.lexpos = lexpos + newtok = self.lexerrorf(tok) + if lexpos == self.lexpos: + # Error method didn't change text position at all. This is an error. + raise LexError("Scanning error. 
Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:]) + lexpos = self.lexpos + if not newtok: + continue + return newtok + + self.lexpos = lexpos + raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:]) + + if self.lexeoff: + tok = LexToken() + tok.type = 'eof' + tok.value = '' + tok.lineno = self.lineno + tok.lexpos = lexpos + tok.lexer = self + self.lexpos = lexpos + newtok = self.lexeoff(tok) + return newtok + + self.lexpos = lexpos + 1 + if self.lexdata is None: + raise RuntimeError('No input string given with input()') + return None + + # Iterator interface + def __iter__(self): + return self + + def next(self): + t = self.token() + if t is None: + raise StopIteration + return t + + __next__ = next + +# ----------------------------------------------------------------------------- +# ==== Lex Builder === +# +# The functions and classes below are used to collect lexing information +# and build a Lexer object from it. +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# _get_regex(func) +# +# Returns the regular expression assigned to a function either as a doc string +# or as a .regex attribute attached by the @TOKEN decorator. +# ----------------------------------------------------------------------------- +def _get_regex(func): + return getattr(func, 'regex', func.__doc__) + +# ----------------------------------------------------------------------------- +# get_caller_module_dict() +# +# This function returns a dictionary containing all of the symbols defined within +# a caller further down the call stack. This is used to get the environment +# associated with the yacc() call if none was provided. +# ----------------------------------------------------------------------------- +def get_caller_module_dict(levels): + f = sys._getframe(levels) + ldict = f.f_globals.copy() + if f.f_globals != f.f_locals: + ldict.update(f.f_locals) + return ldict + +# ----------------------------------------------------------------------------- +# _funcs_to_names() +# +# Given a list of regular expression functions, this converts it to a list +# suitable for output to a table file +# ----------------------------------------------------------------------------- +def _funcs_to_names(funclist, namelist): + result = [] + for f, name in zip(funclist, namelist): + if f and f[0]: + result.append((name, f[1])) + else: + result.append(f) + return result + +# ----------------------------------------------------------------------------- +# _names_to_funcs() +# +# Given a list of regular expression function names, this converts it back to +# functions. +# ----------------------------------------------------------------------------- +def _names_to_funcs(namelist, fdict): + result = [] + for n in namelist: + if n and n[0]: + result.append((fdict[n[0]], n[1])) + else: + result.append(n) + return result + +# ----------------------------------------------------------------------------- +# _form_master_re() +# +# This function takes a list of all of the regex components and attempts to +# form the master regular expression. Given limitations in the Python re +# module, it may be necessary to break the master regex into separate expressions. 
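+# On failure, the list of rules is split in half and each half is compiled
+# recursively, until the sub-expressions compile.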
+# ----------------------------------------------------------------------------- +def _form_master_re(relist, reflags, ldict, toknames): + if not relist: + return [] + regex = '|'.join(relist) + try: + lexre = re.compile(regex, reflags) + + # Build the index to function map for the matching engine + lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1) + lexindexnames = lexindexfunc[:] + + for f, i in lexre.groupindex.items(): + handle = ldict.get(f, None) + if type(handle) in (types.FunctionType, types.MethodType): + lexindexfunc[i] = (handle, toknames[f]) + lexindexnames[i] = f + elif handle is not None: + lexindexnames[i] = f + if f.find('ignore_') > 0: + lexindexfunc[i] = (None, None) + else: + lexindexfunc[i] = (None, toknames[f]) + + return [(lexre, lexindexfunc)], [regex], [lexindexnames] + except Exception: + m = int(len(relist)/2) + if m == 0: + m = 1 + llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames) + rlist, rre, rnames = _form_master_re(relist[m:], reflags, ldict, toknames) + return (llist+rlist), (lre+rre), (lnames+rnames) + +# ----------------------------------------------------------------------------- +# def _statetoken(s,names) +# +# Given a declaration name s of the form "t_" and a dictionary whose keys are +# state names, this function returns a tuple (states,tokenname) where states +# is a tuple of state names and tokenname is the name of the token. For example, +# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM') +# ----------------------------------------------------------------------------- +def _statetoken(s, names): + parts = s.split('_') + for i, part in enumerate(parts[1:], 1): + if part not in names and part != 'ANY': + break + + if i > 1: + states = tuple(parts[1:i]) + else: + states = ('INITIAL',) + + if 'ANY' in states: + states = tuple(names) + + tokenname = '_'.join(parts[i:]) + return (states, tokenname) + + +# ----------------------------------------------------------------------------- +# LexerReflect() +# +# This class represents information needed to build a lexer as extracted from a +# user's input file. 
+# ----------------------------------------------------------------------------- +class LexerReflect(object): + def __init__(self, ldict, log=None, reflags=0): + self.ldict = ldict + self.error_func = None + self.tokens = [] + self.reflags = reflags + self.stateinfo = {'INITIAL': 'inclusive'} + self.modules = set() + self.error = False + self.log = PlyLogger(sys.stderr) if log is None else log + + # Get all of the basic information + def get_all(self): + self.get_tokens() + self.get_literals() + self.get_states() + self.get_rules() + + # Validate all of the information + def validate_all(self): + self.validate_tokens() + self.validate_literals() + self.validate_rules() + return self.error + + # Get the tokens map + def get_tokens(self): + tokens = self.ldict.get('tokens', None) + if not tokens: + self.log.error('No token list is defined') + self.error = True + return + + if not isinstance(tokens, (list, tuple)): + self.log.error('tokens must be a list or tuple') + self.error = True + return + + if not tokens: + self.log.error('tokens is empty') + self.error = True + return + + self.tokens = tokens + + # Validate the tokens + def validate_tokens(self): + terminals = {} + for n in self.tokens: + if not _is_identifier.match(n): + self.log.error("Bad token name '%s'", n) + self.error = True + if n in terminals: + self.log.warning("Token '%s' multiply defined", n) + terminals[n] = 1 + + # Get the literals specifier + def get_literals(self): + self.literals = self.ldict.get('literals', '') + if not self.literals: + self.literals = '' + + # Validate literals + def validate_literals(self): + try: + for c in self.literals: + if not isinstance(c, StringTypes) or len(c) > 1: + self.log.error('Invalid literal %s. Must be a single character', repr(c)) + self.error = True + + except TypeError: + self.log.error('Invalid literals specification. literals must be a sequence of characters') + self.error = True + + def get_states(self): + self.states = self.ldict.get('states', None) + # Build statemap + if self.states: + if not isinstance(self.states, (tuple, list)): + self.log.error('states must be defined as a tuple or list') + self.error = True + else: + for s in self.states: + if not isinstance(s, tuple) or len(s) != 2: + self.log.error("Invalid state specifier %s. 
Must be a tuple (statename,'exclusive|inclusive')", repr(s)) + self.error = True + continue + name, statetype = s + if not isinstance(name, StringTypes): + self.log.error('State name %s must be a string', repr(name)) + self.error = True + continue + if not (statetype == 'inclusive' or statetype == 'exclusive'): + self.log.error("State type for state %s must be 'inclusive' or 'exclusive'", name) + self.error = True + continue + if name in self.stateinfo: + self.log.error("State '%s' already defined", name) + self.error = True + continue + self.stateinfo[name] = statetype + + # Get all of the symbols with a t_ prefix and sort them into various + # categories (functions, strings, error functions, and ignore characters) + + def get_rules(self): + tsymbols = [f for f in self.ldict if f[:2] == 't_'] + + # Now build up a list of functions and a list of strings + self.toknames = {} # Mapping of symbols to token names + self.funcsym = {} # Symbols defined as functions + self.strsym = {} # Symbols defined as strings + self.ignore = {} # Ignore strings by state + self.errorf = {} # Error functions by state + self.eoff = {} # EOF functions by state + + for s in self.stateinfo: + self.funcsym[s] = [] + self.strsym[s] = [] + + if len(tsymbols) == 0: + self.log.error('No rules of the form t_rulename are defined') + self.error = True + return + + for f in tsymbols: + t = self.ldict[f] + states, tokname = _statetoken(f, self.stateinfo) + self.toknames[f] = tokname + + if hasattr(t, '__call__'): + if tokname == 'error': + for s in states: + self.errorf[s] = t + elif tokname == 'eof': + for s in states: + self.eoff[s] = t + elif tokname == 'ignore': + line = t.__code__.co_firstlineno + file = t.__code__.co_filename + self.log.error("%s:%d: Rule '%s' must be defined as a string", file, line, t.__name__) + self.error = True + else: + for s in states: + self.funcsym[s].append((f, t)) + elif isinstance(t, StringTypes): + if tokname == 'ignore': + for s in states: + self.ignore[s] = t + if '\\' in t: + self.log.warning("%s contains a literal backslash '\\'", f) + + elif tokname == 'error': + self.log.error("Rule '%s' must be defined as a function", f) + self.error = True + else: + for s in states: + self.strsym[s].append((f, t)) + else: + self.log.error('%s not defined as a function or string', f) + self.error = True + + # Sort the functions by line number + for f in self.funcsym.values(): + f.sort(key=lambda x: x[1].__code__.co_firstlineno) + + # Sort the strings by regular expression length + for s in self.strsym.values(): + s.sort(key=lambda x: len(x[1]), reverse=True) + + # Validate all of the t_rules collected + def validate_rules(self): + for state in self.stateinfo: + # Validate all rules defined by functions + + for fname, f in self.funcsym[state]: + line = f.__code__.co_firstlineno + file = f.__code__.co_filename + module = inspect.getmodule(f) + self.modules.add(module) + + tokname = self.toknames[fname] + if isinstance(f, types.MethodType): + reqargs = 2 + else: + reqargs = 1 + nargs = f.__code__.co_argcount + if nargs > reqargs: + self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__) + self.error = True + continue + + if nargs < reqargs: + self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__) + self.error = True + continue + + if not _get_regex(f): + self.log.error("%s:%d: No regular expression defined for rule '%s'", file, line, f.__name__) + self.error = True + continue + + try: + c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), 
self.reflags) + if c.match(''): + self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file, line, f.__name__) + self.error = True + except re.error as e: + self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file, line, f.__name__, e) + if '#' in _get_regex(f): + self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'", file, line, f.__name__) + self.error = True + + # Validate all rules defined by strings + for name, r in self.strsym[state]: + tokname = self.toknames[name] + if tokname == 'error': + self.log.error("Rule '%s' must be defined as a function", name) + self.error = True + continue + + if tokname not in self.tokens and tokname.find('ignore_') < 0: + self.log.error("Rule '%s' defined for an unspecified token %s", name, tokname) + self.error = True + continue + + try: + c = re.compile('(?P<%s>%s)' % (name, r), self.reflags) + if (c.match('')): + self.log.error("Regular expression for rule '%s' matches empty string", name) + self.error = True + except re.error as e: + self.log.error("Invalid regular expression for rule '%s'. %s", name, e) + if '#' in r: + self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'", name) + self.error = True + + if not self.funcsym[state] and not self.strsym[state]: + self.log.error("No rules defined for state '%s'", state) + self.error = True + + # Validate the error function + efunc = self.errorf.get(state, None) + if efunc: + f = efunc + line = f.__code__.co_firstlineno + file = f.__code__.co_filename + module = inspect.getmodule(f) + self.modules.add(module) + + if isinstance(f, types.MethodType): + reqargs = 2 + else: + reqargs = 1 + nargs = f.__code__.co_argcount + if nargs > reqargs: + self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__) + self.error = True + + if nargs < reqargs: + self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__) + self.error = True + + for module in self.modules: + self.validate_module(module) + + # ----------------------------------------------------------------------------- + # validate_module() + # + # This checks to see if there are duplicated t_rulename() functions or strings + # in the parser input file. This is done using a simple regular expression + # match on each line in the source code of the given module. + # ----------------------------------------------------------------------------- + + def validate_module(self, module): + try: + lines, linen = inspect.getsourcelines(module) + except IOError: + return + + fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(') + sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=') + + counthash = {} + linen += 1 + for line in lines: + m = fre.match(line) + if not m: + m = sre.match(line) + if m: + name = m.group(1) + prev = counthash.get(name) + if not prev: + counthash[name] = linen + else: + filename = inspect.getsourcefile(module) + self.log.error('%s:%d: Rule %s redefined. 
Previously defined on line %d', filename, linen, name, prev) + self.error = True + linen += 1 + +# ----------------------------------------------------------------------------- +# lex(module) +# +# Build all of the regular expression rules from definitions in the supplied module +# ----------------------------------------------------------------------------- +def lex(module=None, object=None, debug=False, optimize=False, lextab='lextab', + reflags=int(re.VERBOSE), nowarn=False, outputdir=None, debuglog=None, errorlog=None): + + if lextab is None: + lextab = 'lextab' + + global lexer + + ldict = None + stateinfo = {'INITIAL': 'inclusive'} + lexobj = Lexer() + lexobj.lexoptimize = optimize + global token, input + + if errorlog is None: + errorlog = PlyLogger(sys.stderr) + + if debug: + if debuglog is None: + debuglog = PlyLogger(sys.stderr) + + # Get the module dictionary used for the lexer + if object: + module = object + + # Get the module dictionary used for the parser + if module: + _items = [(k, getattr(module, k)) for k in dir(module)] + ldict = dict(_items) + # If no __file__ attribute is available, try to obtain it from the __module__ instead + if '__file__' not in ldict: + ldict['__file__'] = sys.modules[ldict['__module__']].__file__ + else: + ldict = get_caller_module_dict(2) + + # Determine if the module is package of a package or not. + # If so, fix the tabmodule setting so that tables load correctly + pkg = ldict.get('__package__') + if pkg and isinstance(lextab, str): + if '.' not in lextab: + lextab = pkg + '.' + lextab + + # Collect parser information from the dictionary + linfo = LexerReflect(ldict, log=errorlog, reflags=reflags) + linfo.get_all() + if not optimize: + if linfo.validate_all(): + raise SyntaxError("Can't build lexer") + + if optimize and lextab: + try: + lexobj.readtab(lextab, ldict) + token = lexobj.token + input = lexobj.input + lexer = lexobj + return lexobj + + except ImportError: + pass + + # Dump some basic debugging information + if debug: + debuglog.info('lex: tokens = %r', linfo.tokens) + debuglog.info('lex: literals = %r', linfo.literals) + debuglog.info('lex: states = %r', linfo.stateinfo) + + # Build a dictionary of valid token names + lexobj.lextokens = set() + for n in linfo.tokens: + lexobj.lextokens.add(n) + + # Get literals specification + if isinstance(linfo.literals, (list, tuple)): + lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals) + else: + lexobj.lexliterals = linfo.literals + + lexobj.lextokens_all = lexobj.lextokens | set(lexobj.lexliterals) + + # Get the stateinfo dictionary + stateinfo = linfo.stateinfo + + regexs = {} + # Build the master regular expressions + for state in stateinfo: + regex_list = [] + + # Add rules defined by functions first + for fname, f in linfo.funcsym[state]: + regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f))) + if debug: + debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state) + + # Now add all of the simple rules + for name, r in linfo.strsym[state]: + regex_list.append('(?P<%s>%s)' % (name, r)) + if debug: + debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state) + + regexs[state] = regex_list + + # Build the master regular expressions + + if debug: + debuglog.info('lex: ==== MASTER REGEXS FOLLOW ====') + + for state in regexs: + lexre, re_text, re_names = _form_master_re(regexs[state], reflags, ldict, linfo.toknames) + lexobj.lexstatere[state] = lexre + lexobj.lexstateretext[state] = re_text + lexobj.lexstaterenames[state] = 
re_names + if debug: + for i, text in enumerate(re_text): + debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text) + + # For inclusive states, we need to add the regular expressions from the INITIAL state + for state, stype in stateinfo.items(): + if state != 'INITIAL' and stype == 'inclusive': + lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL']) + lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL']) + lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL']) + + lexobj.lexstateinfo = stateinfo + lexobj.lexre = lexobj.lexstatere['INITIAL'] + lexobj.lexretext = lexobj.lexstateretext['INITIAL'] + lexobj.lexreflags = reflags + + # Set up ignore variables + lexobj.lexstateignore = linfo.ignore + lexobj.lexignore = lexobj.lexstateignore.get('INITIAL', '') + + # Set up error functions + lexobj.lexstateerrorf = linfo.errorf + lexobj.lexerrorf = linfo.errorf.get('INITIAL', None) + if not lexobj.lexerrorf: + errorlog.warning('No t_error rule is defined') + + # Set up eof functions + lexobj.lexstateeoff = linfo.eoff + lexobj.lexeoff = linfo.eoff.get('INITIAL', None) + + # Check state information for ignore and error rules + for s, stype in stateinfo.items(): + if stype == 'exclusive': + if s not in linfo.errorf: + errorlog.warning("No error rule is defined for exclusive state '%s'", s) + if s not in linfo.ignore and lexobj.lexignore: + errorlog.warning("No ignore rule is defined for exclusive state '%s'", s) + elif stype == 'inclusive': + if s not in linfo.errorf: + linfo.errorf[s] = linfo.errorf.get('INITIAL', None) + if s not in linfo.ignore: + linfo.ignore[s] = linfo.ignore.get('INITIAL', '') + + # Create global versions of the token() and input() functions + token = lexobj.token + input = lexobj.input + lexer = lexobj + + # If in optimize mode, we write the lextab + if lextab and optimize: + if outputdir is None: + # If no output directory is set, the location of the output files + # is determined according to the following rules: + # - If lextab specifies a package, files go into that package directory + # - Otherwise, files go in the same directory as the specifying module + if isinstance(lextab, types.ModuleType): + srcfile = lextab.__file__ + else: + if '.' not in lextab: + srcfile = ldict['__file__'] + else: + parts = lextab.split('.') + pkgname = '.'.join(parts[:-1]) + exec('import %s' % pkgname) + srcfile = getattr(sys.modules[pkgname], '__file__', '') + outputdir = os.path.dirname(srcfile) + try: + lexobj.writetab(lextab, outputdir) + if lextab in sys.modules: + del sys.modules[lextab] + except IOError as e: + errorlog.warning("Couldn't write lextab module %r. 
%s" % (lextab, e)) + + return lexobj + +# ----------------------------------------------------------------------------- +# runmain() +# +# This runs the lexer as a main program +# ----------------------------------------------------------------------------- + +def runmain(lexer=None, data=None): + if not data: + try: + filename = sys.argv[1] + f = open(filename) + data = f.read() + f.close() + except IndexError: + sys.stdout.write('Reading from standard input (type EOF to end):\n') + data = sys.stdin.read() + + if lexer: + _input = lexer.input + else: + _input = input + _input(data) + if lexer: + _token = lexer.token + else: + _token = token + + while True: + tok = _token() + if not tok: + break + sys.stdout.write('(%s,%r,%d,%d)\n' % (tok.type, tok.value, tok.lineno, tok.lexpos)) + +# ----------------------------------------------------------------------------- +# @TOKEN(regex) +# +# This decorator function can be used to set the regex expression on a function +# when its docstring might need to be set in an alternative way +# ----------------------------------------------------------------------------- + +def TOKEN(r): + def set_regex(f): + if hasattr(r, '__call__'): + f.regex = _get_regex(r) + else: + f.regex = r + return f + return set_regex + +# Alternative spelling of the TOKEN decorator +Token = TOKEN diff --git a/ddtrace/vendor/ply/yacc.py b/ddtrace/vendor/ply/yacc.py new file mode 100644 index 00000000000..88188a1e8ea --- /dev/null +++ b/ddtrace/vendor/ply/yacc.py @@ -0,0 +1,3502 @@ +# ----------------------------------------------------------------------------- +# ply: yacc.py +# +# Copyright (C) 2001-2018 +# David M. Beazley (Dabeaz LLC) +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the David Beazley or Dabeaz LLC may be used to +# endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# ----------------------------------------------------------------------------- +# +# This implements an LR parser that is constructed from grammar rules defined +# as Python functions. The grammar is specified by supplying the BNF inside +# Python documentation strings. The inspiration for this technique was borrowed +# from John Aycock's Spark parsing system. 
PLY might be viewed as cross between +# Spark and the GNU bison utility. +# +# The current implementation is only somewhat object-oriented. The +# LR parser itself is defined in terms of an object (which allows multiple +# parsers to co-exist). However, most of the variables used during table +# construction are defined in terms of global variables. Users shouldn't +# notice unless they are trying to define multiple parsers at the same +# time using threads (in which case they should have their head examined). +# +# This implementation supports both SLR and LALR(1) parsing. LALR(1) +# support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu), +# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles, +# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced +# by the more efficient DeRemer and Pennello algorithm. +# +# :::::::: WARNING ::::::: +# +# Construction of LR parsing tables is fairly complicated and expensive. +# To make this module run fast, a *LOT* of work has been put into +# optimization---often at the expensive of readability and what might +# consider to be good Python "coding style." Modify the code at your +# own risk! +# ---------------------------------------------------------------------------- + +import re +import types +import sys +import os.path +import inspect +import warnings + +__version__ = '3.11' +__tabversion__ = '3.10' + +#----------------------------------------------------------------------------- +# === User configurable parameters === +# +# Change these to modify the default behavior of yacc (if you wish) +#----------------------------------------------------------------------------- + +yaccdebug = True # Debugging mode. If set, yacc generates a + # a 'parser.out' file in the current directory + +debug_file = 'parser.out' # Default name of the debugging file +tab_module = 'parsetab' # Default name of the table module +default_lr = 'LALR' # Default LR table generation method + +error_count = 3 # Number of symbols that must be shifted to leave recovery mode + +yaccdevel = False # Set to True if developing yacc. This turns off optimized + # implementations of certain functions. + +resultlimit = 40 # Size limit of results when running in debug mode. + +pickle_protocol = 0 # Protocol to use when writing pickle files + +# String type-checking compatibility +if sys.version_info[0] < 3: + string_types = basestring +else: + string_types = str + +MAXINT = sys.maxsize + +# This object is a stand-in for a logging object created by the +# logging module. PLY will use this by default to create things +# such as the parser.out file. If a user wants more detailed +# information, they can create their own logging object and pass +# it into PLY. + +class PlyLogger(object): + def __init__(self, f): + self.f = f + + def debug(self, msg, *args, **kwargs): + self.f.write((msg % args) + '\n') + + info = debug + + def warning(self, msg, *args, **kwargs): + self.f.write('WARNING: ' + (msg % args) + '\n') + + def error(self, msg, *args, **kwargs): + self.f.write('ERROR: ' + (msg % args) + '\n') + + critical = debug + +# Null logger is used when no output is generated. Does nothing. +class NullLogger(object): + def __getattribute__(self, name): + return self + + def __call__(self, *args, **kwargs): + return self + +# Exception raised for yacc-related errors +class YaccError(Exception): + pass + +# Format the result message that the parser produces when running in debug mode. 
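+# For example (illustrative), format_result([1, 2, 3]) returns a string of the
+# form "<list @ 0x7f...> ([1, 2, 3])", with the repr truncated once it exceeds
+# `resultlimit` characters.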
+def format_result(r): + repr_str = repr(r) + if '\n' in repr_str: + repr_str = repr(repr_str) + if len(repr_str) > resultlimit: + repr_str = repr_str[:resultlimit] + ' ...' + result = '<%s @ 0x%x> (%s)' % (type(r).__name__, id(r), repr_str) + return result + +# Format stack entries when the parser is running in debug mode +def format_stack_entry(r): + repr_str = repr(r) + if '\n' in repr_str: + repr_str = repr(repr_str) + if len(repr_str) < 16: + return repr_str + else: + return '<%s @ 0x%x>' % (type(r).__name__, id(r)) + +# Panic mode error recovery support. This feature is being reworked--much of the +# code here is to offer a deprecation/backwards compatible transition + +_errok = None +_token = None +_restart = None +_warnmsg = '''PLY: Don't use global functions errok(), token(), and restart() in p_error(). +Instead, invoke the methods on the associated parser instance: + + def p_error(p): + ... + # Use parser.errok(), parser.token(), parser.restart() + ... + + parser = yacc.yacc() +''' + +def errok(): + warnings.warn(_warnmsg) + return _errok() + +def restart(): + warnings.warn(_warnmsg) + return _restart() + +def token(): + warnings.warn(_warnmsg) + return _token() + +# Utility function to call the p_error() function with some deprecation hacks +def call_errorfunc(errorfunc, token, parser): + global _errok, _token, _restart + _errok = parser.errok + _token = parser.token + _restart = parser.restart + r = errorfunc(token) + try: + del _errok, _token, _restart + except NameError: + pass + return r + +#----------------------------------------------------------------------------- +# === LR Parsing Engine === +# +# The following classes are used for the LR parser itself. These are not +# used during table construction and are independent of the actual LR +# table generation algorithm +#----------------------------------------------------------------------------- + +# This class is used to hold non-terminal grammar symbols during parsing. +# It normally has the following attributes set: +# .type = Grammar symbol type +# .value = Symbol value +# .lineno = Starting line number +# .endlineno = Ending line number (optional, set automatically) +# .lexpos = Starting lex position +# .endlexpos = Ending lex position (optional, set automatically) + +class YaccSymbol: + def __str__(self): + return self.type + + def __repr__(self): + return str(self) + +# This class is a wrapper around the objects actually passed to each +# grammar rule. Index lookup and assignment actually assign the +# .value attribute of the underlying YaccSymbol object. +# The lineno() method returns the line number of a given +# item (or 0 if not defined). The linespan() method returns +# a tuple of (startline,endline) representing the range of lines +# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos) +# representing the range of positional information for a symbol. 
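+#
+# Illustrative sketch (not part of this module): inside a grammar rule such as
+#
+#     def p_expression_plus(p):
+#         'expression : expression PLUS term'
+#         p[0] = p[1] + p[3]
+#
+# p is a YaccProduction: p[1] and p[3] read the .value of the matched symbols,
+# assigning p[0] sets the value of the result, and p.lineno(1) / p.lexspan(3)
+# expose the positional information described above.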
+ +class YaccProduction: + def __init__(self, s, stack=None): + self.slice = s + self.stack = stack + self.lexer = None + self.parser = None + + def __getitem__(self, n): + if isinstance(n, slice): + return [s.value for s in self.slice[n]] + elif n >= 0: + return self.slice[n].value + else: + return self.stack[n].value + + def __setitem__(self, n, v): + self.slice[n].value = v + + def __getslice__(self, i, j): + return [s.value for s in self.slice[i:j]] + + def __len__(self): + return len(self.slice) + + def lineno(self, n): + return getattr(self.slice[n], 'lineno', 0) + + def set_lineno(self, n, lineno): + self.slice[n].lineno = lineno + + def linespan(self, n): + startline = getattr(self.slice[n], 'lineno', 0) + endline = getattr(self.slice[n], 'endlineno', startline) + return startline, endline + + def lexpos(self, n): + return getattr(self.slice[n], 'lexpos', 0) + + def set_lexpos(self, n, lexpos): + self.slice[n].lexpos = lexpos + + def lexspan(self, n): + startpos = getattr(self.slice[n], 'lexpos', 0) + endpos = getattr(self.slice[n], 'endlexpos', startpos) + return startpos, endpos + + def error(self): + raise SyntaxError + +# ----------------------------------------------------------------------------- +# == LRParser == +# +# The LR Parsing engine. +# ----------------------------------------------------------------------------- + +class LRParser: + def __init__(self, lrtab, errorf): + self.productions = lrtab.lr_productions + self.action = lrtab.lr_action + self.goto = lrtab.lr_goto + self.errorfunc = errorf + self.set_defaulted_states() + self.errorok = True + + def errok(self): + self.errorok = True + + def restart(self): + del self.statestack[:] + del self.symstack[:] + sym = YaccSymbol() + sym.type = '$end' + self.symstack.append(sym) + self.statestack.append(0) + + # Defaulted state support. + # This method identifies parser states where there is only one possible reduction action. + # For such states, the parser can make a choose to make a rule reduction without consuming + # the next look-ahead token. This delayed invocation of the tokenizer can be useful in + # certain kinds of advanced parsing situations where the lexer and parser interact with + # each other or change states (i.e., manipulation of scope, lexer states, etc.). + # + # See: http://www.gnu.org/software/bison/manual/html_node/Default-Reductions.html#Default-Reductions + def set_defaulted_states(self): + self.defaulted_states = {} + for state, actions in self.action.items(): + rules = list(actions.values()) + if len(rules) == 1 and rules[0] < 0: + self.defaulted_states[state] = rules[0] + + def disable_defaulted_states(self): + self.defaulted_states = {} + + def parse(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None): + if debug or yaccdevel: + if isinstance(debug, int): + debug = PlyLogger(sys.stderr) + return self.parsedebug(input, lexer, debug, tracking, tokenfunc) + elif tracking: + return self.parseopt(input, lexer, debug, tracking, tokenfunc) + else: + return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc) + + + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + # parsedebug(). + # + # This is the debugging enabled version of parse(). All changes made to the + # parsing engine should be made here. Optimized versions of this function + # are automatically created by the ply/ygen.py script. This script cuts out + # sections enclosed in markers such as this: + # + # #--! DEBUG + # statements + # #--! 
DEBUG + # + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + def parsedebug(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None): + #--! parsedebug-start + lookahead = None # Current lookahead symbol + lookaheadstack = [] # Stack of lookahead symbols + actions = self.action # Local reference to action table (to avoid lookup on self.) + goto = self.goto # Local reference to goto table (to avoid lookup on self.) + prod = self.productions # Local reference to production list (to avoid lookup on self.) + defaulted_states = self.defaulted_states # Local reference to defaulted states + pslice = YaccProduction(None) # Production object passed to grammar rules + errorcount = 0 # Used during error recovery + + #--! DEBUG + debug.info('PLY: PARSE DEBUG START') + #--! DEBUG + + # If no lexer was given, we will try to use the lex module + if not lexer: + from . import lex + lexer = lex.lexer + + # Set up the lexer and parser objects on pslice + pslice.lexer = lexer + pslice.parser = self + + # If input was supplied, pass to lexer + if input is not None: + lexer.input(input) + + if tokenfunc is None: + # Tokenize function + get_token = lexer.token + else: + get_token = tokenfunc + + # Set the parser() token method (sometimes used in error recovery) + self.token = get_token + + # Set up the state and symbol stacks + + statestack = [] # Stack of parsing states + self.statestack = statestack + symstack = [] # Stack of grammar symbols + self.symstack = symstack + + pslice.stack = symstack # Put in the production + errtoken = None # Err token + + # The start state is assumed to be (0,$end) + + statestack.append(0) + sym = YaccSymbol() + sym.type = '$end' + symstack.append(sym) + state = 0 + while True: + # Get the next symbol on the input. If a lookahead symbol + # is already set, we just use that. Otherwise, we'll pull + # the next token off of the lookaheadstack or from the lexer + + #--! DEBUG + debug.debug('') + debug.debug('State : %s', state) + #--! DEBUG + + if state not in defaulted_states: + if not lookahead: + if not lookaheadstack: + lookahead = get_token() # Get the next token + else: + lookahead = lookaheadstack.pop() + if not lookahead: + lookahead = YaccSymbol() + lookahead.type = '$end' + + # Check the action table + ltype = lookahead.type + t = actions[state].get(ltype) + else: + t = defaulted_states[state] + #--! DEBUG + debug.debug('Defaulted state %s: Reduce using %d', state, -t) + #--! DEBUG + + #--! DEBUG + debug.debug('Stack : %s', + ('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip()) + #--! DEBUG + + if t is not None: + if t > 0: + # shift a symbol on the stack + statestack.append(t) + state = t + + #--! DEBUG + debug.debug('Action : Shift and goto state %s', t) + #--! DEBUG + + symstack.append(lookahead) + lookahead = None + + # Decrease error count on successful shift + if errorcount: + errorcount -= 1 + continue + + if t < 0: + # reduce a symbol on the stack, emit a production + p = prod[-t] + pname = p.name + plen = p.len + + # Get production function + sym = YaccSymbol() + sym.type = pname # Production name + sym.value = None + + #--! DEBUG + if plen: + debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, + '['+','.join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+']', + goto[statestack[-1-plen]][pname]) + else: + debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, [], + goto[statestack[-1]][pname]) + + #--! 
DEBUG + + if plen: + targ = symstack[-plen-1:] + targ[0] = sym + + #--! TRACKING + if tracking: + t1 = targ[1] + sym.lineno = t1.lineno + sym.lexpos = t1.lexpos + t1 = targ[-1] + sym.endlineno = getattr(t1, 'endlineno', t1.lineno) + sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos) + #--! TRACKING + + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + # The code enclosed in this section is duplicated + # below as a performance optimization. Make sure + # changes get made in both locations. + + pslice.slice = targ + + try: + # Call the grammar rule with our special slice object + del symstack[-plen:] + self.state = state + p.callable(pslice) + del statestack[-plen:] + #--! DEBUG + debug.info('Result : %s', format_result(pslice[0])) + #--! DEBUG + symstack.append(sym) + state = goto[statestack[-1]][pname] + statestack.append(state) + except SyntaxError: + # If an error was set. Enter error recovery state + lookaheadstack.append(lookahead) # Save the current lookahead token + symstack.extend(targ[1:-1]) # Put the production slice back on the stack + statestack.pop() # Pop back one state (before the reduce) + state = statestack[-1] + sym.type = 'error' + sym.value = 'error' + lookahead = sym + errorcount = error_count + self.errorok = False + + continue + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + else: + + #--! TRACKING + if tracking: + sym.lineno = lexer.lineno + sym.lexpos = lexer.lexpos + #--! TRACKING + + targ = [sym] + + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + # The code enclosed in this section is duplicated + # above as a performance optimization. Make sure + # changes get made in both locations. + + pslice.slice = targ + + try: + # Call the grammar rule with our special slice object + self.state = state + p.callable(pslice) + #--! DEBUG + debug.info('Result : %s', format_result(pslice[0])) + #--! DEBUG + symstack.append(sym) + state = goto[statestack[-1]][pname] + statestack.append(state) + except SyntaxError: + # If an error was set. Enter error recovery state + lookaheadstack.append(lookahead) # Save the current lookahead token + statestack.pop() # Pop back one state (before the reduce) + state = statestack[-1] + sym.type = 'error' + sym.value = 'error' + lookahead = sym + errorcount = error_count + self.errorok = False + + continue + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + if t == 0: + n = symstack[-1] + result = getattr(n, 'value', None) + #--! DEBUG + debug.info('Done : Returning %s', format_result(result)) + debug.info('PLY: PARSE DEBUG END') + #--! DEBUG + return result + + if t is None: + + #--! DEBUG + debug.error('Error : %s', + ('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip()) + #--! DEBUG + + # We have some kind of parsing error here. To handle + # this, we are going to push the current token onto + # the tokenstack and replace it with an 'error' token. + # If there are any synchronization rules, they may + # catch it. + # + # In addition to pushing the error token, we call call + # the user defined p_error() function if this is the + # first syntax error. This function is only called if + # errorcount == 0. + if errorcount == 0 or self.errorok: + errorcount = error_count + self.errorok = False + errtoken = lookahead + if errtoken.type == '$end': + errtoken = None # End of file! 
+ if self.errorfunc: + if errtoken and not hasattr(errtoken, 'lexer'): + errtoken.lexer = lexer + self.state = state + tok = call_errorfunc(self.errorfunc, errtoken, self) + if self.errorok: + # User must have done some kind of panic + # mode recovery on their own. The + # returned token is the next lookahead + lookahead = tok + errtoken = None + continue + else: + if errtoken: + if hasattr(errtoken, 'lineno'): + lineno = lookahead.lineno + else: + lineno = 0 + if lineno: + sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type)) + else: + sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type) + else: + sys.stderr.write('yacc: Parse error in input. EOF\n') + return + + else: + errorcount = error_count + + # case 1: the statestack only has 1 entry on it. If we're in this state, the + # entire parse has been rolled back and we're completely hosed. The token is + # discarded and we just keep going. + + if len(statestack) <= 1 and lookahead.type != '$end': + lookahead = None + errtoken = None + state = 0 + # Nuke the pushback stack + del lookaheadstack[:] + continue + + # case 2: the statestack has a couple of entries on it, but we're + # at the end of the file. nuke the top entry and generate an error token + + # Start nuking entries on the stack + if lookahead.type == '$end': + # Whoa. We're really hosed here. Bail out + return + + if lookahead.type != 'error': + sym = symstack[-1] + if sym.type == 'error': + # Hmmm. Error is on top of stack, we'll just nuke input + # symbol and continue + #--! TRACKING + if tracking: + sym.endlineno = getattr(lookahead, 'lineno', sym.lineno) + sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos) + #--! TRACKING + lookahead = None + continue + + # Create the error symbol for the first time and make it the new lookahead symbol + t = YaccSymbol() + t.type = 'error' + + if hasattr(lookahead, 'lineno'): + t.lineno = t.endlineno = lookahead.lineno + if hasattr(lookahead, 'lexpos'): + t.lexpos = t.endlexpos = lookahead.lexpos + t.value = lookahead + lookaheadstack.append(lookahead) + lookahead = t + else: + sym = symstack.pop() + #--! TRACKING + if tracking: + lookahead.lineno = sym.lineno + lookahead.lexpos = sym.lexpos + #--! TRACKING + statestack.pop() + state = statestack[-1] + + continue + + # Call an error function here + raise RuntimeError('yacc: internal parser error!!!\n') + + #--! parsedebug-end + + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + # parseopt(). + # + # Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY! + # This code is automatically generated by the ply/ygen.py script. Make + # changes to the parsedebug() method instead. + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + def parseopt(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None): + #--! parseopt-start + lookahead = None # Current lookahead symbol + lookaheadstack = [] # Stack of lookahead symbols + actions = self.action # Local reference to action table (to avoid lookup on self.) + goto = self.goto # Local reference to goto table (to avoid lookup on self.) + prod = self.productions # Local reference to production list (to avoid lookup on self.) 
+ defaulted_states = self.defaulted_states # Local reference to defaulted states + pslice = YaccProduction(None) # Production object passed to grammar rules + errorcount = 0 # Used during error recovery + + + # If no lexer was given, we will try to use the lex module + if not lexer: + from . import lex + lexer = lex.lexer + + # Set up the lexer and parser objects on pslice + pslice.lexer = lexer + pslice.parser = self + + # If input was supplied, pass to lexer + if input is not None: + lexer.input(input) + + if tokenfunc is None: + # Tokenize function + get_token = lexer.token + else: + get_token = tokenfunc + + # Set the parser() token method (sometimes used in error recovery) + self.token = get_token + + # Set up the state and symbol stacks + + statestack = [] # Stack of parsing states + self.statestack = statestack + symstack = [] # Stack of grammar symbols + self.symstack = symstack + + pslice.stack = symstack # Put in the production + errtoken = None # Err token + + # The start state is assumed to be (0,$end) + + statestack.append(0) + sym = YaccSymbol() + sym.type = '$end' + symstack.append(sym) + state = 0 + while True: + # Get the next symbol on the input. If a lookahead symbol + # is already set, we just use that. Otherwise, we'll pull + # the next token off of the lookaheadstack or from the lexer + + + if state not in defaulted_states: + if not lookahead: + if not lookaheadstack: + lookahead = get_token() # Get the next token + else: + lookahead = lookaheadstack.pop() + if not lookahead: + lookahead = YaccSymbol() + lookahead.type = '$end' + + # Check the action table + ltype = lookahead.type + t = actions[state].get(ltype) + else: + t = defaulted_states[state] + + + if t is not None: + if t > 0: + # shift a symbol on the stack + statestack.append(t) + state = t + + + symstack.append(lookahead) + lookahead = None + + # Decrease error count on successful shift + if errorcount: + errorcount -= 1 + continue + + if t < 0: + # reduce a symbol on the stack, emit a production + p = prod[-t] + pname = p.name + plen = p.len + + # Get production function + sym = YaccSymbol() + sym.type = pname # Production name + sym.value = None + + + if plen: + targ = symstack[-plen-1:] + targ[0] = sym + + #--! TRACKING + if tracking: + t1 = targ[1] + sym.lineno = t1.lineno + sym.lexpos = t1.lexpos + t1 = targ[-1] + sym.endlineno = getattr(t1, 'endlineno', t1.lineno) + sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos) + #--! TRACKING + + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + # The code enclosed in this section is duplicated + # below as a performance optimization. Make sure + # changes get made in both locations. + + pslice.slice = targ + + try: + # Call the grammar rule with our special slice object + del symstack[-plen:] + self.state = state + p.callable(pslice) + del statestack[-plen:] + symstack.append(sym) + state = goto[statestack[-1]][pname] + statestack.append(state) + except SyntaxError: + # If an error was set. Enter error recovery state + lookaheadstack.append(lookahead) # Save the current lookahead token + symstack.extend(targ[1:-1]) # Put the production slice back on the stack + statestack.pop() # Pop back one state (before the reduce) + state = statestack[-1] + sym.type = 'error' + sym.value = 'error' + lookahead = sym + errorcount = error_count + self.errorok = False + + continue + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + else: + + #--! TRACKING + if tracking: + sym.lineno = lexer.lineno + sym.lexpos = lexer.lexpos + #--! 
TRACKING + + targ = [sym] + + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + # The code enclosed in this section is duplicated + # above as a performance optimization. Make sure + # changes get made in both locations. + + pslice.slice = targ + + try: + # Call the grammar rule with our special slice object + self.state = state + p.callable(pslice) + symstack.append(sym) + state = goto[statestack[-1]][pname] + statestack.append(state) + except SyntaxError: + # If an error was set. Enter error recovery state + lookaheadstack.append(lookahead) # Save the current lookahead token + statestack.pop() # Pop back one state (before the reduce) + state = statestack[-1] + sym.type = 'error' + sym.value = 'error' + lookahead = sym + errorcount = error_count + self.errorok = False + + continue + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + if t == 0: + n = symstack[-1] + result = getattr(n, 'value', None) + return result + + if t is None: + + + # We have some kind of parsing error here. To handle + # this, we are going to push the current token onto + # the tokenstack and replace it with an 'error' token. + # If there are any synchronization rules, they may + # catch it. + # + # In addition to pushing the error token, we call call + # the user defined p_error() function if this is the + # first syntax error. This function is only called if + # errorcount == 0. + if errorcount == 0 or self.errorok: + errorcount = error_count + self.errorok = False + errtoken = lookahead + if errtoken.type == '$end': + errtoken = None # End of file! + if self.errorfunc: + if errtoken and not hasattr(errtoken, 'lexer'): + errtoken.lexer = lexer + self.state = state + tok = call_errorfunc(self.errorfunc, errtoken, self) + if self.errorok: + # User must have done some kind of panic + # mode recovery on their own. The + # returned token is the next lookahead + lookahead = tok + errtoken = None + continue + else: + if errtoken: + if hasattr(errtoken, 'lineno'): + lineno = lookahead.lineno + else: + lineno = 0 + if lineno: + sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type)) + else: + sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type) + else: + sys.stderr.write('yacc: Parse error in input. EOF\n') + return + + else: + errorcount = error_count + + # case 1: the statestack only has 1 entry on it. If we're in this state, the + # entire parse has been rolled back and we're completely hosed. The token is + # discarded and we just keep going. + + if len(statestack) <= 1 and lookahead.type != '$end': + lookahead = None + errtoken = None + state = 0 + # Nuke the pushback stack + del lookaheadstack[:] + continue + + # case 2: the statestack has a couple of entries on it, but we're + # at the end of the file. nuke the top entry and generate an error token + + # Start nuking entries on the stack + if lookahead.type == '$end': + # Whoa. We're really hosed here. Bail out + return + + if lookahead.type != 'error': + sym = symstack[-1] + if sym.type == 'error': + # Hmmm. Error is on top of stack, we'll just nuke input + # symbol and continue + #--! TRACKING + if tracking: + sym.endlineno = getattr(lookahead, 'lineno', sym.lineno) + sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos) + #--! 
TRACKING + lookahead = None + continue + + # Create the error symbol for the first time and make it the new lookahead symbol + t = YaccSymbol() + t.type = 'error' + + if hasattr(lookahead, 'lineno'): + t.lineno = t.endlineno = lookahead.lineno + if hasattr(lookahead, 'lexpos'): + t.lexpos = t.endlexpos = lookahead.lexpos + t.value = lookahead + lookaheadstack.append(lookahead) + lookahead = t + else: + sym = symstack.pop() + #--! TRACKING + if tracking: + lookahead.lineno = sym.lineno + lookahead.lexpos = sym.lexpos + #--! TRACKING + statestack.pop() + state = statestack[-1] + + continue + + # Call an error function here + raise RuntimeError('yacc: internal parser error!!!\n') + + #--! parseopt-end + + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + # parseopt_notrack(). + # + # Optimized version of parseopt() with line number tracking removed. + # DO NOT EDIT THIS CODE DIRECTLY. This code is automatically generated + # by the ply/ygen.py script. Make changes to the parsedebug() method instead. + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + def parseopt_notrack(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None): + #--! parseopt-notrack-start + lookahead = None # Current lookahead symbol + lookaheadstack = [] # Stack of lookahead symbols + actions = self.action # Local reference to action table (to avoid lookup on self.) + goto = self.goto # Local reference to goto table (to avoid lookup on self.) + prod = self.productions # Local reference to production list (to avoid lookup on self.) + defaulted_states = self.defaulted_states # Local reference to defaulted states + pslice = YaccProduction(None) # Production object passed to grammar rules + errorcount = 0 # Used during error recovery + + + # If no lexer was given, we will try to use the lex module + if not lexer: + from . import lex + lexer = lex.lexer + + # Set up the lexer and parser objects on pslice + pslice.lexer = lexer + pslice.parser = self + + # If input was supplied, pass to lexer + if input is not None: + lexer.input(input) + + if tokenfunc is None: + # Tokenize function + get_token = lexer.token + else: + get_token = tokenfunc + + # Set the parser() token method (sometimes used in error recovery) + self.token = get_token + + # Set up the state and symbol stacks + + statestack = [] # Stack of parsing states + self.statestack = statestack + symstack = [] # Stack of grammar symbols + self.symstack = symstack + + pslice.stack = symstack # Put in the production + errtoken = None # Err token + + # The start state is assumed to be (0,$end) + + statestack.append(0) + sym = YaccSymbol() + sym.type = '$end' + symstack.append(sym) + state = 0 + while True: + # Get the next symbol on the input. If a lookahead symbol + # is already set, we just use that. 
Otherwise, we'll pull + # the next token off of the lookaheadstack or from the lexer + + + if state not in defaulted_states: + if not lookahead: + if not lookaheadstack: + lookahead = get_token() # Get the next token + else: + lookahead = lookaheadstack.pop() + if not lookahead: + lookahead = YaccSymbol() + lookahead.type = '$end' + + # Check the action table + ltype = lookahead.type + t = actions[state].get(ltype) + else: + t = defaulted_states[state] + + + if t is not None: + if t > 0: + # shift a symbol on the stack + statestack.append(t) + state = t + + + symstack.append(lookahead) + lookahead = None + + # Decrease error count on successful shift + if errorcount: + errorcount -= 1 + continue + + if t < 0: + # reduce a symbol on the stack, emit a production + p = prod[-t] + pname = p.name + plen = p.len + + # Get production function + sym = YaccSymbol() + sym.type = pname # Production name + sym.value = None + + + if plen: + targ = symstack[-plen-1:] + targ[0] = sym + + + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + # The code enclosed in this section is duplicated + # below as a performance optimization. Make sure + # changes get made in both locations. + + pslice.slice = targ + + try: + # Call the grammar rule with our special slice object + del symstack[-plen:] + self.state = state + p.callable(pslice) + del statestack[-plen:] + symstack.append(sym) + state = goto[statestack[-1]][pname] + statestack.append(state) + except SyntaxError: + # If an error was set. Enter error recovery state + lookaheadstack.append(lookahead) # Save the current lookahead token + symstack.extend(targ[1:-1]) # Put the production slice back on the stack + statestack.pop() # Pop back one state (before the reduce) + state = statestack[-1] + sym.type = 'error' + sym.value = 'error' + lookahead = sym + errorcount = error_count + self.errorok = False + + continue + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + else: + + + targ = [sym] + + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + # The code enclosed in this section is duplicated + # above as a performance optimization. Make sure + # changes get made in both locations. + + pslice.slice = targ + + try: + # Call the grammar rule with our special slice object + self.state = state + p.callable(pslice) + symstack.append(sym) + state = goto[statestack[-1]][pname] + statestack.append(state) + except SyntaxError: + # If an error was set. Enter error recovery state + lookaheadstack.append(lookahead) # Save the current lookahead token + statestack.pop() # Pop back one state (before the reduce) + state = statestack[-1] + sym.type = 'error' + sym.value = 'error' + lookahead = sym + errorcount = error_count + self.errorok = False + + continue + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + if t == 0: + n = symstack[-1] + result = getattr(n, 'value', None) + return result + + if t is None: + + + # We have some kind of parsing error here. To handle + # this, we are going to push the current token onto + # the tokenstack and replace it with an 'error' token. + # If there are any synchronization rules, they may + # catch it. + # + # In addition to pushing the error token, we call call + # the user defined p_error() function if this is the + # first syntax error. This function is only called if + # errorcount == 0. + if errorcount == 0 or self.errorok: + errorcount = error_count + self.errorok = False + errtoken = lookahead + if errtoken.type == '$end': + errtoken = None # End of file! 
+ if self.errorfunc: + if errtoken and not hasattr(errtoken, 'lexer'): + errtoken.lexer = lexer + self.state = state + tok = call_errorfunc(self.errorfunc, errtoken, self) + if self.errorok: + # User must have done some kind of panic + # mode recovery on their own. The + # returned token is the next lookahead + lookahead = tok + errtoken = None + continue + else: + if errtoken: + if hasattr(errtoken, 'lineno'): + lineno = lookahead.lineno + else: + lineno = 0 + if lineno: + sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type)) + else: + sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type) + else: + sys.stderr.write('yacc: Parse error in input. EOF\n') + return + + else: + errorcount = error_count + + # case 1: the statestack only has 1 entry on it. If we're in this state, the + # entire parse has been rolled back and we're completely hosed. The token is + # discarded and we just keep going. + + if len(statestack) <= 1 and lookahead.type != '$end': + lookahead = None + errtoken = None + state = 0 + # Nuke the pushback stack + del lookaheadstack[:] + continue + + # case 2: the statestack has a couple of entries on it, but we're + # at the end of the file. nuke the top entry and generate an error token + + # Start nuking entries on the stack + if lookahead.type == '$end': + # Whoa. We're really hosed here. Bail out + return + + if lookahead.type != 'error': + sym = symstack[-1] + if sym.type == 'error': + # Hmmm. Error is on top of stack, we'll just nuke input + # symbol and continue + lookahead = None + continue + + # Create the error symbol for the first time and make it the new lookahead symbol + t = YaccSymbol() + t.type = 'error' + + if hasattr(lookahead, 'lineno'): + t.lineno = t.endlineno = lookahead.lineno + if hasattr(lookahead, 'lexpos'): + t.lexpos = t.endlexpos = lookahead.lexpos + t.value = lookahead + lookaheadstack.append(lookahead) + lookahead = t + else: + sym = symstack.pop() + statestack.pop() + state = statestack[-1] + + continue + + # Call an error function here + raise RuntimeError('yacc: internal parser error!!!\n') + + #--! parseopt-notrack-end + +# ----------------------------------------------------------------------------- +# === Grammar Representation === +# +# The following functions, classes, and variables are used to represent and +# manipulate the rules that make up a grammar. +# ----------------------------------------------------------------------------- + +# regex matching identifiers +_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$') + +# ----------------------------------------------------------------------------- +# class Production: +# +# This class stores the raw information about a single production or grammar rule. +# A grammar rule refers to a specification such as this: +# +# expr : expr PLUS term +# +# Here are the basic attributes defined on all productions +# +# name - Name of the production. For example 'expr' +# prod - A list of symbols on the right side ['expr','PLUS','term'] +# prec - Production precedence level +# number - Production number. +# func - Function that executes on reduce +# file - File where production function is defined +# lineno - Line number where production function is defined +# +# The following attributes are defined or optional. 
+# +# len - Length of the production (number of symbols on right hand side) +# usyms - Set of unique symbols found in the production +# ----------------------------------------------------------------------------- + +class Production(object): + reduced = 0 + def __init__(self, number, name, prod, precedence=('right', 0), func=None, file='', line=0): + self.name = name + self.prod = tuple(prod) + self.number = number + self.func = func + self.callable = None + self.file = file + self.line = line + self.prec = precedence + + # Internal settings used during table construction + + self.len = len(self.prod) # Length of the production + + # Create a list of unique production symbols used in the production + self.usyms = [] + for s in self.prod: + if s not in self.usyms: + self.usyms.append(s) + + # List of all LR items for the production + self.lr_items = [] + self.lr_next = None + + # Create a string representation + if self.prod: + self.str = '%s -> %s' % (self.name, ' '.join(self.prod)) + else: + self.str = '%s -> ' % self.name + + def __str__(self): + return self.str + + def __repr__(self): + return 'Production(' + str(self) + ')' + + def __len__(self): + return len(self.prod) + + def __nonzero__(self): + return 1 + + def __getitem__(self, index): + return self.prod[index] + + # Return the nth lr_item from the production (or None if at the end) + def lr_item(self, n): + if n > len(self.prod): + return None + p = LRItem(self, n) + # Precompute the list of productions immediately following. + try: + p.lr_after = self.Prodnames[p.prod[n+1]] + except (IndexError, KeyError): + p.lr_after = [] + try: + p.lr_before = p.prod[n-1] + except IndexError: + p.lr_before = None + return p + + # Bind the production function name to a callable + def bind(self, pdict): + if self.func: + self.callable = pdict[self.func] + +# This class serves as a minimal standin for Production objects when +# reading table data from files. It only contains information +# actually used by the LR parsing engine, plus some additional +# debugging information. +class MiniProduction(object): + def __init__(self, str, name, len, func, file, line): + self.name = name + self.len = len + self.func = func + self.callable = None + self.file = file + self.line = line + self.str = str + + def __str__(self): + return self.str + + def __repr__(self): + return 'MiniProduction(%s)' % self.str + + # Bind the production function name to a callable + def bind(self, pdict): + if self.func: + self.callable = pdict[self.func] + + +# ----------------------------------------------------------------------------- +# class LRItem +# +# This class represents a specific stage of parsing a production rule. For +# example: +# +# expr : expr . PLUS term +# +# In the above, the "." represents the current location of the parse. Here +# basic attributes: +# +# name - Name of the production. For example 'expr' +# prod - A list of symbols on the right side ['expr','.', 'PLUS','term'] +# number - Production number. +# +# lr_next Next LR item. Example, if we are ' expr -> expr . PLUS term' +# then lr_next refers to 'expr -> expr PLUS . term' +# lr_index - LR item index (location of the ".") in the prod list. 
+# lookaheads - LALR lookahead symbols for this item +# len - Length of the production (number of symbols on right hand side) +# lr_after - List of all productions that immediately follow +# lr_before - Grammar symbol immediately before +# ----------------------------------------------------------------------------- + +class LRItem(object): + def __init__(self, p, n): + self.name = p.name + self.prod = list(p.prod) + self.number = p.number + self.lr_index = n + self.lookaheads = {} + self.prod.insert(n, '.') + self.prod = tuple(self.prod) + self.len = len(self.prod) + self.usyms = p.usyms + + def __str__(self): + if self.prod: + s = '%s -> %s' % (self.name, ' '.join(self.prod)) + else: + s = '%s -> ' % self.name + return s + + def __repr__(self): + return 'LRItem(' + str(self) + ')' + +# ----------------------------------------------------------------------------- +# rightmost_terminal() +# +# Return the rightmost terminal from a list of symbols. Used in add_production() +# ----------------------------------------------------------------------------- +def rightmost_terminal(symbols, terminals): + i = len(symbols) - 1 + while i >= 0: + if symbols[i] in terminals: + return symbols[i] + i -= 1 + return None + +# ----------------------------------------------------------------------------- +# === GRAMMAR CLASS === +# +# The following class represents the contents of the specified grammar along +# with various computed properties such as first sets, follow sets, LR items, etc. +# This data is used for critical parts of the table generation process later. +# ----------------------------------------------------------------------------- + +class GrammarError(YaccError): + pass + +class Grammar(object): + def __init__(self, terminals): + self.Productions = [None] # A list of all of the productions. The first + # entry is always reserved for the purpose of + # building an augmented grammar + + self.Prodnames = {} # A dictionary mapping the names of nonterminals to a list of all + # productions of that nonterminal. + + self.Prodmap = {} # A dictionary that is only used to detect duplicate + # productions. + + self.Terminals = {} # A dictionary mapping the names of terminal symbols to a + # list of the rules where they are used. + + for term in terminals: + self.Terminals[term] = [] + + self.Terminals['error'] = [] + + self.Nonterminals = {} # A dictionary mapping names of nonterminals to a list + # of rule numbers where they are used. + + self.First = {} # A dictionary of precomputed FIRST(x) symbols + + self.Follow = {} # A dictionary of precomputed FOLLOW(x) symbols + + self.Precedence = {} # Precedence rules for each terminal. Contains tuples of the + # form ('right',level) or ('nonassoc', level) or ('left',level) + + self.UsedPrecedence = set() # Precedence rules that were actually used by the grammer. + # This is only used to provide error checking and to generate + # a warning about unused precedence rules. + + self.Start = None # Starting symbol for the grammar + + + def __len__(self): + return len(self.Productions) + + def __getitem__(self, index): + return self.Productions[index] + + # ----------------------------------------------------------------------------- + # set_precedence() + # + # Sets the precedence for a given terminal. assoc is the associativity such as + # 'left','right', or 'nonassoc'. level is a numeric level. 
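+    #
+    # Illustrative sketch (assuming the usual yacc() driver): a user-level table
+    #
+    #     precedence = (
+    #         ('left',  'PLUS', 'MINUS'),
+    #         ('left',  'TIMES', 'DIVIDE'),
+    #         ('right', 'UMINUS'),
+    #     )
+    #
+    # is translated into calls roughly equivalent to
+    # set_precedence('PLUS', 'left', 1), set_precedence('TIMES', 'left', 2) and
+    # set_precedence('UMINUS', 'right', 3); later rows bind more tightly.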
+ # + # ----------------------------------------------------------------------------- + + def set_precedence(self, term, assoc, level): + assert self.Productions == [None], 'Must call set_precedence() before add_production()' + if term in self.Precedence: + raise GrammarError('Precedence already specified for terminal %r' % term) + if assoc not in ['left', 'right', 'nonassoc']: + raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'") + self.Precedence[term] = (assoc, level) + + # ----------------------------------------------------------------------------- + # add_production() + # + # Given an action function, this function assembles a production rule and + # computes its precedence level. + # + # The production rule is supplied as a list of symbols. For example, + # a rule such as 'expr : expr PLUS term' has a production name of 'expr' and + # symbols ['expr','PLUS','term']. + # + # Precedence is determined by the precedence of the right-most non-terminal + # or the precedence of a terminal specified by %prec. + # + # A variety of error checks are performed to make sure production symbols + # are valid and that %prec is used correctly. + # ----------------------------------------------------------------------------- + + def add_production(self, prodname, syms, func=None, file='', line=0): + + if prodname in self.Terminals: + raise GrammarError('%s:%d: Illegal rule name %r. Already defined as a token' % (file, line, prodname)) + if prodname == 'error': + raise GrammarError('%s:%d: Illegal rule name %r. error is a reserved word' % (file, line, prodname)) + if not _is_identifier.match(prodname): + raise GrammarError('%s:%d: Illegal rule name %r' % (file, line, prodname)) + + # Look for literal tokens + for n, s in enumerate(syms): + if s[0] in "'\"": + try: + c = eval(s) + if (len(c) > 1): + raise GrammarError('%s:%d: Literal token %s in rule %r may only be a single character' % + (file, line, s, prodname)) + if c not in self.Terminals: + self.Terminals[c] = [] + syms[n] = c + continue + except SyntaxError: + pass + if not _is_identifier.match(s) and s != '%prec': + raise GrammarError('%s:%d: Illegal name %r in rule %r' % (file, line, s, prodname)) + + # Determine the precedence level + if '%prec' in syms: + if syms[-1] == '%prec': + raise GrammarError('%s:%d: Syntax error. Nothing follows %%prec' % (file, line)) + if syms[-2] != '%prec': + raise GrammarError('%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule' % + (file, line)) + precname = syms[-1] + prodprec = self.Precedence.get(precname) + if not prodprec: + raise GrammarError('%s:%d: Nothing known about the precedence of %r' % (file, line, precname)) + else: + self.UsedPrecedence.add(precname) + del syms[-2:] # Drop %prec from the rule + else: + # If no %prec, precedence is determined by the rightmost terminal symbol + precname = rightmost_terminal(syms, self.Terminals) + prodprec = self.Precedence.get(precname, ('right', 0)) + + # See if the rule is already in the rulemap + map = '%s -> %s' % (prodname, syms) + if map in self.Prodmap: + m = self.Prodmap[map] + raise GrammarError('%s:%d: Duplicate rule %s. ' % (file, line, m) + + 'Previous definition at %s:%d' % (m.file, m.line)) + + # From this point on, everything is valid. 
Create a new Production instance + pnumber = len(self.Productions) + if prodname not in self.Nonterminals: + self.Nonterminals[prodname] = [] + + # Add the production number to Terminals and Nonterminals + for t in syms: + if t in self.Terminals: + self.Terminals[t].append(pnumber) + else: + if t not in self.Nonterminals: + self.Nonterminals[t] = [] + self.Nonterminals[t].append(pnumber) + + # Create a production and add it to the list of productions + p = Production(pnumber, prodname, syms, prodprec, func, file, line) + self.Productions.append(p) + self.Prodmap[map] = p + + # Add to the global productions list + try: + self.Prodnames[prodname].append(p) + except KeyError: + self.Prodnames[prodname] = [p] + + # ----------------------------------------------------------------------------- + # set_start() + # + # Sets the starting symbol and creates the augmented grammar. Production + # rule 0 is S' -> start where start is the start symbol. + # ----------------------------------------------------------------------------- + + def set_start(self, start=None): + if not start: + start = self.Productions[1].name + if start not in self.Nonterminals: + raise GrammarError('start symbol %s undefined' % start) + self.Productions[0] = Production(0, "S'", [start]) + self.Nonterminals[start].append(0) + self.Start = start + + # ----------------------------------------------------------------------------- + # find_unreachable() + # + # Find all of the nonterminal symbols that can't be reached from the starting + # symbol. Returns a list of nonterminals that can't be reached. + # ----------------------------------------------------------------------------- + + def find_unreachable(self): + + # Mark all symbols that are reachable from a symbol s + def mark_reachable_from(s): + if s in reachable: + return + reachable.add(s) + for p in self.Prodnames.get(s, []): + for r in p.prod: + mark_reachable_from(r) + + reachable = set() + mark_reachable_from(self.Productions[0].prod[0]) + return [s for s in self.Nonterminals if s not in reachable] + + # ----------------------------------------------------------------------------- + # infinite_cycles() + # + # This function looks at the various parsing rules and tries to detect + # infinite recursion cycles (grammar rules where there is no possible way + # to derive a string of only terminals). + # ----------------------------------------------------------------------------- + + def infinite_cycles(self): + terminates = {} + + # Terminals: + for t in self.Terminals: + terminates[t] = True + + terminates['$end'] = True + + # Nonterminals: + + # Initialize to false: + for n in self.Nonterminals: + terminates[n] = False + + # Then propagate termination until no change: + while True: + some_change = False + for (n, pl) in self.Prodnames.items(): + # Nonterminal n terminates iff any of its productions terminates. + for p in pl: + # Production p terminates iff all of its rhs symbols terminate. + for s in p.prod: + if not terminates[s]: + # The symbol s does not terminate, + # so production p does not terminate. + p_terminates = False + break + else: + # didn't break from the loop, + # so every symbol s terminates + # so production p terminates. + p_terminates = True + + if p_terminates: + # symbol n terminates! + if not terminates[n]: + terminates[n] = True + some_change = True + # Don't need to consider any more productions for this n. 
+ break + + if not some_change: + break + + infinite = [] + for (s, term) in terminates.items(): + if not term: + if s not in self.Prodnames and s not in self.Terminals and s != 'error': + # s is used-but-not-defined, and we've already warned of that, + # so it would be overkill to say that it's also non-terminating. + pass + else: + infinite.append(s) + + return infinite + + # ----------------------------------------------------------------------------- + # undefined_symbols() + # + # Find all symbols that were used the grammar, but not defined as tokens or + # grammar rules. Returns a list of tuples (sym, prod) where sym in the symbol + # and prod is the production where the symbol was used. + # ----------------------------------------------------------------------------- + def undefined_symbols(self): + result = [] + for p in self.Productions: + if not p: + continue + + for s in p.prod: + if s not in self.Prodnames and s not in self.Terminals and s != 'error': + result.append((s, p)) + return result + + # ----------------------------------------------------------------------------- + # unused_terminals() + # + # Find all terminals that were defined, but not used by the grammar. Returns + # a list of all symbols. + # ----------------------------------------------------------------------------- + def unused_terminals(self): + unused_tok = [] + for s, v in self.Terminals.items(): + if s != 'error' and not v: + unused_tok.append(s) + + return unused_tok + + # ------------------------------------------------------------------------------ + # unused_rules() + # + # Find all grammar rules that were defined, but not used (maybe not reachable) + # Returns a list of productions. + # ------------------------------------------------------------------------------ + + def unused_rules(self): + unused_prod = [] + for s, v in self.Nonterminals.items(): + if not v: + p = self.Prodnames[s][0] + unused_prod.append(p) + return unused_prod + + # ----------------------------------------------------------------------------- + # unused_precedence() + # + # Returns a list of tuples (term,precedence) corresponding to precedence + # rules that were never used by the grammar. term is the name of the terminal + # on which precedence was applied and precedence is a string such as 'left' or + # 'right' corresponding to the type of precedence. + # ----------------------------------------------------------------------------- + + def unused_precedence(self): + unused = [] + for termname in self.Precedence: + if not (termname in self.Terminals or termname in self.UsedPrecedence): + unused.append((termname, self.Precedence[termname][0])) + + return unused + + # ------------------------------------------------------------------------- + # _first() + # + # Compute the value of FIRST1(beta) where beta is a tuple of symbols. + # + # During execution of compute_first1, the result may be incomplete. + # Afterward (e.g., when called from compute_follow()), it will be complete. + # ------------------------------------------------------------------------- + def _first(self, beta): + + # We are computing First(x1,x2,x3,...,xn) + result = [] + for x in beta: + x_produces_empty = False + + # Add all the non- symbols of First[x] to the result. + for f in self.First[x]: + if f == '': + x_produces_empty = True + else: + if f not in result: + result.append(f) + + if x_produces_empty: + # We have to consider the next x in beta, + # i.e. stay in the loop. + pass + else: + # We don't have to consider any further symbols in beta. 
+ break + else: + # There was no 'break' from the loop, + # so x_produces_empty was true for all x in beta, + # so beta produces empty as well. + result.append('') + + return result + + # ------------------------------------------------------------------------- + # compute_first() + # + # Compute the value of FIRST1(X) for all symbols + # ------------------------------------------------------------------------- + def compute_first(self): + if self.First: + return self.First + + # Terminals: + for t in self.Terminals: + self.First[t] = [t] + + self.First['$end'] = ['$end'] + + # Nonterminals: + + # Initialize to the empty set: + for n in self.Nonterminals: + self.First[n] = [] + + # Then propagate symbols until no change: + while True: + some_change = False + for n in self.Nonterminals: + for p in self.Prodnames[n]: + for f in self._first(p.prod): + if f not in self.First[n]: + self.First[n].append(f) + some_change = True + if not some_change: + break + + return self.First + + # --------------------------------------------------------------------- + # compute_follow() + # + # Computes all of the follow sets for every non-terminal symbol. The + # follow set is the set of all symbols that might follow a given + # non-terminal. See the Dragon book, 2nd Ed. p. 189. + # --------------------------------------------------------------------- + def compute_follow(self, start=None): + # If already computed, return the result + if self.Follow: + return self.Follow + + # If first sets not computed yet, do that first. + if not self.First: + self.compute_first() + + # Add '$end' to the follow list of the start symbol + for k in self.Nonterminals: + self.Follow[k] = [] + + if not start: + start = self.Productions[1].name + + self.Follow[start] = ['$end'] + + while True: + didadd = False + for p in self.Productions[1:]: + # Here is the production set + for i, B in enumerate(p.prod): + if B in self.Nonterminals: + # Okay. We got a non-terminal in a production + fst = self._first(p.prod[i+1:]) + hasempty = False + for f in fst: + if f != '' and f not in self.Follow[B]: + self.Follow[B].append(f) + didadd = True + if f == '': + hasempty = True + if hasempty or i == (len(p.prod)-1): + # Add elements of follow(a) to follow(b) + for f in self.Follow[p.name]: + if f not in self.Follow[B]: + self.Follow[B].append(f) + didadd = True + if not didadd: + break + return self.Follow + + + # ----------------------------------------------------------------------------- + # build_lritems() + # + # This function walks the list of productions and builds a complete set of the + # LR items. The LR items are stored in two ways: First, they are uniquely + # numbered and placed in the list _lritems. Second, a linked list of LR items + # is built for each production. For example: + # + # E -> E PLUS E + # + # Creates the list + # + # [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . 
] + # ----------------------------------------------------------------------------- + + def build_lritems(self): + for p in self.Productions: + lastlri = p + i = 0 + lr_items = [] + while True: + if i > len(p): + lri = None + else: + lri = LRItem(p, i) + # Precompute the list of productions immediately following + try: + lri.lr_after = self.Prodnames[lri.prod[i+1]] + except (IndexError, KeyError): + lri.lr_after = [] + try: + lri.lr_before = lri.prod[i-1] + except IndexError: + lri.lr_before = None + + lastlri.lr_next = lri + if not lri: + break + lr_items.append(lri) + lastlri = lri + i += 1 + p.lr_items = lr_items + +# ----------------------------------------------------------------------------- +# == Class LRTable == +# +# This basic class represents a basic table of LR parsing information. +# Methods for generating the tables are not defined here. They are defined +# in the derived class LRGeneratedTable. +# ----------------------------------------------------------------------------- + +class VersionError(YaccError): + pass + +class LRTable(object): + def __init__(self): + self.lr_action = None + self.lr_goto = None + self.lr_productions = None + self.lr_method = None + + def read_table(self, module): + if isinstance(module, types.ModuleType): + parsetab = module + else: + exec('import %s' % module) + parsetab = sys.modules[module] + + if parsetab._tabversion != __tabversion__: + raise VersionError('yacc table file version is out of date') + + self.lr_action = parsetab._lr_action + self.lr_goto = parsetab._lr_goto + + self.lr_productions = [] + for p in parsetab._lr_productions: + self.lr_productions.append(MiniProduction(*p)) + + self.lr_method = parsetab._lr_method + return parsetab._lr_signature + + def read_pickle(self, filename): + try: + import cPickle as pickle + except ImportError: + import pickle + + if not os.path.exists(filename): + raise ImportError + + in_f = open(filename, 'rb') + + tabversion = pickle.load(in_f) + if tabversion != __tabversion__: + raise VersionError('yacc table file version is out of date') + self.lr_method = pickle.load(in_f) + signature = pickle.load(in_f) + self.lr_action = pickle.load(in_f) + self.lr_goto = pickle.load(in_f) + productions = pickle.load(in_f) + + self.lr_productions = [] + for p in productions: + self.lr_productions.append(MiniProduction(*p)) + + in_f.close() + return signature + + # Bind all production function names to callable objects in pdict + def bind_callables(self, pdict): + for p in self.lr_productions: + p.bind(pdict) + + +# ----------------------------------------------------------------------------- +# === LR Generator === +# +# The following classes and functions are used to generate LR parsing tables on +# a grammar. +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# digraph() +# traverse() +# +# The following two functions are used to compute set valued functions +# of the form: +# +# F(x) = F'(x) U U{F(y) | x R y} +# +# This is used to compute the values of Read() sets as well as FOLLOW sets +# in LALR(1) generation. 
+# +# Inputs: X - An input set +# R - A relation +# FP - Set-valued function +# ------------------------------------------------------------------------------ + +def digraph(X, R, FP): + N = {} + for x in X: + N[x] = 0 + stack = [] + F = {} + for x in X: + if N[x] == 0: + traverse(x, N, stack, F, X, R, FP) + return F + +def traverse(x, N, stack, F, X, R, FP): + stack.append(x) + d = len(stack) + N[x] = d + F[x] = FP(x) # F(X) <- F'(x) + + rel = R(x) # Get y's related to x + for y in rel: + if N[y] == 0: + traverse(y, N, stack, F, X, R, FP) + N[x] = min(N[x], N[y]) + for a in F.get(y, []): + if a not in F[x]: + F[x].append(a) + if N[x] == d: + N[stack[-1]] = MAXINT + F[stack[-1]] = F[x] + element = stack.pop() + while element != x: + N[stack[-1]] = MAXINT + F[stack[-1]] = F[x] + element = stack.pop() + +class LALRError(YaccError): + pass + +# ----------------------------------------------------------------------------- +# == LRGeneratedTable == +# +# This class implements the LR table generation algorithm. There are no +# public methods except for write() +# ----------------------------------------------------------------------------- + +class LRGeneratedTable(LRTable): + def __init__(self, grammar, method='LALR', log=None): + if method not in ['SLR', 'LALR']: + raise LALRError('Unsupported method %s' % method) + + self.grammar = grammar + self.lr_method = method + + # Set up the logger + if not log: + log = NullLogger() + self.log = log + + # Internal attributes + self.lr_action = {} # Action table + self.lr_goto = {} # Goto table + self.lr_productions = grammar.Productions # Copy of grammar Production array + self.lr_goto_cache = {} # Cache of computed gotos + self.lr0_cidhash = {} # Cache of closures + + self._add_count = 0 # Internal counter used to detect cycles + + # Diagonistic information filled in by the table generator + self.sr_conflict = 0 + self.rr_conflict = 0 + self.conflicts = [] # List of conflicts + + self.sr_conflicts = [] + self.rr_conflicts = [] + + # Build the tables + self.grammar.build_lritems() + self.grammar.compute_first() + self.grammar.compute_follow() + self.lr_parse_table() + + # Compute the LR(0) closure operation on I, where I is a set of LR(0) items. + + def lr0_closure(self, I): + self._add_count += 1 + + # Add everything in I to J + J = I[:] + didadd = True + while didadd: + didadd = False + for j in J: + for x in j.lr_after: + if getattr(x, 'lr0_added', 0) == self._add_count: + continue + # Add B --> .G to J + J.append(x.lr_next) + x.lr0_added = self._add_count + didadd = True + + return J + + # Compute the LR(0) goto function goto(I,X) where I is a set + # of LR(0) items and X is a grammar symbol. This function is written + # in a way that guarantees uniqueness of the generated goto sets + # (i.e. the same goto set will never be returned as two different Python + # objects). With uniqueness, we can later do fast set comparisons using + # id(obj) instead of element-wise comparison. 
+ + def lr0_goto(self, I, x): + # First we look for a previously cached entry + g = self.lr_goto_cache.get((id(I), x)) + if g: + return g + + # Now we generate the goto set in a way that guarantees uniqueness + # of the result + + s = self.lr_goto_cache.get(x) + if not s: + s = {} + self.lr_goto_cache[x] = s + + gs = [] + for p in I: + n = p.lr_next + if n and n.lr_before == x: + s1 = s.get(id(n)) + if not s1: + s1 = {} + s[id(n)] = s1 + gs.append(n) + s = s1 + g = s.get('$end') + if not g: + if gs: + g = self.lr0_closure(gs) + s['$end'] = g + else: + s['$end'] = gs + self.lr_goto_cache[(id(I), x)] = g + return g + + # Compute the LR(0) sets of item function + def lr0_items(self): + C = [self.lr0_closure([self.grammar.Productions[0].lr_next])] + i = 0 + for I in C: + self.lr0_cidhash[id(I)] = i + i += 1 + + # Loop over the items in C and each grammar symbols + i = 0 + while i < len(C): + I = C[i] + i += 1 + + # Collect all of the symbols that could possibly be in the goto(I,X) sets + asyms = {} + for ii in I: + for s in ii.usyms: + asyms[s] = None + + for x in asyms: + g = self.lr0_goto(I, x) + if not g or id(g) in self.lr0_cidhash: + continue + self.lr0_cidhash[id(g)] = len(C) + C.append(g) + + return C + + # ----------------------------------------------------------------------------- + # ==== LALR(1) Parsing ==== + # + # LALR(1) parsing is almost exactly the same as SLR except that instead of + # relying upon Follow() sets when performing reductions, a more selective + # lookahead set that incorporates the state of the LR(0) machine is utilized. + # Thus, we mainly just have to focus on calculating the lookahead sets. + # + # The method used here is due to DeRemer and Pennelo (1982). + # + # DeRemer, F. L., and T. J. Pennelo: "Efficient Computation of LALR(1) + # Lookahead Sets", ACM Transactions on Programming Languages and Systems, + # Vol. 4, No. 4, Oct. 1982, pp. 615-649 + # + # Further details can also be found in: + # + # J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing", + # McGraw-Hill Book Company, (1985). + # + # ----------------------------------------------------------------------------- + + # ----------------------------------------------------------------------------- + # compute_nullable_nonterminals() + # + # Creates a dictionary containing all of the non-terminals that might produce + # an empty production. + # ----------------------------------------------------------------------------- + + def compute_nullable_nonterminals(self): + nullable = set() + num_nullable = 0 + while True: + for p in self.grammar.Productions[1:]: + if p.len == 0: + nullable.add(p.name) + continue + for t in p.prod: + if t not in nullable: + break + else: + nullable.add(p.name) + if len(nullable) == num_nullable: + break + num_nullable = len(nullable) + return nullable + + # ----------------------------------------------------------------------------- + # find_nonterminal_trans(C) + # + # Given a set of LR(0) items, this functions finds all of the non-terminal + # transitions. These are transitions in which a dot appears immediately before + # a non-terminal. Returns a list of tuples of the form (state,N) where state + # is the state number and N is the nonterminal symbol. + # + # The input C is the set of LR(0) items. 
+ # ----------------------------------------------------------------------------- + + def find_nonterminal_transitions(self, C): + trans = [] + for stateno, state in enumerate(C): + for p in state: + if p.lr_index < p.len - 1: + t = (stateno, p.prod[p.lr_index+1]) + if t[1] in self.grammar.Nonterminals: + if t not in trans: + trans.append(t) + return trans + + # ----------------------------------------------------------------------------- + # dr_relation() + # + # Computes the DR(p,A) relationships for non-terminal transitions. The input + # is a tuple (state,N) where state is a number and N is a nonterminal symbol. + # + # Returns a list of terminals. + # ----------------------------------------------------------------------------- + + def dr_relation(self, C, trans, nullable): + state, N = trans + terms = [] + + g = self.lr0_goto(C[state], N) + for p in g: + if p.lr_index < p.len - 1: + a = p.prod[p.lr_index+1] + if a in self.grammar.Terminals: + if a not in terms: + terms.append(a) + + # This extra bit is to handle the start state + if state == 0 and N == self.grammar.Productions[0].prod[0]: + terms.append('$end') + + return terms + + # ----------------------------------------------------------------------------- + # reads_relation() + # + # Computes the READS() relation (p,A) READS (t,C). + # ----------------------------------------------------------------------------- + + def reads_relation(self, C, trans, empty): + # Look for empty transitions + rel = [] + state, N = trans + + g = self.lr0_goto(C[state], N) + j = self.lr0_cidhash.get(id(g), -1) + for p in g: + if p.lr_index < p.len - 1: + a = p.prod[p.lr_index + 1] + if a in empty: + rel.append((j, a)) + + return rel + + # ----------------------------------------------------------------------------- + # compute_lookback_includes() + # + # Determines the lookback and includes relations + # + # LOOKBACK: + # + # This relation is determined by running the LR(0) state machine forward. + # For example, starting with a production "N : . A B C", we run it forward + # to obtain "N : A B C ." We then build a relationship between this final + # state and the starting state. These relationships are stored in a dictionary + # lookdict. + # + # INCLUDES: + # + # Computes the INCLUDE() relation (p,A) INCLUDES (p',B). + # + # This relation is used to determine non-terminal transitions that occur + # inside of other non-terminal transition states. (p,A) INCLUDES (p', B) + # if the following holds: + # + # B -> LAT, where T -> epsilon and p' -L-> p + # + # L is essentially a prefix (which may be empty), T is a suffix that must be + # able to derive an empty string. State p' must lead to state p with the string L. + # + # ----------------------------------------------------------------------------- + + def compute_lookback_includes(self, C, trans, nullable): + lookdict = {} # Dictionary of lookback relations + includedict = {} # Dictionary of include relations + + # Make a dictionary of non-terminal transitions + dtrans = {} + for t in trans: + dtrans[t] = 1 + + # Loop over all transitions and compute lookbacks and includes + for state, N in trans: + lookb = [] + includes = [] + for p in C[state]: + if p.name != N: + continue + + # Okay, we have a name match. We now follow the production all the way + # through the state machine until we get the . 
on the right hand side + + lr_index = p.lr_index + j = state + while lr_index < p.len - 1: + lr_index = lr_index + 1 + t = p.prod[lr_index] + + # Check to see if this symbol and state are a non-terminal transition + if (j, t) in dtrans: + # Yes. Okay, there is some chance that this is an includes relation + # the only way to know for certain is whether the rest of the + # production derives empty + + li = lr_index + 1 + while li < p.len: + if p.prod[li] in self.grammar.Terminals: + break # No forget it + if p.prod[li] not in nullable: + break + li = li + 1 + else: + # Appears to be a relation between (j,t) and (state,N) + includes.append((j, t)) + + g = self.lr0_goto(C[j], t) # Go to next set + j = self.lr0_cidhash.get(id(g), -1) # Go to next state + + # When we get here, j is the final state, now we have to locate the production + for r in C[j]: + if r.name != p.name: + continue + if r.len != p.len: + continue + i = 0 + # This look is comparing a production ". A B C" with "A B C ." + while i < r.lr_index: + if r.prod[i] != p.prod[i+1]: + break + i = i + 1 + else: + lookb.append((j, r)) + for i in includes: + if i not in includedict: + includedict[i] = [] + includedict[i].append((state, N)) + lookdict[(state, N)] = lookb + + return lookdict, includedict + + # ----------------------------------------------------------------------------- + # compute_read_sets() + # + # Given a set of LR(0) items, this function computes the read sets. + # + # Inputs: C = Set of LR(0) items + # ntrans = Set of nonterminal transitions + # nullable = Set of empty transitions + # + # Returns a set containing the read sets + # ----------------------------------------------------------------------------- + + def compute_read_sets(self, C, ntrans, nullable): + FP = lambda x: self.dr_relation(C, x, nullable) + R = lambda x: self.reads_relation(C, x, nullable) + F = digraph(ntrans, R, FP) + return F + + # ----------------------------------------------------------------------------- + # compute_follow_sets() + # + # Given a set of LR(0) items, a set of non-terminal transitions, a readset, + # and an include set, this function computes the follow sets + # + # Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)} + # + # Inputs: + # ntrans = Set of nonterminal transitions + # readsets = Readset (previously computed) + # inclsets = Include sets (previously computed) + # + # Returns a set containing the follow sets + # ----------------------------------------------------------------------------- + + def compute_follow_sets(self, ntrans, readsets, inclsets): + FP = lambda x: readsets[x] + R = lambda x: inclsets.get(x, []) + F = digraph(ntrans, R, FP) + return F + + # ----------------------------------------------------------------------------- + # add_lookaheads() + # + # Attaches the lookahead symbols to grammar rules. 
+ # + # Inputs: lookbacks - Set of lookback relations + # followset - Computed follow set + # + # This function directly attaches the lookaheads to productions contained + # in the lookbacks set + # ----------------------------------------------------------------------------- + + def add_lookaheads(self, lookbacks, followset): + for trans, lb in lookbacks.items(): + # Loop over productions in lookback + for state, p in lb: + if state not in p.lookaheads: + p.lookaheads[state] = [] + f = followset.get(trans, []) + for a in f: + if a not in p.lookaheads[state]: + p.lookaheads[state].append(a) + + # ----------------------------------------------------------------------------- + # add_lalr_lookaheads() + # + # This function does all of the work of adding lookahead information for use + # with LALR parsing + # ----------------------------------------------------------------------------- + + def add_lalr_lookaheads(self, C): + # Determine all of the nullable nonterminals + nullable = self.compute_nullable_nonterminals() + + # Find all non-terminal transitions + trans = self.find_nonterminal_transitions(C) + + # Compute read sets + readsets = self.compute_read_sets(C, trans, nullable) + + # Compute lookback/includes relations + lookd, included = self.compute_lookback_includes(C, trans, nullable) + + # Compute LALR FOLLOW sets + followsets = self.compute_follow_sets(trans, readsets, included) + + # Add all of the lookaheads + self.add_lookaheads(lookd, followsets) + + # ----------------------------------------------------------------------------- + # lr_parse_table() + # + # This function constructs the parse tables for SLR or LALR + # ----------------------------------------------------------------------------- + def lr_parse_table(self): + Productions = self.grammar.Productions + Precedence = self.grammar.Precedence + goto = self.lr_goto # Goto array + action = self.lr_action # Action array + log = self.log # Logger for output + + actionp = {} # Action production array (temporary) + + log.info('Parsing method: %s', self.lr_method) + + # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items + # This determines the number of states + + C = self.lr0_items() + + if self.lr_method == 'LALR': + self.add_lalr_lookaheads(C) + + # Build the parser table, state by state + st = 0 + for I in C: + # Loop over each production in I + actlist = [] # List of actions + st_action = {} + st_actionp = {} + st_goto = {} + log.info('') + log.info('state %d', st) + log.info('') + for p in I: + log.info(' (%d) %s', p.number, p) + log.info('') + + for p in I: + if p.len == p.lr_index + 1: + if p.name == "S'": + # Start symbol. Accept! + st_action['$end'] = 0 + st_actionp['$end'] = p + else: + # We are at the end of a production. Reduce! + if self.lr_method == 'LALR': + laheads = p.lookaheads[st] + else: + laheads = self.grammar.Follow[p.name] + for a in laheads: + actlist.append((a, p, 'reduce using rule %d (%s)' % (p.number, p))) + r = st_action.get(a) + if r is not None: + # Whoa. Have a shift/reduce or reduce/reduce conflict + if r > 0: + # Need to decide on shift or reduce here + # By default we favor shifting. Need to add + # some precedence rules here. + + # Shift precedence comes from the token + sprec, slevel = Precedence.get(a, ('right', 0)) + + # Reduce precedence comes from rule being reduced (p) + rprec, rlevel = Productions[p.number].prec + + if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')): + # We really need to reduce here. 
+ st_action[a] = -p.number + st_actionp[a] = p + if not slevel and not rlevel: + log.info(' ! shift/reduce conflict for %s resolved as reduce', a) + self.sr_conflicts.append((st, a, 'reduce')) + Productions[p.number].reduced += 1 + elif (slevel == rlevel) and (rprec == 'nonassoc'): + st_action[a] = None + else: + # Hmmm. Guess we'll keep the shift + if not rlevel: + log.info(' ! shift/reduce conflict for %s resolved as shift', a) + self.sr_conflicts.append((st, a, 'shift')) + elif r < 0: + # Reduce/reduce conflict. In this case, we favor the rule + # that was defined first in the grammar file + oldp = Productions[-r] + pp = Productions[p.number] + if oldp.line > pp.line: + st_action[a] = -p.number + st_actionp[a] = p + chosenp, rejectp = pp, oldp + Productions[p.number].reduced += 1 + Productions[oldp.number].reduced -= 1 + else: + chosenp, rejectp = oldp, pp + self.rr_conflicts.append((st, chosenp, rejectp)) + log.info(' ! reduce/reduce conflict for %s resolved using rule %d (%s)', + a, st_actionp[a].number, st_actionp[a]) + else: + raise LALRError('Unknown conflict in state %d' % st) + else: + st_action[a] = -p.number + st_actionp[a] = p + Productions[p.number].reduced += 1 + else: + i = p.lr_index + a = p.prod[i+1] # Get symbol right after the "." + if a in self.grammar.Terminals: + g = self.lr0_goto(I, a) + j = self.lr0_cidhash.get(id(g), -1) + if j >= 0: + # We are in a shift state + actlist.append((a, p, 'shift and go to state %d' % j)) + r = st_action.get(a) + if r is not None: + # Whoa have a shift/reduce or shift/shift conflict + if r > 0: + if r != j: + raise LALRError('Shift/shift conflict in state %d' % st) + elif r < 0: + # Do a precedence check. + # - if precedence of reduce rule is higher, we reduce. + # - if precedence of reduce is same and left assoc, we reduce. + # - otherwise we shift + + # Shift precedence comes from the token + sprec, slevel = Precedence.get(a, ('right', 0)) + + # Reduce precedence comes from the rule that could have been reduced + rprec, rlevel = Productions[st_actionp[a].number].prec + + if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')): + # We decide to shift here... highest precedence to shift + Productions[st_actionp[a].number].reduced -= 1 + st_action[a] = j + st_actionp[a] = p + if not rlevel: + log.info(' ! shift/reduce conflict for %s resolved as shift', a) + self.sr_conflicts.append((st, a, 'shift')) + elif (slevel == rlevel) and (rprec == 'nonassoc'): + st_action[a] = None + else: + # Hmmm. Guess we'll keep the reduce + if not slevel and not rlevel: + log.info(' ! shift/reduce conflict for %s resolved as reduce', a) + self.sr_conflicts.append((st, a, 'reduce')) + + else: + raise LALRError('Unknown conflict in state %d' % st) + else: + st_action[a] = j + st_actionp[a] = p + + # Print the actions associated with each terminal + _actprint = {} + for a, p, m in actlist: + if a in st_action: + if p is st_actionp[a]: + log.info(' %-15s %s', a, m) + _actprint[(a, m)] = 1 + log.info('') + # Print the actions that were not used. (debugging) + not_used = 0 + for a, p, m in actlist: + if a in st_action: + if p is not st_actionp[a]: + if not (a, m) in _actprint: + log.debug(' ! 
%-15s [ %s ]', a, m) + not_used = 1 + _actprint[(a, m)] = 1 + if not_used: + log.debug('') + + # Construct the goto table for this state + + nkeys = {} + for ii in I: + for s in ii.usyms: + if s in self.grammar.Nonterminals: + nkeys[s] = None + for n in nkeys: + g = self.lr0_goto(I, n) + j = self.lr0_cidhash.get(id(g), -1) + if j >= 0: + st_goto[n] = j + log.info(' %-30s shift and go to state %d', n, j) + + action[st] = st_action + actionp[st] = st_actionp + goto[st] = st_goto + st += 1 + + # ----------------------------------------------------------------------------- + # write() + # + # This function writes the LR parsing tables to a file + # ----------------------------------------------------------------------------- + + def write_table(self, tabmodule, outputdir='', signature=''): + if isinstance(tabmodule, types.ModuleType): + raise IOError("Won't overwrite existing tabmodule") + + basemodulename = tabmodule.split('.')[-1] + filename = os.path.join(outputdir, basemodulename) + '.py' + try: + f = open(filename, 'w') + + f.write(''' +# %s +# This file is automatically generated. Do not edit. +# pylint: disable=W,C,R +_tabversion = %r + +_lr_method = %r + +_lr_signature = %r + ''' % (os.path.basename(filename), __tabversion__, self.lr_method, signature)) + + # Change smaller to 0 to go back to original tables + smaller = 1 + + # Factor out names to try and make smaller + if smaller: + items = {} + + for s, nd in self.lr_action.items(): + for name, v in nd.items(): + i = items.get(name) + if not i: + i = ([], []) + items[name] = i + i[0].append(s) + i[1].append(v) + + f.write('\n_lr_action_items = {') + for k, v in items.items(): + f.write('%r:([' % k) + for i in v[0]: + f.write('%r,' % i) + f.write('],[') + for i in v[1]: + f.write('%r,' % i) + + f.write(']),') + f.write('}\n') + + f.write(''' +_lr_action = {} +for _k, _v in _lr_action_items.items(): + for _x,_y in zip(_v[0],_v[1]): + if not _x in _lr_action: _lr_action[_x] = {} + _lr_action[_x][_k] = _y +del _lr_action_items +''') + + else: + f.write('\n_lr_action = { ') + for k, v in self.lr_action.items(): + f.write('(%r,%r):%r,' % (k[0], k[1], v)) + f.write('}\n') + + if smaller: + # Factor out names to try and make smaller + items = {} + + for s, nd in self.lr_goto.items(): + for name, v in nd.items(): + i = items.get(name) + if not i: + i = ([], []) + items[name] = i + i[0].append(s) + i[1].append(v) + + f.write('\n_lr_goto_items = {') + for k, v in items.items(): + f.write('%r:([' % k) + for i in v[0]: + f.write('%r,' % i) + f.write('],[') + for i in v[1]: + f.write('%r,' % i) + + f.write(']),') + f.write('}\n') + + f.write(''' +_lr_goto = {} +for _k, _v in _lr_goto_items.items(): + for _x, _y in zip(_v[0], _v[1]): + if not _x in _lr_goto: _lr_goto[_x] = {} + _lr_goto[_x][_k] = _y +del _lr_goto_items +''') + else: + f.write('\n_lr_goto = { ') + for k, v in self.lr_goto.items(): + f.write('(%r,%r):%r,' % (k[0], k[1], v)) + f.write('}\n') + + # Write production table + f.write('_lr_productions = [\n') + for p in self.lr_productions: + if p.func: + f.write(' (%r,%r,%d,%r,%r,%d),\n' % (p.str, p.name, p.len, + p.func, os.path.basename(p.file), p.line)) + else: + f.write(' (%r,%r,%d,None,None,None),\n' % (str(p), p.name, p.len)) + f.write(']\n') + f.close() + + except IOError as e: + raise + + + # ----------------------------------------------------------------------------- + # pickle_table() + # + # This function pickles the LR parsing tables to a supplied file object + # 
----------------------------------------------------------------------------- + + def pickle_table(self, filename, signature=''): + try: + import cPickle as pickle + except ImportError: + import pickle + with open(filename, 'wb') as outf: + pickle.dump(__tabversion__, outf, pickle_protocol) + pickle.dump(self.lr_method, outf, pickle_protocol) + pickle.dump(signature, outf, pickle_protocol) + pickle.dump(self.lr_action, outf, pickle_protocol) + pickle.dump(self.lr_goto, outf, pickle_protocol) + + outp = [] + for p in self.lr_productions: + if p.func: + outp.append((p.str, p.name, p.len, p.func, os.path.basename(p.file), p.line)) + else: + outp.append((str(p), p.name, p.len, None, None, None)) + pickle.dump(outp, outf, pickle_protocol) + +# ----------------------------------------------------------------------------- +# === INTROSPECTION === +# +# The following functions and classes are used to implement the PLY +# introspection features followed by the yacc() function itself. +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# get_caller_module_dict() +# +# This function returns a dictionary containing all of the symbols defined within +# a caller further down the call stack. This is used to get the environment +# associated with the yacc() call if none was provided. +# ----------------------------------------------------------------------------- + +def get_caller_module_dict(levels): + f = sys._getframe(levels) + ldict = f.f_globals.copy() + if f.f_globals != f.f_locals: + ldict.update(f.f_locals) + return ldict + +# ----------------------------------------------------------------------------- +# parse_grammar() +# +# This takes a raw grammar rule string and parses it into production data +# ----------------------------------------------------------------------------- +def parse_grammar(doc, file, line): + grammar = [] + # Split the doc string into lines + pstrings = doc.splitlines() + lastp = None + dline = line + for ps in pstrings: + dline += 1 + p = ps.split() + if not p: + continue + try: + if p[0] == '|': + # This is a continuation of a previous rule + if not lastp: + raise SyntaxError("%s:%d: Misplaced '|'" % (file, dline)) + prodname = lastp + syms = p[1:] + else: + prodname = p[0] + lastp = prodname + syms = p[2:] + assign = p[1] + if assign != ':' and assign != '::=': + raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file, dline)) + + grammar.append((file, dline, prodname, syms)) + except SyntaxError: + raise + except Exception: + raise SyntaxError('%s:%d: Syntax error in rule %r' % (file, dline, ps.strip())) + + return grammar + +# ----------------------------------------------------------------------------- +# ParserReflect() +# +# This class represents information extracted for building a parser including +# start symbol, error function, tokens, precedence list, action functions, +# etc. 
+# ----------------------------------------------------------------------------- +class ParserReflect(object): + def __init__(self, pdict, log=None): + self.pdict = pdict + self.start = None + self.error_func = None + self.tokens = None + self.modules = set() + self.grammar = [] + self.error = False + + if log is None: + self.log = PlyLogger(sys.stderr) + else: + self.log = log + + # Get all of the basic information + def get_all(self): + self.get_start() + self.get_error_func() + self.get_tokens() + self.get_precedence() + self.get_pfunctions() + + # Validate all of the information + def validate_all(self): + self.validate_start() + self.validate_error_func() + self.validate_tokens() + self.validate_precedence() + self.validate_pfunctions() + self.validate_modules() + return self.error + + # Compute a signature over the grammar + def signature(self): + parts = [] + try: + if self.start: + parts.append(self.start) + if self.prec: + parts.append(''.join([''.join(p) for p in self.prec])) + if self.tokens: + parts.append(' '.join(self.tokens)) + for f in self.pfuncs: + if f[3]: + parts.append(f[3]) + except (TypeError, ValueError): + pass + return ''.join(parts) + + # ----------------------------------------------------------------------------- + # validate_modules() + # + # This method checks to see if there are duplicated p_rulename() functions + # in the parser module file. Without this function, it is really easy for + # users to make mistakes by cutting and pasting code fragments (and it's a real + # bugger to try and figure out why the resulting parser doesn't work). Therefore, + # we just do a little regular expression pattern matching of def statements + # to try and detect duplicates. + # ----------------------------------------------------------------------------- + + def validate_modules(self): + # Match def p_funcname( + fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(') + + for module in self.modules: + try: + lines, linen = inspect.getsourcelines(module) + except IOError: + continue + + counthash = {} + for linen, line in enumerate(lines): + linen += 1 + m = fre.match(line) + if m: + name = m.group(1) + prev = counthash.get(name) + if not prev: + counthash[name] = linen + else: + filename = inspect.getsourcefile(module) + self.log.warning('%s:%d: Function %s redefined. 
Previously defined on line %d', + filename, linen, name, prev) + + # Get the start symbol + def get_start(self): + self.start = self.pdict.get('start') + + # Validate the start symbol + def validate_start(self): + if self.start is not None: + if not isinstance(self.start, string_types): + self.log.error("'start' must be a string") + + # Look for error handler + def get_error_func(self): + self.error_func = self.pdict.get('p_error') + + # Validate the error function + def validate_error_func(self): + if self.error_func: + if isinstance(self.error_func, types.FunctionType): + ismethod = 0 + elif isinstance(self.error_func, types.MethodType): + ismethod = 1 + else: + self.log.error("'p_error' defined, but is not a function or method") + self.error = True + return + + eline = self.error_func.__code__.co_firstlineno + efile = self.error_func.__code__.co_filename + module = inspect.getmodule(self.error_func) + self.modules.add(module) + + argcount = self.error_func.__code__.co_argcount - ismethod + if argcount != 1: + self.log.error('%s:%d: p_error() requires 1 argument', efile, eline) + self.error = True + + # Get the tokens map + def get_tokens(self): + tokens = self.pdict.get('tokens') + if not tokens: + self.log.error('No token list is defined') + self.error = True + return + + if not isinstance(tokens, (list, tuple)): + self.log.error('tokens must be a list or tuple') + self.error = True + return + + if not tokens: + self.log.error('tokens is empty') + self.error = True + return + + self.tokens = sorted(tokens) + + # Validate the tokens + def validate_tokens(self): + # Validate the tokens. + if 'error' in self.tokens: + self.log.error("Illegal token name 'error'. Is a reserved word") + self.error = True + return + + terminals = set() + for n in self.tokens: + if n in terminals: + self.log.warning('Token %r multiply defined', n) + terminals.add(n) + + # Get the precedence map (if any) + def get_precedence(self): + self.prec = self.pdict.get('precedence') + + # Validate and parse the precedence map + def validate_precedence(self): + preclist = [] + if self.prec: + if not isinstance(self.prec, (list, tuple)): + self.log.error('precedence must be a list or tuple') + self.error = True + return + for level, p in enumerate(self.prec): + if not isinstance(p, (list, tuple)): + self.log.error('Bad precedence table') + self.error = True + return + + if len(p) < 2: + self.log.error('Malformed precedence entry %s. 
Must be (assoc, term, ..., term)', p) + self.error = True + return + assoc = p[0] + if not isinstance(assoc, string_types): + self.log.error('precedence associativity must be a string') + self.error = True + return + for term in p[1:]: + if not isinstance(term, string_types): + self.log.error('precedence items must be strings') + self.error = True + return + preclist.append((term, assoc, level+1)) + self.preclist = preclist + + # Get all p_functions from the grammar + def get_pfunctions(self): + p_functions = [] + for name, item in self.pdict.items(): + if not name.startswith('p_') or name == 'p_error': + continue + if isinstance(item, (types.FunctionType, types.MethodType)): + line = getattr(item, 'co_firstlineno', item.__code__.co_firstlineno) + module = inspect.getmodule(item) + p_functions.append((line, module, name, item.__doc__)) + + # Sort all of the actions by line number; make sure to stringify + # modules to make them sortable, since `line` may not uniquely sort all + # p functions + p_functions.sort(key=lambda p_function: ( + p_function[0], + str(p_function[1]), + p_function[2], + p_function[3])) + self.pfuncs = p_functions + + # Validate all of the p_functions + def validate_pfunctions(self): + grammar = [] + # Check for non-empty symbols + if len(self.pfuncs) == 0: + self.log.error('no rules of the form p_rulename are defined') + self.error = True + return + + for line, module, name, doc in self.pfuncs: + file = inspect.getsourcefile(module) + func = self.pdict[name] + if isinstance(func, types.MethodType): + reqargs = 2 + else: + reqargs = 1 + if func.__code__.co_argcount > reqargs: + self.log.error('%s:%d: Rule %r has too many arguments', file, line, func.__name__) + self.error = True + elif func.__code__.co_argcount < reqargs: + self.log.error('%s:%d: Rule %r requires an argument', file, line, func.__name__) + self.error = True + elif not func.__doc__: + self.log.warning('%s:%d: No documentation string specified in function %r (ignored)', + file, line, func.__name__) + else: + try: + parsed_g = parse_grammar(doc, file, line) + for g in parsed_g: + grammar.append((name, g)) + except SyntaxError as e: + self.log.error(str(e)) + self.error = True + + # Looks like a valid grammar rule + # Mark the file in which defined. + self.modules.add(module) + + # Secondary validation step that looks for p_ definitions that are not functions + # or functions that look like they might be grammar rules. 
+ + for n, v in self.pdict.items(): + if n.startswith('p_') and isinstance(v, (types.FunctionType, types.MethodType)): + continue + if n.startswith('t_'): + continue + if n.startswith('p_') and n != 'p_error': + self.log.warning('%r not defined as a function', n) + if ((isinstance(v, types.FunctionType) and v.__code__.co_argcount == 1) or + (isinstance(v, types.MethodType) and v.__func__.__code__.co_argcount == 2)): + if v.__doc__: + try: + doc = v.__doc__.split(' ') + if doc[1] == ':': + self.log.warning('%s:%d: Possible grammar rule %r defined without p_ prefix', + v.__code__.co_filename, v.__code__.co_firstlineno, n) + except IndexError: + pass + + self.grammar = grammar + +# ----------------------------------------------------------------------------- +# yacc(module) +# +# Build a parser +# ----------------------------------------------------------------------------- + +def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None, + check_recursion=True, optimize=False, write_tables=True, debugfile=debug_file, + outputdir=None, debuglog=None, errorlog=None, picklefile=None): + + if tabmodule is None: + tabmodule = tab_module + + # Reference to the parsing method of the last built parser + global parse + + # If pickling is enabled, table files are not created + if picklefile: + write_tables = 0 + + if errorlog is None: + errorlog = PlyLogger(sys.stderr) + + # Get the module dictionary used for the parser + if module: + _items = [(k, getattr(module, k)) for k in dir(module)] + pdict = dict(_items) + # If no __file__ or __package__ attributes are available, try to obtain them + # from the __module__ instead + if '__file__' not in pdict: + pdict['__file__'] = sys.modules[pdict['__module__']].__file__ + if '__package__' not in pdict and '__module__' in pdict: + if hasattr(sys.modules[pdict['__module__']], '__package__'): + pdict['__package__'] = sys.modules[pdict['__module__']].__package__ + else: + pdict = get_caller_module_dict(2) + + if outputdir is None: + # If no output directory is set, the location of the output files + # is determined according to the following rules: + # - If tabmodule specifies a package, files go into that package directory + # - Otherwise, files go in the same directory as the specifying module + if isinstance(tabmodule, types.ModuleType): + srcfile = tabmodule.__file__ + else: + if '.' not in tabmodule: + srcfile = pdict['__file__'] + else: + parts = tabmodule.split('.') + pkgname = '.'.join(parts[:-1]) + exec('import %s' % pkgname) + srcfile = getattr(sys.modules[pkgname], '__file__', '') + outputdir = os.path.dirname(srcfile) + + # Determine if the module is package of a package or not. + # If so, fix the tabmodule setting so that tables load correctly + pkg = pdict.get('__package__') + if pkg and isinstance(tabmodule, str): + if '.' not in tabmodule: + tabmodule = pkg + '.' 
+ tabmodule + + + + # Set start symbol if it's specified directly using an argument + if start is not None: + pdict['start'] = start + + # Collect parser information from the dictionary + pinfo = ParserReflect(pdict, log=errorlog) + pinfo.get_all() + + if pinfo.error: + raise YaccError('Unable to build parser') + + # Check signature against table files (if any) + signature = pinfo.signature() + + # Read the tables + try: + lr = LRTable() + if picklefile: + read_signature = lr.read_pickle(picklefile) + else: + read_signature = lr.read_table(tabmodule) + if optimize or (read_signature == signature): + try: + lr.bind_callables(pinfo.pdict) + parser = LRParser(lr, pinfo.error_func) + parse = parser.parse + return parser + except Exception as e: + errorlog.warning('There was a problem loading the table file: %r', e) + except VersionError as e: + errorlog.warning(str(e)) + except ImportError: + pass + + if debuglog is None: + if debug: + try: + debuglog = PlyLogger(open(os.path.join(outputdir, debugfile), 'w')) + except IOError as e: + errorlog.warning("Couldn't open %r. %s" % (debugfile, e)) + debuglog = NullLogger() + else: + debuglog = NullLogger() + + debuglog.info('Created by PLY version %s (http://www.dabeaz.com/ply)', __version__) + + errors = False + + # Validate the parser information + if pinfo.validate_all(): + raise YaccError('Unable to build parser') + + if not pinfo.error_func: + errorlog.warning('no p_error() function is defined') + + # Create a grammar object + grammar = Grammar(pinfo.tokens) + + # Set precedence level for terminals + for term, assoc, level in pinfo.preclist: + try: + grammar.set_precedence(term, assoc, level) + except GrammarError as e: + errorlog.warning('%s', e) + + # Add productions to the grammar + for funcname, gram in pinfo.grammar: + file, line, prodname, syms = gram + try: + grammar.add_production(prodname, syms, funcname, file, line) + except GrammarError as e: + errorlog.error('%s', e) + errors = True + + # Set the grammar start symbols + try: + if start is None: + grammar.set_start(pinfo.start) + else: + grammar.set_start(start) + except GrammarError as e: + errorlog.error(str(e)) + errors = True + + if errors: + raise YaccError('Unable to build parser') + + # Verify the grammar structure + undefined_symbols = grammar.undefined_symbols() + for sym, prod in undefined_symbols: + errorlog.error('%s:%d: Symbol %r used, but not defined as a token or a rule', prod.file, prod.line, sym) + errors = True + + unused_terminals = grammar.unused_terminals() + if unused_terminals: + debuglog.info('') + debuglog.info('Unused terminals:') + debuglog.info('') + for term in unused_terminals: + errorlog.warning('Token %r defined, but not used', term) + debuglog.info(' %s', term) + + # Print out all productions to the debug log + if debug: + debuglog.info('') + debuglog.info('Grammar') + debuglog.info('') + for n, p in enumerate(grammar.Productions): + debuglog.info('Rule %-5d %s', n, p) + + # Find unused non-terminals + unused_rules = grammar.unused_rules() + for prod in unused_rules: + errorlog.warning('%s:%d: Rule %r defined, but not used', prod.file, prod.line, prod.name) + + if len(unused_terminals) == 1: + errorlog.warning('There is 1 unused token') + if len(unused_terminals) > 1: + errorlog.warning('There are %d unused tokens', len(unused_terminals)) + + if len(unused_rules) == 1: + errorlog.warning('There is 1 unused rule') + if len(unused_rules) > 1: + errorlog.warning('There are %d unused rules', len(unused_rules)) + + if debug: + debuglog.info('') + 
debuglog.info('Terminals, with rules where they appear') + debuglog.info('') + terms = list(grammar.Terminals) + terms.sort() + for term in terms: + debuglog.info('%-20s : %s', term, ' '.join([str(s) for s in grammar.Terminals[term]])) + + debuglog.info('') + debuglog.info('Nonterminals, with rules where they appear') + debuglog.info('') + nonterms = list(grammar.Nonterminals) + nonterms.sort() + for nonterm in nonterms: + debuglog.info('%-20s : %s', nonterm, ' '.join([str(s) for s in grammar.Nonterminals[nonterm]])) + debuglog.info('') + + if check_recursion: + unreachable = grammar.find_unreachable() + for u in unreachable: + errorlog.warning('Symbol %r is unreachable', u) + + infinite = grammar.infinite_cycles() + for inf in infinite: + errorlog.error('Infinite recursion detected for symbol %r', inf) + errors = True + + unused_prec = grammar.unused_precedence() + for term, assoc in unused_prec: + errorlog.error('Precedence rule %r defined for unknown symbol %r', assoc, term) + errors = True + + if errors: + raise YaccError('Unable to build parser') + + # Run the LRGeneratedTable on the grammar + if debug: + errorlog.debug('Generating %s tables', method) + + lr = LRGeneratedTable(grammar, method, debuglog) + + if debug: + num_sr = len(lr.sr_conflicts) + + # Report shift/reduce and reduce/reduce conflicts + if num_sr == 1: + errorlog.warning('1 shift/reduce conflict') + elif num_sr > 1: + errorlog.warning('%d shift/reduce conflicts', num_sr) + + num_rr = len(lr.rr_conflicts) + if num_rr == 1: + errorlog.warning('1 reduce/reduce conflict') + elif num_rr > 1: + errorlog.warning('%d reduce/reduce conflicts', num_rr) + + # Write out conflicts to the output file + if debug and (lr.sr_conflicts or lr.rr_conflicts): + debuglog.warning('') + debuglog.warning('Conflicts:') + debuglog.warning('') + + for state, tok, resolution in lr.sr_conflicts: + debuglog.warning('shift/reduce conflict for %s in state %d resolved as %s', tok, state, resolution) + + already_reported = set() + for state, rule, rejected in lr.rr_conflicts: + if (state, id(rule), id(rejected)) in already_reported: + continue + debuglog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule) + debuglog.warning('rejected rule (%s) in state %d', rejected, state) + errorlog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule) + errorlog.warning('rejected rule (%s) in state %d', rejected, state) + already_reported.add((state, id(rule), id(rejected))) + + warned_never = [] + for state, rule, rejected in lr.rr_conflicts: + if not rejected.reduced and (rejected not in warned_never): + debuglog.warning('Rule (%s) is never reduced', rejected) + errorlog.warning('Rule (%s) is never reduced', rejected) + warned_never.append(rejected) + + # Write the table file if requested + if write_tables: + try: + lr.write_table(tabmodule, outputdir, signature) + if tabmodule in sys.modules: + del sys.modules[tabmodule] + except IOError as e: + errorlog.warning("Couldn't create %r. %s" % (tabmodule, e)) + + # Write a pickled version of the tables + if picklefile: + try: + lr.pickle_table(picklefile, signature) + except IOError as e: + errorlog.warning("Couldn't create %r. 
%s" % (picklefile, e)) + + # Build the parser + lr.bind_callables(pinfo.pdict) + parser = LRParser(lr, pinfo.error_func) + + parse = parser.parse + return parser diff --git a/docker-compose.yml b/docker-compose.yml index 6b25f80cf75..af3e559e7f4 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -152,7 +152,7 @@ services: - "127.0.0.1:5433:5433" testrunner: - image: ghcr.io/datadog/dd-trace-py/testrunner@sha256:4c8afd048321e702f3605b4ae4d206fcd00e74bac708089cfe7f9c24383dc53b + image: ghcr.io/datadog/dd-trace-py/testrunner@sha256:8ca43d46ff34e078bd7bc0662e74e6be38547a98140a5cd4203805f6b214b583 command: bash environment: - TOX_SKIP_DIST=True diff --git a/docs/configuration.rst b/docs/configuration.rst index 3afe7ee817f..9fb0cb0a7ef 100644 --- a/docs/configuration.rst +++ b/docs/configuration.rst @@ -10,337 +10,354 @@ see specific integration documentation for more details. The following environment variables for the tracer are supported: +Unified Service Tagging +----------------------- .. ddtrace-configuration-options:: + DD_ENV: description: | Set an application's environment e.g. ``prod``, ``pre-prod``, ``staging``. Added in ``v0.36.0``. See `Unified Service Tagging`_ for more information. DD_SERVICE: default: (autodetected) + description: | Set the service name to be used for this application. A default is provided for these integrations: :ref:`bottle`, :ref:`flask`, :ref:`grpc`, :ref:`pyramid`, :ref:`tornado`, :ref:`celery`, :ref:`django` and :ref:`falcon`. Added in ``v0.36.0``. See `Unified Service Tagging`_ for more information. - DD_SERVICE_MAPPING: - description: | - Define service name mappings to allow renaming services in traces, e.g. ``postgres:postgresql,defaultdb:postgresql``. - - DD_TAGS: - description: | - Set global tags to be attached to every span. Value must be either comma or space separated. e.g. ``key1:value1,key2:value2`` or ``key1:value key2:value2``. - - If a tag value is not supplied the value will be an empty string. e.g. ``key1,key2`` or ``key1 key2``. - version_added: - v0.38.0: Comma separated support added - v0.48.0: Space separated support added - - DD_TRACE_PROPAGATION_HTTP_BAGGAGE_ENABLED: - type: Boolean - default: False - description: | - Enables propagation of baggage items through http headers with prefix ``ot-baggage-``. - version_added: - v2.4.0: - DD_VERSION: description: | Set an application's version in traces and logs e.g. ``1.2.3``, ``6c44da20``, ``2020.02.13``. Generally set along with ``DD_SERVICE``. See `Unified Service Tagging`_ for more information. + version_added: v0.36.0: - DD_SITE: - default: datadoghq.com - description: | - Specify which site to use for uploading profiles and logs. Set to - ``datadoghq.eu`` to use EU site. +Traces +------ - DD_TRACE_ENABLED: - type: Boolean +.. ddtrace-configuration-options:: + DD__DISTRIBUTED_TRACING: default: True + description: | - Enable sending of spans to the Agent. Note that instrumentation will still be installed and spans will be - generated. + Enables distributed tracing for the specified . + version_added: - v0.41.0: | - Formerly named ``DATADOG_TRACE_ENABLED`` + v2.7.0: - DD_TRACE_OTEL_ENABLED: - type: Boolean + DD__SERVICE: + type: String + default: + + description: | + Set the service name, allowing default service name overrides for traces for the specific . + + version_added: + v2.11.0: + + DD_ASGI_TRACE_WEBSOCKET: default: False + description: | - When used with ``ddtrace-run`` this configuration enables OpenTelemetry support. 
To enable OpenTelemetry without `ddtrace-run` refer - to the following :mod:`docs `. + Enables tracing ASGI websockets. Please note that the websocket span duration will last until the + connection is closed, which can result in long running spans. + version_added: - v1.12.0: - - DD_RUNTIME_METRICS_ENABLED: + v2.7.0: + + DD_BOTOCORE_EMPTY_POLL_ENABLED: + type: Boolean + default: True + + description: | + Enables creation of consumer span when AWS SQS and AWS Kinesis ``poll()`` operations return no records. When disabled, no consumer span is created + if no records are returned. + + version_added: + v2.6.0: + + DD_BOTOCORE_PROPAGATION_ENABLED: + type: Boolean + default: False + + description: | + Enables trace context propagation connecting producer and consumer spans within a single trace for AWS SQS, SNS, and Kinesis messaging services. + + version_added: + v2.6.0: + + DD_HTTP_SERVER_TAG_QUERY_STRING: type: Boolean - default: False + default: True + description: Send query strings in http.url tag in http server integrations. + + DD_SERVICE_MAPPING: description: | - When used with ``ddtrace-run`` this configuration enables sending runtime metrics to Datadog. - These metrics track the memory management and concurrency of the python runtime. - Refer to the following `docs ` _ for more information. + Define service name mappings to allow renaming services in traces, e.g. ``postgres:postgresql,defaultdb:postgresql``. - DD_INSTRUMENTATION_TELEMETRY_ENABLED: + DD_TRACE__ENABLED: type: Boolean default: True + description: | - Enables sending :ref:`telemetry ` events to the agent. + Enables to be patched. For example, ``DD_TRACE_DJANGO_ENABLED=false`` will disable the Django + integration from being installed. + + version_added: + v0.41.0: DD_TRACE_128_BIT_TRACEID_GENERATION_ENABLED: type: Boolean default: False + description: | This configuration enables the generation of 128 bit trace ids. + version_added: v1.12.0: - DD_TRACE_DEBUG: - type: Boolean - default: False + DD_TRACE_API_VERSION: + default: | + ``v0.5`` + description: | - Enables debug logging in the tracer. + The trace API version to use when sending traces to the Datadog agent. - Can be used with `DD_TRACE_LOG_FILE` to route logs to a file. + Currently, the supported versions are: ``v0.4`` and ``v0.5``. + version_added: - v0.41.0: | - Formerly named ``DATADOG_TRACE_DEBUG`` + v0.56.0: + v1.7.0: default changed to ``v0.5``. + v1.19.1: default reverted to ``v0.4``. + v2.4.0: default changed to ``v0.5``. - DD_TRACE_LOG_FILE_LEVEL: - default: DEBUG - description: | - Configures the ``RotatingFileHandler`` used by the `ddtrace` logger to write logs to a file based on the level specified. - Defaults to `DEBUG`, but will accept the values found in the standard **logging** library, such as WARNING, ERROR, and INFO, - if further customization is needed. Files are not written to unless ``DD_TRACE_LOG_FILE`` has been defined. + DD_TRACE_CLOUD_PAYLOAD_TAGGING_MAX_DEPTH: + type: Integer + default: 10 + description: | + Sets the depth of expanding the JSON AWS payload after which we stop creating tags. + version_added: + v2.17.0: - DD_TRACE_LOG_FILE: - description: | - Directs `ddtrace` logs to a specific file. Note: The default backup count is 1. For larger logs, use with ``DD_TRACE_LOG_FILE_SIZE_BYTES``. - To fine tune the logging level, use with ``DD_TRACE_LOG_FILE_LEVEL``. 
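As an aside on the ``DD_SERVICE_MAPPING`` format documented above (comma-separated ``from_service:to_service`` pairs), the value reduces to a plain dictionary. A minimal sketch of parsing that documented syntax, for illustration only and not ddtrace's internal parser:

import os

def parse_service_mapping(raw: str) -> dict:
    """Parse e.g. 'postgres:postgresql,defaultdb:postgresql' into a dict."""
    mapping = {}
    for pair in raw.split(","):
        pair = pair.strip()
        if not pair:
            continue
        from_service, _, to_service = pair.partition(":")
        mapping[from_service] = to_service
    return mapping

# Example: DD_SERVICE_MAPPING="postgres:postgresql,defaultdb:postgresql"
print(parse_service_mapping(os.environ.get("DD_SERVICE_MAPPING", "postgres:postgresql")))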
+ DD_TRACE_CLOUD_PAYLOAD_TAGGING_MAX_TAGS: + type: Integer + default: 758 + description: | + Sets the the maximum number of tags that will be added when expanding an AWS payload. + version_added: + v2.17.0: - DD_TRACE_LOG_FILE_SIZE_BYTES: - type: Int - default: 15728640 - description: | - Max size for a file when used with `DD_TRACE_LOG_FILE`. When a log has exceeded this size, there will be one backup log file created. - In total, the files will store ``2 * DD_TRACE_LOG_FILE_SIZE_BYTES`` worth of logs. + DD_TRACE_CLOUD_PAYLOAD_TAGGING_SERVICES: + type: Set + default: {"s3", "sns", "sqs", "kinesis", "eventbridge"} + description: | + Sets the enabled AWS services to be expanded when AWS payload tagging is enabled. + version_added: + v2.17.0: - DD_TRACE__ENABLED: + DD_TRACE_CLOUD_REQUEST_PAYLOAD_TAGGING: + type: String + default: None + description: | + Enables AWS request payload tagging when set to ``"all"`` or a valid comma-separated list of ``JSONPath``\s. + version_added: + v2.17.0: + + DD_TRACE_CLOUD_RESPONSE_PAYLOAD_TAGGING: + type: String + default: None + description: | + Enables AWS response payload tagging when set to ``"all"`` or a valid comma-separated list of ``JSONPath``\s. + version_added: + v2.17.0: + + DD_TRACE_ENABLED: type: Boolean default: True + description: | - Enables to be patched. For example, ``DD_TRACE_DJANGO_ENABLED=false`` will disable the Django - integration from being installed. + Enable sending of spans to the Agent. Note that instrumentation will still be installed and spans will be + generated. + version_added: - v0.41.0: + v0.41.0: | + Formerly named ``DATADOG_TRACE_ENABLED`` - DD_PATCH_MODULES: + DD_TRACE_HEADER_TAGS: description: | - Override the modules patched for this execution of the program. Must be - a list in the ``module1:boolean,module2:boolean`` format. For example, - ``boto:true,redis:false``. - version_added: - v0.55.0: | - Formerly named ``DATADOG_PATCH_MODULES`` + A map of case-insensitive http headers to tag names. Automatically applies matching header values as tags on request and response spans. For example if + ``DD_TRACE_HEADER_TAGS=User-Agent:http.useragent,content-type:http.content_type``. The value of the header will be stored in tags with the name ``http.useragent`` and ``http.content_type``. - DD_LOGS_INJECTION: + If a tag name is not supplied the header name will be used. For example if + ``DD_TRACE_HEADER_TAGS=User-Agent,content-type``. The value of http header will be stored in tags with the names ``http..headers.user-agent`` and ``http..headers.content-type``. + + DD_TRACE_HTTP_CLIENT_TAG_QUERY_STRING: type: Boolean - default: False - description: Enables :ref:`Logs Injection`. + default: True + description: Send query strings in http.url tag in http client integrations. - DD_AGENT_HOST: + DD_TRACE_HTTP_SERVER_ERROR_STATUSES: type: String - default: | - ``localhost`` + default: "500-599" + description: | - The host name to use to connect the Datadog agent for traces. The host name - can be IPv4, IPv6, or a domain name. If ``DD_TRACE_AGENT_URL`` is specified, the - value of ``DD_AGENT_HOST`` is ignored. - - Example for IPv4: ``DD_AGENT_HOST=192.168.10.1`` - - Example for IPv6: ``DD_AGENT_HOST=2001:db8:3333:4444:CCCC:DDDD:EEEE:FFFF`` + Comma-separated list of HTTP status codes that should be considered errors when returned by an HTTP request. + Multiple comma separated error ranges can be set (ex: ``200,400-404,500-599``). + The status codes are used to set the ``error`` field on the span. 
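To make the new ``DD_TRACE_CLOUD_PAYLOAD_TAGGING_MAX_DEPTH`` and ``DD_TRACE_CLOUD_PAYLOAD_TAGGING_MAX_TAGS`` limits concrete: AWS payload tagging flattens a nested request/response body into dotted span-tag keys, stopping at a maximum nesting depth and a maximum number of tags. The sketch below only illustrates that idea; the tag prefix and the exact truncation behaviour are assumptions, not ddtrace's implementation.

def flatten_payload(payload, prefix="aws.request.body", max_depth=10, max_tags=758):
    """Flatten nested dicts/lists into dotted tag keys, bounded by depth and tag count."""
    tags = {}

    def visit(value, key, depth):
        if len(tags) >= max_tags:  # stop once the tag budget is spent
            return
        if isinstance(value, (dict, list)) and depth < max_depth:
            items = value.items() if isinstance(value, dict) else enumerate(value)
            for sub_key, sub_value in items:
                visit(sub_value, f"{key}.{sub_key}", depth + 1)
        else:
            # Beyond max_depth the remainder is collapsed into one tag here;
            # the real behaviour may simply stop creating tags instead.
            tags[key] = str(value)

    visit(payload, prefix, 0)
    return tags

print(flatten_payload({
    "QueueUrl": "https://sqs.us-east-1.amazonaws.com/123456789012/my-queue",
    "MessageAttributes": {"team": {"StringValue": "apm"}},
}))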
- Example for domain name: ``DD_AGENT_HOST=host`` + DD_TRACE_METHODS: + type: String + default: "" + + description: | + Specify methods to trace. For example: ``mod.submod:method1,method2;mod.submod:Class.method1``. + Note that this setting is only compatible with ``ddtrace-run``, and that it doesn't work for methods implemented + by libraries for which there's an integration in ``ddtrace/contrib``. + version_added: - v0.17.0: - v1.7.0: + v2.1.0: - DD_TRACE_AGENT_URL: - type: URL + DD_TRACE_OBFUSCATION_QUERY_STRING_REGEXP: default: | - ``unix:///var/run/datadog/apm.socket`` if available - otherwise ``http://localhost:8126`` + ``'(?ix)(?:(?:"|%22)?)(?:(?:old[-_]?|new[-_]?)?p(?:ass)?w(?:or)?d(?:1|2)?|pass(?:[-_]?phrase)?|secret|(?:api[-_]?|private[-_]?|public[-_]?|access[-_]?|secret[-_]?)key(?:[-_]?id)?|token|consumer[-_]?(?:id|key|secret)|sign(?:ed|ature)?|auth(?:entication|orization)?)(?:(?:\\s|%20)*(?:=|%3D)[^&]+|(?:"|%22)(?:\\s|%20)*(?::|%3A)(?:\\s|%20)*(?:"|%22)(?:%2[^2]|%[^2]|[^"%])+(?:"|%22))|(?: bearer(?:\\s|%20)+[a-z0-9._\\-]+|token(?::|%3A)[a-z0-9]{13}|gh[opsu]_[0-9a-zA-Z]{36}|ey[I-L](?:[\\w=-]|%3D)+\\.ey[I-L](?:[\\w=-]|%3D)+(?:\\.(?:[\\w.+/=-]|%3D|%2F|%2B)+)?|-{5}BEGIN(?:[a-z\\s]|%20)+PRIVATE(?:\\s|%20)KEY-{5}[^\\-]+-{5}END(?:[a-z\\s]|%20)+PRIVATE(?:\\s|%20)KEY(?:-{5})?(?:\\n|%0A)?|(?:ssh-(?:rsa|dss)|ecdsa-[a-z0-9]+-[a-z0-9]+)(?:\\s|%20|%09)+(?:[a-z0-9/.+]|%2F|%5C|%2B){100,}(?:=|%3D)*(?:(?:\\s|%20|%09)+[a-z0-9._-]+)?)'`` + + description: A regexp to redact sensitive query strings. Obfuscation disabled if set to empty string + + version_added: + v1.19.0: | + ``DD_TRACE_OBFUSCATION_QUERY_STRING_REGEXP`` replaces ``DD_TRACE_OBFUSCATION_QUERY_STRING_PATTERN`` which is deprecated + and will be deleted in 2.0.0 + + DD_TRACE_OTEL_ENABLED: + type: Boolean + default: False + description: | - The URL to use to connect the Datadog agent for traces. The url can start with - ``http://`` to connect using HTTP or with ``unix://`` to use a Unix - Domain Socket. + When used with ``ddtrace-run`` this configuration enables OpenTelemetry support. To enable OpenTelemetry without `ddtrace-run` refer + to the following :mod:`docs `. + + version_added: + v1.12.0: - Example for http url: ``DD_TRACE_AGENT_URL=http://localhost:8126`` + DD_TRACE_PARTIAL_FLUSH_ENABLED: + type: Boolean + default: True + description: Prevents large payloads being sent to APM. - Example for UDS: ``DD_TRACE_AGENT_URL=unix:///var/run/datadog/apm.socket`` + DD_TRACE_PARTIAL_FLUSH_MIN_SPANS: + type: Integer + default: 300 + description: Maximum number of spans sent per trace per payload when ``DD_TRACE_PARTIAL_FLUSH_ENABLED=True``. - DD_DOGSTATSD_URL: - type: URL + DD_TRACE_PROPAGATION_EXTRACT_FIRST: + type: Boolean + default: False + description: Whether the propagator stops after extracting the first header. + + version_added: + v2.3.0: + + DD_TRACE_PROPAGATION_HTTP_BAGGAGE_ENABLED: + type: Boolean + default: False + + description: | + Enables propagation of baggage items through http headers with prefix ``ot-baggage-``. + + version_added: + v2.4.0: + + DD_TRACE_PROPAGATION_STYLE: default: | - ``unix:///var/run/datadog/dsd.socket`` if available - otherwise ``udp://localhost:8125`` + ``datadog,tracecontext`` + description: | - The URL to use to connect the Datadog agent for Dogstatsd metrics. The url can start with - ``udp://`` to connect using UDP or with ``unix://`` to use a Unix - Domain Socket. 
+ Comma separated list of propagation styles used for extracting trace context from inbound request headers and injecting trace context into outbound request headers. - Example for UDP url: ``DD_DOGSTATSD_URL=udp://localhost:8125`` + Overridden by ``DD_TRACE_PROPAGATION_STYLE_EXTRACT`` for extraction. - Example for UDS: ``DD_DOGSTATSD_URL=unix:///var/run/datadog/dsd.socket`` + Overridden by ``DD_TRACE_PROPAGATION_STYLE_INJECT`` for injection. - DD_TRACE_AGENT_TIMEOUT_SECONDS: - type: Float - default: 2.0 - description: The timeout in float to use to connect to the Datadog agent. + The supported values are ``datadog``, ``b3multi``, and ``b3 single header``, ``tracecontext``, and ``none``. + + When checking inbound request headers we will take the first valid trace context in the order provided. + When ``none`` is the only propagator listed, propagation is disabled. + + All provided styles are injected into the headers of outbound requests. + + Example: ``DD_TRACE_PROPAGATION_STYLE="datadog,b3"`` to check for both ``x-datadog-*`` and ``x-b3-*`` + headers when parsing incoming request headers for a trace context. In addition, to inject both ``x-datadog-*`` and ``x-b3-*`` + headers into outbound requests. + + version_added: + v1.7.0: The ``b3multi`` propagation style was added and ``b3`` was deprecated in favor it. + v1.7.0: Added support for ``tracecontext`` W3C headers. Changed the default value to ``DD_TRACE_PROPAGATION_STYLE="tracecontext,datadog"``. + v2.6.0: Updated default value to ``datadog,tracecontext``. + + DD_TRACE_SPAN_TRACEBACK_MAX_SIZE: + type: Integer + default: 30 + + description: | + The maximum length of a traceback included in a span. + + version_added: + v2.3.0: DD_TRACE_WRITER_BUFFER_SIZE_BYTES: type: Int default: 8388608 description: The max size in bytes of traces to buffer between flushes to the agent. - DD_TRACE_WRITER_MAX_PAYLOAD_SIZE_BYTES: - type: Int - default: 8388608 - description: | - The max size in bytes of each payload item sent to the trace agent. If the max payload size is greater than buffer size, - then max size of each payload item will be the buffer size. - DD_TRACE_WRITER_INTERVAL_SECONDS: type: Float default: 1.0 description: The time between each flush of traces to the trace agent. - DD_TRACE_STARTUP_LOGS: - type: Boolean - default: False - description: Enable or disable start up diagnostic logging. - - DD_TRACE_RATE_LIMIT: - type: int - default: 100 - description: | - Maximum number of traces per second to sample. Set a rate limit to avoid the ingestion volume overages in the case of traffic spikes. This configuration - is only applied when client based sampling is configured, otherwise agent based rate limits are used. - version_added: - v0.33.0: - v2.15.0: Only applied when DD_TRACE_SAMPLE_RATE, DD_TRACE_SAMPLING_RULES, or DD_SPAN_SAMPLING_RULE are set. - - DD_TRACE_SAMPLING_RULES: - type: JSON array - description: | - A JSON array of objects. Each object must have a “sample_rate”, and the “name”, “service”, "resource", and "tags" fields are optional. The “sample_rate” value must be between 0.0 and 1.0 (inclusive). - - **Example:** ``DD_TRACE_SAMPLING_RULES='[{"sample_rate":0.5,"service":"my-service","resource":"my-url","tags":{"my-tag":"example"}}]'`` - - **Note** that the JSON object must be included in single quotes (') to avoid problems with escaping of the double quote (") character.' 
- version_added: - v1.19.0: added support for "resource" - v1.20.0: added support for "tags" - v2.8.0: added lazy sampling support, so that spans are evaluated at the end of the trace, guaranteeing more metadata to evaluate against. - - DD_SPAN_SAMPLING_RULES: - type: string - description: | - A JSON array of objects. Each object must have a "name" and/or "service" field, while the "max_per_second" and "sample_rate" fields are optional. - The "sample_rate" value must be between 0.0 and 1.0 (inclusive), and will default to 1.0 (100% sampled). - The "max_per_second" value must be >= 0 and will default to no limit. - The "service" and "name" fields can be glob patterns: - "*" matches any substring, including the empty string, - "?" matches exactly one of any character, and any other character matches exactly one of itself. - - **Example:** ``DD_SPAN_SAMPLING_RULES='[{"sample_rate":0.5,"service":"my-serv*","name":"flask.re?uest"}]'`` - - version_added: - v1.4.0: - - DD_SPAN_SAMPLING_RULES_FILE: - type: string + DD_TRACE_WRITER_MAX_PAYLOAD_SIZE_BYTES: + type: Int + default: 8388608 + description: | - A path to a JSON file containing span sampling rules organized as JSON array of objects. - For the rules each object must have a "name" and/or "service" field, and the "sample_rate" field is optional. - The "sample_rate" value must be between 0.0 and 1.0 (inclusive), and will default to 1.0 (100% sampled). - The "max_per_second" value must be >= 0 and will default to no limit. - The "service" and "name" fields are glob patterns, where "glob" means: - "*" matches any substring, including the empty string, - "?" matches exactly one of any character, and any other character matches exactly one of itself. - - **Example:** ``DD_SPAN_SAMPLING_RULES_FILE="data/span_sampling_rules.json"'`` - **Example File Contents:** ``[{"sample_rate":0.5,"service":"*-service","name":"my-name-????", "max_per_second":"20"}, {"service":"xy?","name":"a*c"}]`` - - version_added: - v1.4.0: + The max size in bytes of each payload item sent to the trace agent. If the max payload size is greater than buffer size, + then max size of each payload item will be the buffer size. - DD_TRACE_HEADER_TAGS: + DD_TRACE_X_DATADOG_TAGS_MAX_LENGTH: + type: Integer + default: 512 + description: | - A map of case-insensitive http headers to tag names. Automatically applies matching header values as tags on request and response spans. For example if - ``DD_TRACE_HEADER_TAGS=User-Agent:http.useragent,content-type:http.content_type``. The value of the header will be stored in tags with the name ``http.useragent`` and ``http.content_type``. - - If a tag name is not supplied the header name will be used. For example if - ``DD_TRACE_HEADER_TAGS=User-Agent,content-type``. The value of http header will be stored in tags with the names ``http..headers.user-agent`` and ``http..headers.content-type``. + The maximum length of ``x-datadog-tags`` header allowed in the Datadog propagation style. + Must be a value between 0 to 512. If 0, propagation of ``x-datadog-tags`` is disabled. - DD_TRACE_API_VERSION: - default: | - ``v0.5`` + DD_UNLOAD_MODULES_FROM_SITECUSTOMIZE: + type: String + default: "auto" + description: | - The trace API version to use when sending traces to the Datadog agent. - - Currently, the supported versions are: ``v0.4`` and ``v0.5``. - version_added: - v0.56.0: - v1.7.0: default changed to ``v0.5``. - v1.19.1: default reverted to ``v0.4``. - v2.4.0: default changed to ``v0.5``. 
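The glob semantics documented for ``DD_SPAN_SAMPLING_RULES`` (``*`` matches any substring including the empty string, ``?`` matches exactly one character) can be expressed as an anchored regular expression. A rough sketch of how a rule's ``service``/``name`` patterns would match span attributes, shown for illustration only and not taken from ddtrace's matcher:

import re

def glob_to_regex(pattern: str):
    """Translate the documented '*' / '?' wildcards into an anchored regex."""
    parts = []
    for ch in pattern:
        if ch == "*":
            parts.append(".*")
        elif ch == "?":
            parts.append(".")
        else:
            parts.append(re.escape(ch))
    return re.compile("^" + "".join(parts) + "$")

# The documented example rule {"service": "my-serv*", "name": "flask.re?uest"}:
print(bool(glob_to_regex("my-serv*").match("my-service")))          # True
print(bool(glob_to_regex("flask.re?uest").match("flask.request")))  # True
print(bool(glob_to_regex("my-serv*").match("other-service")))       # False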
- - DD_TRACE_OBFUSCATION_QUERY_STRING_REGEXP: - default: | - ``'(?ix)(?:(?:"|%22)?)(?:(?:old[-_]?|new[-_]?)?p(?:ass)?w(?:or)?d(?:1|2)?|pass(?:[-_]?phrase)?|secret|(?:api[-_]?|private[-_]?|public[-_]?|access[-_]?|secret[-_]?)key(?:[-_]?id)?|token|consumer[-_]?(?:id|key|secret)|sign(?:ed|ature)?|auth(?:entication|orization)?)(?:(?:\\s|%20)*(?:=|%3D)[^&]+|(?:"|%22)(?:\\s|%20)*(?::|%3A)(?:\\s|%20)*(?:"|%22)(?:%2[^2]|%[^2]|[^"%])+(?:"|%22))|(?: bearer(?:\\s|%20)+[a-z0-9._\\-]+|token(?::|%3A)[a-z0-9]{13}|gh[opsu]_[0-9a-zA-Z]{36}|ey[I-L](?:[\\w=-]|%3D)+\\.ey[I-L](?:[\\w=-]|%3D)+(?:\\.(?:[\\w.+/=-]|%3D|%2F|%2B)+)?|-{5}BEGIN(?:[a-z\\s]|%20)+PRIVATE(?:\\s|%20)KEY-{5}[^\\-]+-{5}END(?:[a-z\\s]|%20)+PRIVATE(?:\\s|%20)KEY(?:-{5})?(?:\\n|%0A)?|(?:ssh-(?:rsa|dss)|ecdsa-[a-z0-9]+-[a-z0-9]+)(?:\\s|%20|%09)+(?:[a-z0-9/.+]|%2F|%5C|%2B){100,}(?:=|%3D)*(?:(?:\\s|%20|%09)+[a-z0-9._-]+)?)'`` - description: A regexp to redact sensitive query strings. Obfuscation disabled if set to empty string + Controls whether module cloning logic is executed by ``ddtrace-run``. Module cloning involves saving copies of dependency modules for internal use by ``ddtrace`` + that will be unaffected by future imports of and changes to those modules by application code. Valid values for this variable are ``1``, ``0``, and ``auto``. ``1`` tells + ``ddtrace`` to run its module cloning logic unconditionally, ``0`` tells it not to run that logic, and ``auto`` tells it to run module cloning logic only if ``gevent`` + is accessible from the application's runtime. + version_added: - v1.19.0: | - ``DD_TRACE_OBFUSCATION_QUERY_STRING_REGEXP`` replaces ``DD_TRACE_OBFUSCATION_QUERY_STRING_PATTERN`` which is deprecated - and will be deleted in 2.0.0 - - DD_TRACE_PROPAGATION_STYLE: - default: | - ``datadog,tracecontext`` - description: | - Comma separated list of propagation styles used for extracting trace context from inbound request headers and injecting trace context into outbound request headers. - - Overridden by ``DD_TRACE_PROPAGATION_STYLE_EXTRACT`` for extraction. - - Overridden by ``DD_TRACE_PROPAGATION_STYLE_INJECT`` for injection. - - The supported values are ``datadog``, ``b3multi``, and ``b3 single header``, ``tracecontext``, and ``none``. - - When checking inbound request headers we will take the first valid trace context in the order provided. - When ``none`` is the only propagator listed, propagation is disabled. - - All provided styles are injected into the headers of outbound requests. + v1.9.0: - Example: ``DD_TRACE_PROPAGATION_STYLE="datadog,b3"`` to check for both ``x-datadog-*`` and ``x-b3-*`` - headers when parsing incoming request headers for a trace context. In addition, to inject both ``x-datadog-*`` and ``x-b3-*`` - headers into outbound requests. +Trace Context propagation +------------------------- - version_added: - v1.7.0: The ``b3multi`` propagation style was added and ``b3`` was deprecated in favor it. - v1.7.0: Added support for ``tracecontext`` W3C headers. Changed the default value to ``DD_TRACE_PROPAGATION_STYLE="tracecontext,datadog"``. - v2.6.0: Updated default value to ``datadog,tracecontext``. +.. ddtrace-configuration-options:: DD_TRACE_PROPAGATION_STYLE_EXTRACT: default: | ``datadog,tracecontext`` + description: | Comma separated list of propagation styles used for extracting trace context from inbound request headers. 
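For orientation on the propagation styles referenced above and below, the request headers commonly associated with each supported value are summarised in the sketch below. The mapping is a convenience summary and may not be exhaustive; the propagator documentation remains authoritative.

# Headers typically read/written per propagation style (illustrative summary).
PROPAGATION_HEADERS = {
    "datadog": ["x-datadog-trace-id", "x-datadog-parent-id",
                "x-datadog-sampling-priority", "x-datadog-tags"],
    "b3multi": ["x-b3-traceid", "x-b3-spanid", "x-b3-sampled"],
    "b3 single header": ["b3"],
    "tracecontext": ["traceparent", "tracestate"],
    "none": [],  # propagation disabled when this is the only style listed
}

for style, headers in PROPAGATION_HEADERS.items():
    print(f"{style}: {', '.join(headers) or '(no headers)'}")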
@@ -360,6 +377,7 @@ The following environment variables for the tracer are supported: DD_TRACE_PROPAGATION_STYLE_INJECT: default: | ``tracecontext,datadog`` + description: | Comma separated list of propagation styles used for injecting trace context into outbound request headers. @@ -376,101 +394,49 @@ The following environment variables for the tracer are supported: version_added: v1.7.0: The ``b3multi`` propagation style was added and ``b3`` was deprecated in favor it. - DD_TRACE_PROPAGATION_EXTRACT_FIRST: - type: Boolean - default: False - description: Whether the propagator stops after extracting the first header. - version_added: - v2.3.0: - - DD_TRACE_X_DATADOG_TAGS_MAX_LENGTH: - type: Integer - default: 512 - description: | - The maximum length of ``x-datadog-tags`` header allowed in the Datadog propagation style. - Must be a value between 0 to 512. If 0, propagation of ``x-datadog-tags`` is disabled. - - DD_TRACE_PARTIAL_FLUSH_ENABLED: - type: Boolean - default: True - description: Prevents large payloads being sent to APM. - - DD_ASGI_TRACE_WEBSOCKET: - default: False - description: | - Enables tracing ASGI websockets. Please note that the websocket span duration will last until the - connection is closed, which can result in long running spans. +AppSec +------ - version_added: - v2.7.0: +.. ddtrace-configuration-options:: - DD_TRACE_PARTIAL_FLUSH_MIN_SPANS: - type: Integer - default: 300 - description: Maximum number of spans sent per trace per payload when ``DD_TRACE_PARTIAL_FLUSH_ENABLED=True``. + DD_APPSEC_AUTOMATED_USER_EVENTS_TRACKING: + type: String + default: "safe" + + description: | + Sets the mode for the automated user login events tracking feature which sets some traces on each user login event. The + supported modes are ``safe`` which will only store the user id or primary key, ``extended`` which will also store + the username, email and full name and ``disabled``. Note that this feature requires ``DD_APPSEC_ENABLED`` to be + set to ``true`` to work. + + version_added: + v1.17.0: Added support to the Django integration. No other integrations support this configuration. DD_APPSEC_ENABLED: type: Boolean default: False description: Whether to enable AppSec monitoring. - DD_APPSEC_SCA_ENABLED: - type: Boolean - default: None - description: Whether to enable/disable SCA (Software Composition Analysis). - - DD_APPSEC_RULES: - type: String - description: Path to a json file containing AppSec rules. - - DD_COMPILE_DEBUG: - type: Boolean - default: False - description: Compile Cython extensions in RelWithDebInfo mode (with debug info, but no debug code or asserts) - DD_APPSEC_OBFUSCATION_PARAMETER_KEY_REGEXP: default: | ``(?i)(?:p(?:ass)?w(?:or)?d|pass(?:_?phrase)?|secret|(?:api_?|private_?|public_?)key)|token|consumer_?(?:id|key|secret)|sign(?:ed|ature)|bearer|authorization`` + description: Sensitive parameter key regexp for obfuscation. DD_APPSEC_OBFUSCATION_PARAMETER_VALUE_REGEXP: default: | ``(?i)(?:p(?:ass)?w(?:or)?d|pass(?:_?phrase)?|secret|(?:api_?|private_?|public_?|access_?|secret_?)key(?:_?id)?|token|consumer_?(?:id|key|secret)|sign(?:ed|ature)?|auth(?:entication|orization)?)(?:\s*=[^;]|"\s*:\s*"[^"]+")|bearer\s+[a-z0-9\._\-]+|token:[a-z0-9]{13}|gh[opsu]_[0-9a-zA-Z]{36}|ey[I-L][\w=-]+\.ey[I-L][\w=-]+(?:\.[\w.+\/=-]+)?|[\-]{5}BEGIN[a-z\s]+PRIVATE\sKEY[\-]{5}[^\-]+[\-]{5}END[a-z\s]+PRIVATE\sKEY|ssh-rsa\s*[a-z0-9\/\.+]{100,}`` + description: Sensitive parameter value regexp for obfuscation. 
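To show what this family of obfuscation patterns does in practice, the sketch below redacts the values of sensitive-looking parameters in a query string. The pattern is a deliberately simplified stand-in for the much more thorough defaults listed above, and the ``<redacted>`` placeholder is an assumption made purely for illustration.

import re

# Simplified stand-in for the sensitive-key patterns shown above (illustrative).
SENSITIVE_PAIR = re.compile(r"(?i)(password|passwd|pwd|secret|api_?key|token|authorization)=[^&]*")

def obfuscate_query_string(query: str) -> str:
    """Replace values of sensitive-looking query parameters with a placeholder."""
    return SENSITIVE_PAIR.sub(lambda m: m.group(1) + "=<redacted>", query)

print(obfuscate_query_string("user=jane&password=hunter2&api_key=abc123"))
# user=jane&password=<redacted>&api_key=<redacted>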
- DD_SUBPROCESS_SENSITIVE_WILDCARDS: + DD_APPSEC_RULES: type: String - description: | - Add more possible matches to the internal list of subprocess execution argument scrubbing. Must be a comma-separated list and - each item can take `fnmatch` style wildcards, for example: ``*ssn*,*personalid*,*idcard*,*creditcard*``. - - DD_TRACE_HTTP_CLIENT_TAG_QUERY_STRING: - type: Boolean - default: True - description: Send query strings in http.url tag in http client integrations. + description: Path to a json file containing AppSec rules. - DD_HTTP_SERVER_TAG_QUERY_STRING: + DD_APPSEC_SCA_ENABLED: type: Boolean - default: True - description: Send query strings in http.url tag in http server integrations. - - DD_TRACE_HTTP_SERVER_ERROR_STATUSES: - type: String - default: "500-599" - description: | - Comma-separated list of HTTP status codes that should be considered errors when returned by an HTTP request. - Multiple comma separated error ranges can be set (ex: ``200,400-404,500-599``). - The status codes are used to set the ``error`` field on the span. - - DD_TRACE_METHODS: - type: String - default: "" - description: | - Specify methods to trace. For example: ``mod.submod:method1,method2;mod.submod:Class.method1``. - Note that this setting is only compatible with ``ddtrace-run``, and that it doesn't work for methods implemented - by libraries for which there's an integration in ``ddtrace/contrib``. - version_added: - v2.1.0: + default: None + description: Whether to enable/disable SCA (Software Composition Analysis). DD_IAST_ENABLED: type: Boolean @@ -482,184 +448,398 @@ The following environment variables for the tracer are supported: default: 2 description: Number of requests analyzed at the same time. - DD_IAST_VULNERABILITIES_PER_REQUEST: - type: Integer - default: 2 - description: Number of vulnerabilities reported in each request. - - DD_IAST_WEAK_HASH_ALGORITHMS: - type: String - default: "MD5,SHA1" - description: Weak hashing algorithms that should be reported, comma separated. - - DD_IAST_WEAK_CIPHER_ALGORITHMS: - type: String - default: "DES,Blowfish,RC2,RC4,IDEA" - description: Weak cipher algorithms that should be reported, comma separated. - - DD_IAST_REDACTION_ENABLED: - type: Boolean - default: True - description: | - Replace potentially sensitive information in the vulnerability report, like passwords with ``*`` for non tainted strings and ``abcde...`` - for tainted ones. This will use the regular expressions of the two next settings to decide what to scrub. - version_added: - v1.17.0: + DD_IAST_REDACTION_ENABLED: + type: Boolean + default: True + + description: | + Replace potentially sensitive information in the vulnerability report, like passwords with ``*`` for non tainted strings and ``abcde...`` + for tainted ones. This will use the regular expressions of the two next settings to decide what to scrub. + + version_added: + v1.17.0: DD_IAST_REDACTION_NAME_PATTERN: type: String + default: | ``(?i)^.*(?:p(?:ass)?w(?:or)?d|pass(?:_?phrase)?|secret|(?:api_?|private_?|public_?|access_?|secret_?)key(?:_?id)?|token|consumer_?(?:id|key|secret)|sign(?:ed|ature)?|auth(?:entication|orization)?)`` + description: | Regular expression containing key or name style strings matched against vulnerability origin and evidence texts. If it matches, the scrubbing of the report will be enabled. 
+ version_added: v1.17.0: DD_IAST_REDACTION_VALUE_PATTERN: type: String + default: | ``(?i)bearer\s+[a-z0-9\._\-]+|token:[a-z0-9]{13}|gh[opsu]_[0-9a-zA-Z]{36}|ey[I-L][\w=-]+\.ey[I-L][\w=-]+(\.[\w.+\/=-]+)?|[\-]{5}BEGIN[a-z\s]+PRIVATE\sKEY[\-]{5}[^\-]+[\-]{5}END[a-z\s]+PRIVATE\sKEY|ssh-rsa\s*[a-z0-9\/\.+]{100,}`` + description: | Regular expression containing value style strings matched against vulnerability origin and evidence texts. If it matches, the scrubbing of the report will be enabled. + version_added: v1.17.0: - DD_UNLOAD_MODULES_FROM_SITECUSTOMIZE: + DD_IAST_VULNERABILITIES_PER_REQUEST: + type: Integer + default: 2 + description: Number of vulnerabilities reported in each request. + + DD_IAST_WEAK_HASH_ALGORITHMS: type: String - default: "auto" - description: | - Controls whether module cloning logic is executed by ``ddtrace-run``. Module cloning involves saving copies of dependency modules for internal use by ``ddtrace`` - that will be unaffected by future imports of and changes to those modules by application code. Valid values for this variable are ``1``, ``0``, and ``auto``. ``1`` tells - ``ddtrace`` to run its module cloning logic unconditionally, ``0`` tells it not to run that logic, and ``auto`` tells it to run module cloning logic only if ``gevent`` - is accessible from the application's runtime. - version_added: - v1.9.0: + default: "MD5,SHA1" + description: Weak hashing algorithms that should be reported, comma separated. - DD_TEST_SESSION_NAME: + DD_IAST_WEAK_CIPHER_ALGORITHMS: type: String - default: (autodetected) - description: | - Configures the ``CIVisibility`` service to use the given string as the value of the ``test_session.name`` tag in - test events. If not specified, this string will be constructed from the CI job id (if available) and the test - command used to start the test session. - version_added: - v2.16.0: + default: "DES,Blowfish,RC2,RC4,IDEA" + description: Weak cipher algorithms that should be reported, comma separated. + + +Test Visibility +--------------- + +.. ddtrace-configuration-options:: DD_CIVISIBILITY_AGENTLESS_ENABLED: type: Boolean default: False + description: | Configures the ``CIVisibility`` service to use a test-reporting ``CIVisibilityWriter``. This writer sends payloads for traces on which it's used to the intake endpoint for Datadog CI Visibility. If there is a reachable Datadog agent that supports proxying these requests, the writer will send its payloads to that agent instead. + version_added: v1.12.0: DD_CIVISIBILITY_AGENTLESS_URL: type: String default: "" + description: | Configures the ``CIVisibility`` service to send event payloads to the specified host. If unspecified, the host "https://citestcycle-intake." is used, where ```` is replaced by that environment variable's value, or "datadoghq.com" if unspecified. + version_added: v1.13.0: DD_CIVISIBILITY_ITR_ENABLED: type: Boolean default: True + description: | Configures the ``CIVisibility`` service to query the Datadog API to decide whether to enable the Datadog `Intelligent Test Runner _`. Setting the variable to ``false`` will skip querying the API and disable code coverage collection and test skipping. + version_added: v1.13.0: DD_CIVISIBILITY_LOG_LEVEL: type: String default: "info" + description: | Configures the ``CIVisibility`` service to replace the default Datadog logger's stream handler with one that only displays messages related to the ``CIVisibility`` service, at a level of or higher than the given log level. The Datadog logger's file handler is unaffected. 
Valid, case-insensitive, values are ``critical``, ``error``, ``warning``, ``info``, or ``debug``. A value of ``none`` silently disables the logger. Note: enabling debug logging with the ``DD_TRACE_DEBUG`` environment variable overrides this behavior. + version_added: v2.5.0: - DD_APPSEC_AUTOMATED_USER_EVENTS_TRACKING: + DD_TEST_SESSION_NAME: + type: String + default: (autodetected) + + description: | + Configures the ``CIVisibility`` service to use the given string as the value of the ``test_session.name`` tag in + test events. If not specified, this string will be constructed from the CI job id (if available) and the test + command used to start the test session. + + version_added: + v2.16.0: + + +Agent +----- + +.. ddtrace-configuration-options:: + + DD_AGENT_HOST: + type: String + + default: | + ``localhost`` + + description: | + The host name to use to connect the Datadog agent for traces. The host name + can be IPv4, IPv6, or a domain name. If ``DD_TRACE_AGENT_URL`` is specified, the + value of ``DD_AGENT_HOST`` is ignored. + + Example for IPv4: ``DD_AGENT_HOST=192.168.10.1`` + + Example for IPv6: ``DD_AGENT_HOST=2001:db8:3333:4444:CCCC:DDDD:EEEE:FFFF`` + + Example for domain name: ``DD_AGENT_HOST=host`` + + version_added: + v0.17.0: + v1.7.0: + + DD_DOGSTATSD_URL: + type: URL + + default: | + ``unix:///var/run/datadog/dsd.socket`` if available + otherwise ``udp://localhost:8125`` + + description: | + The URL to use to connect the Datadog agent for Dogstatsd metrics. The url can start with + ``udp://`` to connect using UDP or with ``unix://`` to use a Unix + Domain Socket. + + Example for UDP url: ``DD_DOGSTATSD_URL=udp://localhost:8125`` + + Example for UDS: ``DD_DOGSTATSD_URL=unix:///var/run/datadog/dsd.socket`` + + DD_PATCH_MODULES: + description: | + Override the modules patched for this execution of the program. Must be + a list in the ``module1:boolean,module2:boolean`` format. For example, + ``boto:true,redis:false``. + + version_added: + v0.55.0: | + Formerly named ``DATADOG_PATCH_MODULES`` + + DD_SITE: + default: datadoghq.com + + description: | + Specify which site to use for uploading profiles and logs. Set to + ``datadoghq.eu`` to use EU site. + + DD_TAGS: + description: | + Set global tags to be attached to every span. Value must be either comma or space separated. e.g. ``key1:value1,key2:value2`` or ``key1:value key2:value2``. + + If a tag value is not supplied the value will be an empty string. e.g. ``key1,key2`` or ``key1 key2``. + + version_added: + v0.38.0: Comma separated support added + v0.48.0: Space separated support added + + DD_TRACE_AGENT_TIMEOUT_SECONDS: + type: Float + default: 2.0 + description: The timeout in float to use to connect to the Datadog agent. + + DD_TRACE_AGENT_URL: + type: URL + + default: | + ``unix:///var/run/datadog/apm.socket`` if available + otherwise ``http://localhost:8126`` + + description: | + The URL to use to connect the Datadog agent for traces. The url can start with + ``http://`` to connect using HTTP or with ``unix://`` to use a Unix + Domain Socket. + + Example for http url: ``DD_TRACE_AGENT_URL=http://localhost:8126`` + + Example for UDS: ``DD_TRACE_AGENT_URL=unix:///var/run/datadog/apm.socket`` + +Logs +---- + +.. ddtrace-configuration-options:: + + DD_LOGS_INJECTION: + type: Boolean + default: False + description: Enables :ref:`Logs Injection`. + + DD_TRACE_DEBUG: + type: Boolean + default: False + + description: | + Enables debug logging in the tracer. + + Can be used with `DD_TRACE_LOG_FILE` to route logs to a file. 
+ + version_added: + v0.41.0: | + Formerly named ``DATADOG_TRACE_DEBUG`` + + DD_TRACE_LOG_FILE: + description: | + Directs `ddtrace` logs to a specific file. Note: The default backup count is 1. For larger logs, use with ``DD_TRACE_LOG_FILE_SIZE_BYTES``. + To fine tune the logging level, use with ``DD_TRACE_LOG_FILE_LEVEL``. + + DD_TRACE_LOG_FILE_LEVEL: + default: DEBUG + + description: | + Configures the ``RotatingFileHandler`` used by the `ddtrace` logger to write logs to a file based on the level specified. + Defaults to `DEBUG`, but will accept the values found in the standard **logging** library, such as WARNING, ERROR, and INFO, + if further customization is needed. Files are not written to unless ``DD_TRACE_LOG_FILE`` has been defined. + + DD_TRACE_LOG_FILE_SIZE_BYTES: + type: Int + default: 15728640 + + description: | + Max size for a file when used with `DD_TRACE_LOG_FILE`. When a log has exceeded this size, there will be one backup log file created. + In total, the files will store ``2 * DD_TRACE_LOG_FILE_SIZE_BYTES`` worth of logs. + + DD_TRACE_STARTUP_LOGS: + type: Boolean + default: False + description: Enable or disable start up diagnostic logging. + +Sampling +-------- + +.. ddtrace-configuration-options:: + + DD_SPAN_SAMPLING_RULES: + type: string + + description: | + A JSON array of objects. Each object must have a "name" and/or "service" field, while the "max_per_second" and "sample_rate" fields are optional. + The "sample_rate" value must be between 0.0 and 1.0 (inclusive), and will default to 1.0 (100% sampled). + The "max_per_second" value must be >= 0 and will default to no limit. + The "service" and "name" fields can be glob patterns: + "*" matches any substring, including the empty string, + "?" matches exactly one of any character, and any other character matches exactly one of itself. + + **Example:** ``DD_SPAN_SAMPLING_RULES='[{"sample_rate":0.5,"service":"my-serv*","name":"flask.re?uest"}]'`` + + version_added: + v1.4.0: + + DD_SPAN_SAMPLING_RULES_FILE: + type: string + + description: | + A path to a JSON file containing span sampling rules organized as JSON array of objects. + For the rules each object must have a "name" and/or "service" field, and the "sample_rate" field is optional. + The "sample_rate" value must be between 0.0 and 1.0 (inclusive), and will default to 1.0 (100% sampled). + The "max_per_second" value must be >= 0 and will default to no limit. + The "service" and "name" fields are glob patterns, where "glob" means: + "*" matches any substring, including the empty string, + "?" matches exactly one of any character, and any other character matches exactly one of itself. + + **Example:** ``DD_SPAN_SAMPLING_RULES_FILE="data/span_sampling_rules.json"'`` + **Example File Contents:** ``[{"sample_rate":0.5,"service":"*-service","name":"my-name-????", "max_per_second":"20"}, {"service":"xy?","name":"a*c"}]`` + + version_added: + v1.4.0: + + DD_TRACE_RATE_LIMIT: + type: int + default: 100 + + description: | + Maximum number of traces per second to sample. Set a rate limit to avoid the ingestion volume overages in the case of traffic spikes. This configuration + is only applied when client based sampling is configured, otherwise agent based rate limits are used. + + version_added: + v0.33.0: + v2.15.0: Only applied when DD_TRACE_SAMPLE_RATE, DD_TRACE_SAMPLING_RULES, or DD_SPAN_SAMPLING_RULE are set. + + DD_TRACE_SAMPLING_RULES: + type: JSON array + + description: | + A JSON array of objects. 
Each object must have a “sample_rate”, and the “name”, “service”, "resource", and "tags" fields are optional. The “sample_rate” value must be between 0.0 and 1.0 (inclusive). + + **Example:** ``DD_TRACE_SAMPLING_RULES='[{"sample_rate":0.5,"service":"my-service","resource":"my-url","tags":{"my-tag":"example"}}]'`` + + **Note** that the JSON object must be included in single quotes (') to avoid problems with escaping of the double quote (") character.' + + version_added: + v1.19.0: added support for "resource" + v1.20.0: added support for "tags" + v2.8.0: added lazy sampling support, so that spans are evaluated at the end of the trace, guaranteeing more metadata to evaluate against. + +Other +----- + +.. ddtrace-configuration-options:: + + DD_COMPILE_DEBUG: + type: Boolean + default: False + description: Compile Cython extensions in RelWithDebInfo mode (with debug info, but no debug code or asserts) + + DD_INSTRUMENTATION_TELEMETRY_ENABLED: + type: Boolean + default: True + + description: | + Enables sending :ref:`telemetry ` events to the agent. + + DD_RUNTIME_METRICS_ENABLED: + type: Boolean + default: False + + description: | + When used with ``ddtrace-run`` this configuration enables sending runtime metrics to Datadog. + These metrics track the memory management and concurrency of the python runtime. + Refer to the following `docs ` _ for more information. + + DD_SUBPROCESS_SENSITIVE_WILDCARDS: + type: String + + description: | + Add more possible matches to the internal list of subprocess execution argument scrubbing. Must be a comma-separated list and + each item can take `fnmatch` style wildcards, for example: ``*ssn*,*personalid*,*idcard*,*creditcard*``. + + DD_USER_MODEL_EMAIL_FIELD: type: String - default: "safe" + default: "" + description: | - Sets the mode for the automated user login events tracking feature which sets some traces on each user login event. The - supported modes are ``safe`` which will only store the user id or primary key, ``extended`` which will also store - the username, email and full name and ``disabled``. Note that this feature requires ``DD_APPSEC_ENABLED`` to be - set to ``true`` to work. + Field to be used to read the user email when using a custom ``User`` model for the automatic login events. This field will take precedence over automatic inference. + version_added: - v1.17.0: Added support to the Django integration. No other integrations support this configuration. + v1.15.0: DD_USER_MODEL_LOGIN_FIELD: type: String default: "" + description: | Field to be used to read the user login when using a custom ``User`` model for the automatic login events. This field will take precedence over automatic inference. Please note that, if set, this field will be used to retrieve the user login even if ``DD_APPSEC_AUTOMATED_USER_EVENTS_TRACKING`` is set to ``safe`` and, in some cases, the selected field could hold potentially private information. - version_added: - v1.15.0: - - DD_USER_MODEL_EMAIL_FIELD: - type: String - default: "" - description: | - Field to be used to read the user email when using a custom ``User`` model for the automatic login events. This field will take precedence over automatic inference. + version_added: v1.15.0: DD_USER_MODEL_NAME_FIELD: type: String default: "" + description: | Field to be used to read the user name when using a custom ``User`` model for the automatic login events. This field will take precedence over automatic inference. 
+ version_added: v1.15.0: - DD_TRACE_SPAN_TRACEBACK_MAX_SIZE: - type: Integer - default: 30 - description: | - The maximum length of a traceback included in a span. - version_added: - v2.3.0: - - DD_BOTOCORE_SERVICE: - type: String - default: "aws" - description: | - Set the service name, allowing default service name overrides for traces in botocore. - version_added: - v2.11.0: - - DD_BOTOCORE_PROPAGATION_ENABLED: - type: Boolean - default: False - description: | - Enables trace context propagation connecting producer and consumer spans within a single trace for AWS SQS, SNS, and Kinesis messaging services. - version_added: - v2.6.0: - - DD_BOTOCORE_EMPTY_POLL_ENABLED: - type: Boolean - default: True - description: | - Enables creation of consumer span when AWS SQS and AWS Kinesis ``poll()`` operations return no records. When disabled, no consumer span is created - if no records are returned. - version_added: - v2.6.0: - .. _Unified Service Tagging: https://docs.datadoghq.com/getting_started/tagging/unified_service_tagging/ diff --git a/docs/spelling_wordlist.txt b/docs/spelling_wordlist.txt index cb88fa64a33..ed29d8fd07d 100644 --- a/docs/spelling_wordlist.txt +++ b/docs/spelling_wordlist.txt @@ -92,6 +92,7 @@ entrypoints env enqueuer eol +eventbridge exec fastapi formatter @@ -123,6 +124,7 @@ JSON jinja js kafka +kinesis kombu kubernetes kwarg @@ -218,9 +220,11 @@ sanic screenshots serializable sha +sns sql sqlalchemy sqlite +sqs stacktrace starlette statsd diff --git a/lib-injection/sources/sitecustomize.py b/lib-injection/sources/sitecustomize.py index 6c9b38e6dca..4ad07f4c60e 100644 --- a/lib-injection/sources/sitecustomize.py +++ b/lib-injection/sources/sitecustomize.py @@ -48,6 +48,16 @@ def parse_version(version: str) -> Tuple: EXECUTABLE_DENY_LOCATION = os.path.abspath(os.path.join(SCRIPT_DIR, "denied_executables.txt")) +def get_oci_ddtrace_version(): + version_path = os.path.join(SCRIPT_DIR, "version") + try: + with open(version_path, "r") as version_file: + return version_file.read().strip() + except Exception: + _log("Failed to read version file %s" % (version_path,), level="debug") + return "unknown" + + def build_installed_pkgs(): installed_packages = {} if sys.version_info >= (3, 8): @@ -106,14 +116,14 @@ def create_count_metric(metric, tags=None): } -def gen_telemetry_payload(telemetry_events): +def gen_telemetry_payload(telemetry_events, ddtrace_version="unknown"): return { "metadata": { "language_name": "python", "language_version": PYTHON_VERSION, "runtime_name": PYTHON_RUNTIME, "runtime_version": PYTHON_VERSION, - "tracer_version": INSTALLED_PACKAGES.get("ddtrace", "unknown"), + "tracer_version": ddtrace_version, "pid": os.getpid(), }, "points": telemetry_events, @@ -181,6 +191,12 @@ def package_is_compatible(package_name, package_version): def get_first_incompatible_sysarg(): + # bug: sys.argv is not always available in all python versions + # https://bugs.python.org/issue32573 + if not hasattr(sys, "argv"): + _log("sys.argv not available, skipping sys.argv check", level="debug") + return + _log(f"Checking sysargs: len(argv): {len(sys.argv)}", level="debug") if len(sys.argv) <= 1: return @@ -197,6 +213,7 @@ def _inject(): global PYTHON_RUNTIME global PKGS_ALLOW_LIST global EXECUTABLES_DENY_LIST + DDTRACE_VERSION = get_oci_ddtrace_version() INSTALLED_PACKAGES = build_installed_pkgs() PYTHON_RUNTIME = platform.python_implementation().lower() PYTHON_VERSION = platform.python_version() @@ -291,7 +308,7 @@ def _inject(): ], ) ) - telemetry_event = 
gen_telemetry_payload(telemetry_data) + telemetry_event = gen_telemetry_payload(telemetry_data, DDTRACE_VERSION) send_telemetry(telemetry_event) return @@ -341,12 +358,14 @@ def _inject(): "injection_forced:" + str(runtime_incomp or integration_incomp).lower(), ], ) - ] + ], + DDTRACE_VERSION, ) send_telemetry(event) except Exception as e: event = gen_telemetry_payload( - [create_count_metric("library_entrypoint.error", ["error_type:" + type(e).__name__.lower()])] + [create_count_metric("library_entrypoint.error", ["error_type:" + type(e).__name__.lower()])], + DDTRACE_VERSION, ) send_telemetry(event) _log("failed to load ddtrace.bootstrap.sitecustomize: %s" % e, level="error") diff --git a/releasenotes/notes/add-aws-payload-tagging-d01f0033c7e1f5c0.yaml b/releasenotes/notes/add-aws-payload-tagging-d01f0033c7e1f5c0.yaml new file mode 100644 index 00000000000..4a71119eb80 --- /dev/null +++ b/releasenotes/notes/add-aws-payload-tagging-d01f0033c7e1f5c0.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add support for expanding AWS request/response Payloads into flattened span tags. + diff --git a/releasenotes/notes/fix-lib-injection-sys-argv-fefc775a4a262d86.yaml b/releasenotes/notes/fix-lib-injection-sys-argv-fefc775a4a262d86.yaml new file mode 100644 index 00000000000..ab61e914fb5 --- /dev/null +++ b/releasenotes/notes/fix-lib-injection-sys-argv-fefc775a4a262d86.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + lib-injection: Fix injection guardrail check when ``sys.argv`` is not available. diff --git a/releasenotes/notes/profiling-reduce-lock-profiler-log-spam-5931d957ff1becd4.yaml b/releasenotes/notes/profiling-reduce-lock-profiler-log-spam-5931d957ff1becd4.yaml new file mode 100644 index 00000000000..5a9fe3a174d --- /dev/null +++ b/releasenotes/notes/profiling-reduce-lock-profiler-log-spam-5931d957ff1becd4.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + profiling: The lock profiler would log a warning if it couldn't determine a + name for a lock, and it would try determining a name multiple times for + the same lock. This lead to excessive log spam. Downgrade this to a + debug log and only try to determine the name once. diff --git a/releasenotes/notes/remove_x-forwarded_from_ip_resolution-d63292c0fda42dd9.yaml b/releasenotes/notes/remove_x-forwarded_from_ip_resolution-d63292c0fda42dd9.yaml new file mode 100644 index 00000000000..19bbb487281 --- /dev/null +++ b/releasenotes/notes/remove_x-forwarded_from_ip_resolution-d63292c0fda42dd9.yaml @@ -0,0 +1,5 @@ +--- +other: + - | + Tracer: Removed x-forwarded from headers used for client IP resolution (but not from collected headers). + We lack evidence of actual usage, and whether this should follow RFC 7239 or regular XFF list format. 
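Tying together the ``lib-injection/sources/sitecustomize.py`` changes earlier in this diff, the vendored version string is now read once and passed into every telemetry payload instead of being looked up from installed packages. A minimal usage sketch, assuming the helpers defined in that file (``get_oci_ddtrace_version``, ``create_count_metric``, ``gen_telemetry_payload``, ``send_telemetry``):

# Sketch only: relies on the helpers defined in sitecustomize.py above.
ddtrace_version = get_oci_ddtrace_version()  # falls back to "unknown"

event = gen_telemetry_payload(
    [create_count_metric("library_entrypoint.complete", ["injection_forced:false"])],
    ddtrace_version,
)
send_telemetry(event)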
diff --git a/riotfile.py b/riotfile.py index c31945aedf3..4de6347f85a 100644 --- a/riotfile.py +++ b/riotfile.py @@ -120,6 +120,14 @@ def select_pys(min_version=MIN_PYTHON_VERSION, max_version=MAX_PYTHON_VERSION): "ruamel.yaml": latest, }, ), + Venv( + name="gitlab-gen-config", + command="python scripts/gen_gitlab_config.py {cmdargs}", + pys=["3"], + pkgs={ + "ruamel.yaml": latest, + }, + ), Venv( name="appsec", pys=select_pys(), diff --git a/scripts/gen_circleci_config.py b/scripts/gen_circleci_config.py index 1254081fd93..4afbec47cc1 100644 --- a/scripts/gen_circleci_config.py +++ b/scripts/gen_circleci_config.py @@ -8,8 +8,9 @@ import typing as t -def gen_required_suites(template: dict, git_selections: list) -> None: +def gen_required_suites(template: dict) -> None: """Generate the list of test suites that need to be run.""" + from needs_testrun import extract_git_commit_selections from needs_testrun import for_each_testrun_needed as fetn from suitespec import get_suites @@ -18,7 +19,9 @@ def gen_required_suites(template: dict, git_selections: list) -> None: required_suites = template["requires_tests"]["requires"] = [] fetn( - suites=sorted(suites & jobs), action=lambda suite: required_suites.append(suite), git_selections=git_selections + suites=sorted(suites & jobs), + action=lambda suite: required_suites.append(suite), + git_selections=extract_git_commit_selections(os.getenv("GIT_COMMIT_DESC", "")), ) if not required_suites: @@ -98,15 +101,6 @@ def gen_build_docs(template: dict) -> None: template["workflows"]["test"]["jobs"].append({"build_docs": template["requires_pre_check"]}) -def extract_git_commit_selections(git_commit_message: str) -> dict: - """Extract the selected suites from git commit message.""" - suites = set() - for token in git_commit_message.split(): - if token.lower().startswith("circleci:"): - suites.update(token[len("circleci:") :].lower().split(",")) - return list(sorted(suites)) - - # ----------------------------------------------------------------------------- # The code below is the boilerplate that makes the script work. There is @@ -142,7 +136,6 @@ def extract_git_commit_selections(git_commit_message: str) -> dict: with YAML(output=CONFIG_GEN_FILE) as yaml: LOGGER.info("Loading configuration template from %s", CONFIG_TEMPLATE_FILE) config = yaml.load(CONFIG_TEMPLATE_FILE) - git_commit_selections = extract_git_commit_selections(os.getenv("GIT_COMMIT_DESC")) has_error = False LOGGER.info("Configuration generation steps:") @@ -151,10 +144,7 @@ def extract_git_commit_selections(git_commit_message: str) -> dict: desc = func.__doc__.splitlines()[0] try: start = time() - if name == "gen_required_suites": - func(config, git_commit_selections) - else: - func(config) + func(config) end = time() LOGGER.info("- %s: %s [took %dms]", name, desc, int((end - start) / 1e6)) except Exception as e: diff --git a/scripts/gen_gitlab_config.py b/scripts/gen_gitlab_config.py new file mode 100644 index 00000000000..2c7e5a5b6c2 --- /dev/null +++ b/scripts/gen_gitlab_config.py @@ -0,0 +1,153 @@ +# This script is used to generate the GitLab dynamic config file in +# .gitlab/tests.yml. +# +# To add new configuration manipulations that are based on top of the template +# file in .gitlab/tests.yml, add a function named gen_ to this +# file. The function will be called automatically when this script is run. 
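The header comment of the new ``scripts/gen_gitlab_config.py`` (opened just above) describes its extension convention: any top-level function whose name starts with ``gen_`` is discovered and run by the boilerplate at the bottom of the script, which logs the first line of the function's docstring as the step description. A hypothetical extra step, shown only to illustrate that convention and not part of this change:

def gen_list_suites() -> None:
    """List the discovered test suites."""
    # The first docstring line above is what the boilerplate logs for this step.
    from suitespec import get_suites  # importable thanks to the sys.path setup below

    for suite in sorted(get_suites()):
        LOGGER.info("suite: %s", suite)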
+ +from dataclasses import dataclass +import typing as t + + +@dataclass +class JobSpec: + name: str + runner: str + is_snapshot: bool = False + services: t.Optional[t.Set[str]] = None + env: t.Optional[t.Dict[str, str]] = None + parallelism: t.Optional[int] = None + retry: t.Optional[int] = None + timeout: t.Optional[int] = None + + def __str__(self) -> str: + lines = [] + base = f".test_base_{self.runner}" + if self.is_snapshot: + base += "_snapshot" + + lines.append(f"{self.name}:") + lines.append(f" extends: {base}") + + if self.services: + lines.append(" services:") + + _services = [f"!reference [.services, {_}]" for _ in self.services] + if self.is_snapshot: + _services.insert(0, f"!reference [{base}, services]") + + for service in _services: + lines.append(f" - {service}") + + if self.env: + lines.append(" variables:") + for key, value in self.env.items(): + lines.append(f" {key}: {value}") + + if self.parallelism is not None: + lines.append(f" parallel: {self.parallelism}") + + if self.retry is not None: + lines.append(f" retry: {self.retry}") + + if self.timeout is not None: + lines.append(f" timeout: {self.timeout}") + + return "\n".join(lines) + + +def collect_jobspecs() -> dict: + # Recursively search for jobspec.yml in TESTS + jobspecs = {} + for js in TESTS.rglob("jobspec.yml"): + with YAML() as yaml: + LOGGER.info("Loading jobspecs from %s", js) + jobspecs.update(yaml.load(js)) + + return jobspecs + + +def gen_required_suites() -> None: + """Generate the list of test suites that need to be run.""" + from needs_testrun import extract_git_commit_selections + from needs_testrun import for_each_testrun_needed as fetn + from suitespec import get_suites + + jobspecs = collect_jobspecs() + + suites = get_suites() + required_suites = [] + + for suite in suites: + if suite not in jobspecs: + print(f"WARNING: Suite {suite} has no jobspec", file=sys.stderr) + continue + + for job in jobspecs: + if job not in suites: + print(f"WARNING: Job {job} has no suitespec", file=sys.stderr) + continue + + fetn( + suites=sorted(suites), + action=lambda suite: required_suites.append(suite), + git_selections=extract_git_commit_selections(os.getenv("CI_COMMIT_MESSAGE", "")), + ) + + # Copy the template file + (GITLAB / "tests-gen.yml").write_text( + (GITLAB / "tests.yml").read_text().replace(r"{{services.yml}}", (GITLAB / "services.yml").read_text()) + ) + + # Generate the list of suites to run + with (GITLAB / "tests-gen.yml").open("a") as f: + for suite in required_suites: + if suite not in jobspecs: + print(f"Suite {suite} not found in jobspec", file=sys.stderr) + continue + + print(str(JobSpec(suite, **jobspecs[suite])), file=f) + + +# ----------------------------------------------------------------------------- + +# The code below is the boilerplate that makes the script work. There is +# generally no reason to modify it. + +import logging # noqa +import os # noqa +import sys # noqa +from argparse import ArgumentParser # noqa +from pathlib import Path # noqa +from time import monotonic_ns as time # noqa + +from ruamel.yaml import YAML # noqa + +logging.basicConfig(level=logging.WARNING, format="%(levelname)s: %(message)s") +LOGGER = logging.getLogger(__name__) + +argp = ArgumentParser() +argp.add_argument("--verbose", "-v", action="store_true", help="Verbose output") +args = argp.parse_args() +if args.verbose: + LOGGER.setLevel(logging.INFO) + +ROOT = Path(__file__).parents[1] +GITLAB = ROOT / ".gitlab" +TESTS = ROOT / "tests" +# Make the scripts and tests folders available for importing. 
+sys.path.append(str(ROOT / "scripts")) +sys.path.append(str(ROOT / "tests")) + +LOGGER.info("Configuration generation steps:") +for name, func in dict(globals()).items(): + if name.startswith("gen_"): + desc = func.__doc__.splitlines()[0] + try: + start = time() + func() + end = time() + LOGGER.info("- %s: %s [took %dms]", name, desc, int((end - start) / 1e6)) + except Exception as e: + LOGGER.error("- %s: %s [reason: %s]", name, desc, str(e), exc_info=True) + has_error = True diff --git a/scripts/needs_testrun.py b/scripts/needs_testrun.py index 0ea8235622b..ecac780e328 100755 --- a/scripts/needs_testrun.py +++ b/scripts/needs_testrun.py @@ -12,6 +12,7 @@ from subprocess import check_output import sys import typing as t +from urllib.parse import urlencode from urllib.request import Request from urllib.request import urlopen @@ -68,6 +69,45 @@ def get_latest_commit_message() -> str: return "" +GITHUB_TOKEN = os.environ.get("GITHUB_TOKEN") +if not GITHUB_TOKEN: + try: + GITHUB_TOKEN = ( + check_output( + [ + "aws", + "ssm", + "get-parameter", + "--region", + "us-east-1", + "--name", + f'ci.{os.environ["CI_PROJECT_NAME"]}.gh_token', + "--with-decryption", + "--query", + "Parameter.Value", + "--output=text", + ] + ) + .decode("utf-8") + .strip() + ) + LOGGER.info("GitHub token retrieved from SSM") + except Exception: + LOGGER.warning("No GitHub token available. Changes may not be detected accurately.", exc_info=True) +else: + LOGGER.info("GitHub token retrieved from environment") + + +def github_api(path: str, query: t.Optional[dict] = None) -> t.Any: + url = f"https://api.github.com/repos/datadog/dd-trace-py{path}" + headers = {"Accept": "application/vnd.github+json"} + if GITHUB_TOKEN: + headers["Authorization"] = f"Bearer {GITHUB_TOKEN}" + if query is not None: + url += "?" 
+ urlencode(query) + return json.load(urlopen(Request(url, headers=headers))) + + @cache def get_changed_files(pr_number: int, sha: t.Optional[str] = None) -> t.Set[str]: """Get the files changed in a PR @@ -85,9 +125,7 @@ def get_changed_files(pr_number: int, sha: t.Optional[str] = None) -> t.Set[str] files = set() try: for page in count(1): - url = f"https://api.github.com/repos/datadog/dd-trace-py/pulls/{pr_number}/files?page={page}" - headers = {"Accept": "application/vnd.github+json"} - result = {_["filename"] for _ in json.load(urlopen(Request(url, headers=headers)))} + result = {_["filename"] for _ in github_api(f"/pulls/{pr_number}/files", {"page": page})} if not result: return files files |= result @@ -151,35 +189,35 @@ def needs_testrun(suite: str, pr_number: int, sha: t.Optional[str] = None) -> bo return bool(matches) -def _get_pr_number(): +def _get_pr_number() -> int: + # CircleCI number = os.environ.get("CIRCLE_PR_NUMBER") - if not number: - pr_url = os.environ.get("CIRCLE_PULL_REQUEST", "") - number = pr_url.split("/")[-1] - try: + if number is not None: return int(number) - except ValueError: - return 0 + pr_url = os.environ.get("CIRCLE_PULL_REQUEST") + if pr_url is not None: + return int(pr_url.split("/")[-1]) -def for_each_testrun_needed(suites: t.List[str], action: t.Callable[[str], None], git_selections: t.List[str]): - # Used in CircleCI config - pr_number = _get_pr_number() + # GitLab + ref_name = os.environ.get("CI_COMMIT_REF_NAME") + if ref_name is not None: + return int(github_api("/pulls", {"head": f"datadog:{ref_name}"})[0]["number"]) - for suite in suites: - if pr_number <= 0: - # If we don't have a valid PR number we run all tests - action(suite) - continue + raise RuntimeError("Could not determine PR number") - if any(x in git_selections for x in ("all", suite)): - # If "all" or current suite is annotated - # in git commit we run the suite - action(suite) - continue - needs_run = needs_testrun(suite, pr_number) - if needs_run: +def for_each_testrun_needed(suites: t.List[str], action: t.Callable[[str], None], git_selections: t.Set[str]): + try: + pr_number = _get_pr_number() + except Exception: + pr_number = None + + for suite in suites: + # If we don't have a valid PR number we run all tests + # or "all" or current suite is annotated in git commit we run the suite + # or the suite needs to be run based on the changed files + if pr_number is None or (git_selections & {"all", suite}) or needs_testrun(suite, pr_number): action(suite) @@ -192,6 +230,15 @@ def pr_matches_patterns(patterns: t.Set[str]) -> bool: return bool([_ for p in patterns for _ in fnmatch.filter(changed_files, p)]) +def extract_git_commit_selections(git_commit_message: str) -> t.Set[str]: + """Extract the selected suites from git commit message.""" + suites = set() + for token in git_commit_message.split(): + if token.lower().startswith("circleci:"): + suites.update(token[len("circleci:") :].lower().split(",")) + return suites + + def main() -> bool: argp = ArgumentParser() diff --git a/tests/.suitespec.json b/tests/.suitespec.json index 773e41b366d..1e85eff0ffa 100644 --- a/tests/.suitespec.json +++ b/tests/.suitespec.json @@ -23,7 +23,10 @@ "tests/subprocesstest.py", "tests/wait-for-services.py", "tests/webclient.py", - "tests/test_module/*" + "tests/test_module/*", + ".gitlab-ci.yml", + ".gitlab/*", + "tests/jobspec.yml" ], "core": [ "ddtrace/internal/__init__.py", @@ -133,7 +136,8 @@ "ddtrace/settings/_database_monitoring.py", "tests/contrib/patch.py", "tests/contrib/config.py", - 
"tests/contrib/__init__.py" + "tests/contrib/__init__.py", + "tests/contrib/jobspec.yml" ], "redis": [ "ddtrace/contrib/rediscluster/*", @@ -788,12 +792,7 @@ "@bootstrap", "@core", "@profiling", - "tests/profiling/*" - ], - "profile-v2": [ - "@bootstrap", - "@core", - "@profiling", + "tests/profiling/*", "tests/profiling_v2/*" ], "vendor": [ @@ -835,45 +834,6 @@ "@loguru", "tests/contrib/loguru/*" ], - "asyncio": [ - "@bootstrap", - "@core", - "@contrib", - "@tracing", - "@asyncio" - ], - "futures": [ - "@bootstrap", - "@core", - "@contrib", - "@tracing", - "@futures" - ], - "sqlite3": [ - "@bootstrap", - "@core", - "@contrib", - "@tracing", - "@sqlite3", - "@dbapi" - ], - "dbapi": [ - "@bootstrap", - "@core", - "@contrib", - "@tracing", - "@dbapi", - "@appsec", - "tests/contrib/shared_tests.py" - ], - "dbapi_async": [ - "@bootstrap", - "@core", - "@contrib", - "@tracing", - "@dbapi", - "tests/contrib/shared_tests_async.py" - ], "asyncpg": [ "@bootstrap", "@core", diff --git a/tests/README.md b/tests/README.md new file mode 100644 index 00000000000..20475da6965 --- /dev/null +++ b/tests/README.md @@ -0,0 +1,38 @@ +# Dynamic Job Runs + +This repository makes use of dynamic features of CI providers to run only the +jobs that are necessary for the changes in the pull request. This is done by +giving a logical description of the test suites in terms of _components_. A +component is a logical grouping of tests that can be run independently of other +components. For example, a component could be a test suite for a specific +package, or a test suite for a specific feature, e.g. the tracer, the profiler, +etc... . Components may depend on each other, provide that the resulting +dependency graph is acyclic. For example, the library has a _core_ component +on which most, if not all, other components depend. When a change is made to the +core component, all other components depending on it must be re-tested. This +logical description is contained in the `/tests/.suitespec.json` file. + +The jobs to be run are defined in `jobspec.yml` files under the `/tests` +sub-tree. Each entry has the name of a suite from the suitespec file, and +defines the parameters for the runner image. The general structure of a job spec +is + +```yaml +suite_name: # A suite name from the .suitespec.json file + runner: # The runner image to use (riot | hatch) + is_snapshot: # Whether this is a snapshot test run (optional, defaults to false) + services: # Any services to spawn for the tests (e.g. redis, optional) + env: # Environment variables to set for the tests + SUITE_NAME: # The suite pattern from the riotfile|hatch env names (mandatory) + parallelism: # The parallel degree of the job (optional, defaults to 1) + retry: # The number of retries for the job (optional) + timeout: # The timeout for the job (optional) +``` + +To create a new job for a new test suite, create a new suite entry in the +`.suitespec.json` file and declare the file patterns that should trigger it. +Then create a new job in a `jobspec.yml` file under the `/tests` sub-tree with +the format described above, matching the name of the suite in the +`.suitespec.json` just created. The `SUITE_NAME` environment variable must be +a regular expression matching at least one riotfile environment (for the riot +runner) or a hatch environment (for the hatch runner). 
diff --git a/tests/appsec/integrations/test_flask_entrypoint_iast_patches.py b/tests/appsec/integrations/test_flask_entrypoint_iast_patches.py index 4b0812b4807..6a10f2b53f4 100644 --- a/tests/appsec/integrations/test_flask_entrypoint_iast_patches.py +++ b/tests/appsec/integrations/test_flask_entrypoint_iast_patches.py @@ -146,7 +146,7 @@ def _uninstall_watchdog_and_reload(): del sys.modules["tests.appsec.iast.fixtures.entrypoint.views"] -@pytest.mark.subprocess() +@pytest.mark.subprocess(check_logs=False) def test_ddtrace_iast_flask_app_create_app_patch_all_enable_iast_propagation(): import dis import io diff --git a/tests/appsec/jobspec.yml b/tests/appsec/jobspec.yml new file mode 100644 index 00000000000..bea6d7b77ec --- /dev/null +++ b/tests/appsec/jobspec.yml @@ -0,0 +1,65 @@ +appsec: + runner: riot + is_snapshot: true + env: + SUITE_NAME: "appsec$" + parallelism: 6 + retry: 2 +appsec_iast: + runner: riot + is_snapshot: true + services: + - postgres + env: + SUITE_NAME: "appsec_iast$" + TEST_POSTGRES_HOST: postgres + parallelism: 6 + retry: 2 + timeout: 25m +appsec_iast_tdd_propagation: + runner: riot + is_snapshot: true + env: + SUITE_NAME: appsec_iast_tdd_propagation + parallelism: 2 + retry: 2 +appsec_iast_memcheck: + runner: riot + is_snapshot: true + env: + SUITE_NAME: appsec_iast_memcheck + CI_DEBUG_TRACE: "true" + PYTEST_ADDOPTS: "-v -s" + parallelism: 4 + retry: 2 +appsec_threats_django: + runner: hatch + env: + SUITE_NAME: appsec_threats_django + parallelism: 12 + retry: 2 +appsec_threats_flask: + runner: hatch + env: + SUITE_NAME: appsec_threats_flask + parallelism: 10 + retry: 2 +appsec_threats_fastapi: + runner: hatch + env: + SUITE_NAME: appsec_threats_fastapi + parallelism: 9 + retry: 2 +appsec_aggregated_leak_testing: + runner: hatch + env: + SUITE_NAME: appsec_aggregated_leak_testing + parallelism: 3 + retry: 2 + timeout: 35m +appsec_iast_native: + runner: hatch + env: + SUITE_NAME: appsec_iast_native + parallelism: 6 + retry: 2 diff --git a/tests/ci_visibility/jobspec.yml b/tests/ci_visibility/jobspec.yml new file mode 100644 index 00000000000..fc08931eda9 --- /dev/null +++ b/tests/ci_visibility/jobspec.yml @@ -0,0 +1,29 @@ +ci_visibility: + runner: riot + is_snapshot: true + env: + SUITE_NAME: ci_visibility + parallelism: 4 +dd_coverage: + runner: hatch + is_snapshot: true + env: + SUITE_NAME: dd_coverage + parallelism: 6 +pytest: + runner: riot + is_snapshot: true + env: + SUITE_NAME: pytest + parallelism: 12 +pytest_v2: + runner: hatch + is_snapshot: true + env: + SUITE_NAME: pytest_plugin_v2 + parallelism: 10 +unittest: + runner: riot + is_snapshot: true + env: + SUITE_NAME: unittest diff --git a/tests/ci_visibility/test_ci_visibility.py b/tests/ci_visibility/test_ci_visibility.py index b3de47a82b5..ba9c7b5359d 100644 --- a/tests/ci_visibility/test_ci_visibility.py +++ b/tests/ci_visibility/test_ci_visibility.py @@ -528,7 +528,7 @@ def test_git_do_request_agentless(git_repo): {"mock_header_name": "mock_header_value"}, ) - mock_get_connection.assert_called_once_with("http://base_url/repository/endpoint", timeout=20) + mock_get_connection.assert_any_call("http://base_url/repository/endpoint", timeout=20) mock_http_connection.request.assert_called_once_with( "POST", "/repository/endpoint", @@ -561,7 +561,7 @@ def test_git_do_request_evp(git_repo): {"mock_header_name": "mock_header_value"}, ) - mock_get_connection.assert_called_once_with("https://base_url/repository/endpoint", timeout=20) + mock_get_connection.assert_any_call("https://base_url/repository/endpoint", 
timeout=20) mock_http_connection.request.assert_called_once_with( "POST", "/repository/endpoint", @@ -607,7 +607,7 @@ def test_civisibilitywriter_coverage_agentless_url(self): with mock.patch("ddtrace.internal.writer.writer.get_connection") as _get_connection: _get_connection.return_value.getresponse.return_value.status = 200 dummy_writer._put("", {}, cov_client, no_trace=True) - _get_connection.assert_called_once_with("https://citestcov-intake.datadoghq.com", 2.0) + _get_connection.assert_any_call("https://citestcov-intake.datadoghq.com", 2.0) def test_civisibilitywriter_coverage_agentless_with_intake_url_param(self): ddtrace.internal.ci_visibility.writer.config._ci_visibility_agentless_url = "" diff --git a/tests/conftest.py b/tests/conftest.py index be1790e432f..c4c21caddf0 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -299,6 +299,7 @@ def run_function_from_file(item, params=None): args = [sys.executable] timeout = marker.kwargs.get("timeout", None) + check_logs = marker.kwargs.get("check_logs", True) # Add ddtrace-run prefix in ddtrace-run mode if marker.kwargs.get("ddtrace_run", False): @@ -367,10 +368,16 @@ def _subprocess_wrapper(): ) if not is_stream_ok(out, expected_out): - raise AssertionError("STDOUT: Expected [%s] got [%s]" % (expected_out, out)) + if check_logs: + raise AssertionError("STDOUT: Expected [%s] got [%s]" % (expected_out, out)) + else: + pytest.xfail("STDOUT: Expected [%s] got [%s]" % (expected_out, out)) if not is_stream_ok(err, expected_err): - raise AssertionError("STDERR: Expected [%s] got [%s]" % (expected_err, err)) + if check_logs: + raise AssertionError("STDERR: Expected [%s] got [%s]" % (expected_err, err)) + else: + pytest.xfail("STDERR: Expected [%s] got [%s]" % (expected_err, err)) return _subprocess_wrapper() finally: diff --git a/tests/contrib/botocore/test.py b/tests/contrib/botocore/test.py index 657924be005..4910f32ac46 100644 --- a/tests/contrib/botocore/test.py +++ b/tests/contrib/botocore/test.py @@ -55,6 +55,16 @@ # Parse botocore.__version_ from "1.9.0" to (1, 9, 0) BOTOCORE_VERSION = parse_version(botocore.__version__) +# Span data which isn't static to ignore in the snapshots.
+snapshot_ignores = [ + "meta.aws.response.body.HTTPHeaders.date", + "meta.aws.requestid", + "meta.aws.response.body.RequestId", + "meta.aws.response.body.HTTPHeaders.content-length", + "meta.aws.response.body.HTTPHeaders.x-amzn-requestid", + "meta.error.stack", +] + def get_zip_lambda(): code = """ @@ -3765,3 +3775,281 @@ def test_schematized_unspecified_service_secretsmanager_v1(self): assert span.service == DEFAULT_SPAN_SERVICE_NAME assert span.name == "aws.secretsmanager.request" + + @TracerTestCase.run_in_subprocess(env_overrides=dict()) + @pytest.mark.snapshot(ignores=snapshot_ignores) + @mock_sqs + def test_aws_payload_tagging_sqs(self): + with self.override_config("botocore", dict(payload_tagging_request="all", payload_tagging_response="all")): + Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(self.sqs_client) + message_attributes = { + "one": {"DataType": "String", "StringValue": "one"}, + "two": {"DataType": "String", "StringValue": "two"}, + "three": {"DataType": "String", "StringValue": "three"}, + "four": {"DataType": "String", "StringValue": "four"}, + "five": {"DataType": "String", "StringValue": "five"}, + "six": {"DataType": "String", "StringValue": "six"}, + "seven": {"DataType": "String", "StringValue": "seven"}, + "eight": {"DataType": "String", "StringValue": "eight"}, + "nine": {"DataType": "String", "StringValue": "nine"}, + "ten": {"DataType": "String", "StringValue": "ten"}, + } + self.sqs_client.send_message( + QueueUrl=self.sqs_test_queue["QueueUrl"], MessageBody="world", MessageAttributes=message_attributes + ) + spans = self.get_spans() + assert spans + assert len(spans) == 1 + span = spans[0] + assert span.get_tag("aws.region") == "us-east-1" + assert span.get_tag("region") == "us-east-1" + assert span.get_tag("aws.operation") == "SendMessage" + assert span.get_tag("params.MessageBody") is None + assert span.get_tag("component") == "botocore" + assert span.get_tag("span.kind"), "client" + assert_is_measured(span) + assert_span_http_status_code(span, 200) + assert span.service == "test-botocore-tracing.sqs" + assert span.resource == "sqs.sendmessage" + trace_json = span.get_tag("params.MessageAttributes._datadog.StringValue") + assert trace_json is None + response = self.sqs_client.receive_message( + QueueUrl=self.sqs_test_queue["QueueUrl"], + MessageAttributeNames=["_datadog"], + WaitTimeSeconds=2, + ) + assert len(response["Messages"]) == 1 + trace_in_message = "MessageAttributes" in response["Messages"][0] + assert trace_in_message is False + + @TracerTestCase.run_in_subprocess(env_overrides=dict()) + @pytest.mark.snapshot(ignores=snapshot_ignores) + @mock_sns + @mock_sqs + def test_aws_payload_tagging_sns(self): + with self.override_config("botocore", dict(payload_tagging_request="all", payload_tagging_response="all")): + region = "us-east-1" + sns = self.session.create_client("sns", region_name=region, endpoint_url="http://localhost:4566") + + topic = sns.create_topic(Name="testTopic") + + topic_arn = topic["TopicArn"] + sqs_url = self.sqs_test_queue["QueueUrl"] + url_parts = sqs_url.split("/") + sqs_arn = "arn:aws:sqs:{}:{}:{}".format(region, url_parts[-2], url_parts[-1]) + sns.subscribe(TopicArn=topic_arn, Protocol="sqs", Endpoint=sqs_arn) + + Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(sns) + + message_attributes = { + "one": {"DataType": "String", "StringValue": "one"}, + "two": {"DataType": "String", "StringValue": "two"}, + "three": {"DataType": "String", "StringValue": "three"}, + "four": {"DataType": "String", "StringValue": 
"four"}, + "five": {"DataType": "String", "StringValue": "five"}, + "six": {"DataType": "String", "StringValue": "six"}, + "seven": {"DataType": "String", "StringValue": "seven"}, + "eight": {"DataType": "String", "StringValue": "eight"}, + "nine": {"DataType": "String", "StringValue": "nine"}, + "ten": {"DataType": "String", "StringValue": "ten"}, + } + entries = [ + {"Id": "1", "Message": "ironmaiden", "MessageAttributes": message_attributes}, + {"Id": "2", "Message": "megadeth", "MessageAttributes": message_attributes}, + ] + sns.publish_batch(TopicArn=topic_arn, PublishBatchRequestEntries=entries) + self.get_spans() + + # get SNS messages via SQS + self.sqs_client.receive_message( + QueueUrl=self.sqs_test_queue["QueueUrl"], + MessageAttributeNames=["_datadog"], + WaitTimeSeconds=2, + ) + + # clean up resources + sns.delete_topic(TopicArn=topic_arn) + + @TracerTestCase.run_in_subprocess(env_overrides=dict()) + @pytest.mark.snapshot(ignores=snapshot_ignores) + @mock_sns + @mock_sqs + def test_aws_payload_tagging_sns_valid_config(self): + with self.override_config( + "botocore", + dict( + payload_tagging_request="$..PublishBatchRequestEntries.[*].Message,$..PublishBatchRequestEntries.[*].Id", + payload_tagging_response="$..HTTPHeaders.*", + ), + ): + region = "us-east-1" + sns = self.session.create_client("sns", region_name=region, endpoint_url="http://localhost:4566") + + topic = sns.create_topic(Name="testTopic") + + topic_arn = topic["TopicArn"] + sqs_url = self.sqs_test_queue["QueueUrl"] + url_parts = sqs_url.split("/") + sqs_arn = "arn:aws:sqs:{}:{}:{}".format(region, url_parts[-2], url_parts[-1]) + sns.subscribe(TopicArn=topic_arn, Protocol="sqs", Endpoint=sqs_arn) + + Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(sns) + + message_attributes = { + "one": {"DataType": "String", "StringValue": "one"}, + "two": {"DataType": "String", "StringValue": "two"}, + "three": {"DataType": "String", "StringValue": "three"}, + "f.our": {"DataType": "String", "StringValue": "four"}, + "five": {"DataType": "String", "StringValue": "five"}, + "six": {"DataType": "String", "StringValue": "six"}, + "seven": {"DataType": "String", "StringValue": "seven"}, + "eight": {"DataType": "String", "StringValue": "eight"}, + "nine": {"DataType": "String", "StringValue": "nine"}, + "ten": {"DataType": "String", "StringValue": "ten"}, + } + entries = [ + {"Id": "1", "Message": "ironmaiden", "MessageAttributes": message_attributes}, + {"Id": "2", "Message": "megadeth", "MessageAttributes": message_attributes}, + ] + sns.publish_batch(TopicArn=topic_arn, PublishBatchRequestEntries=entries) + self.get_spans() + + # get SNS messages via SQS + self.sqs_client.receive_message( + QueueUrl=self.sqs_test_queue["QueueUrl"], + MessageAttributeNames=["_datadog"], + WaitTimeSeconds=2, + ) + + # clean up resources + sns.delete_topic(TopicArn=topic_arn) + + @TracerTestCase.run_in_subprocess(env_overrides=dict()) + @pytest.mark.snapshot(ignores=snapshot_ignores) + @mock_s3 + def test_aws_payload_tagging_s3(self): + with self.override_config("botocore", dict(payload_tagging_request="all", payload_tagging_response="all")): + s3 = self.session.create_client("s3", region_name="us-west-2") + Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(s3) + + s3.list_buckets() + s3.list_buckets() + + spans = self.get_spans() + assert spans + span = spans[0] + assert len(spans) == 2 + assert_is_measured(span) + assert span.get_tag("aws.operation") == "ListBuckets" + assert span.get_tag("component") == "botocore" + assert 
span.get_tag("span.kind"), "client" + assert_span_http_status_code(span, 200) + assert span.service == "test-botocore-tracing.s3" + assert span.resource == "s3.listbuckets" + + assert not span._links, "no links, i.e. no span pointers" + + # testing for span error + self.reset() + with pytest.raises(Exception): + s3.list_objects(bucket="mybucket") + + @TracerTestCase.run_in_subprocess(env_overrides=dict()) + @pytest.mark.snapshot(ignores=snapshot_ignores) + @mock_s3 + def test_aws_payload_tagging_s3_invalid_config(self): + with self.override_config( + "botocore", + dict(payload_tagging_request="non_json_path", payload_tagging_response="$..Attr ibutes.PlatformCredential"), + ): + s3 = self.session.create_client("s3", region_name="us-west-2") + Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(s3) + + s3.list_buckets() + s3.list_buckets() + + # testing for span error + self.reset() + with pytest.raises(Exception): + s3.list_objects(bucket="mybucket") + + @TracerTestCase.run_in_subprocess(env_overrides=dict()) + @pytest.mark.snapshot(ignores=snapshot_ignores) + @mock_s3 + def test_aws_payload_tagging_s3_valid_config(self): + with self.override_config( + "botocore", dict(payload_tagging_request="$..bucket", payload_tagging_response="$..HTTPHeaders") + ): + s3 = self.session.create_client("s3", region_name="us-west-2") + Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(s3) + + s3.list_buckets() + s3.list_buckets() + + # testing for span error + self.reset() + with pytest.raises(Exception): + s3.list_objects(bucket="mybucket") + + @TracerTestCase.run_in_subprocess(env_overrides=dict()) + @pytest.mark.snapshot(ignores=snapshot_ignores) + @mock_events + def test_aws_payload_tagging_eventbridge(self): + with self.override_config("botocore", dict(payload_tagging_request="all", payload_tagging_response="all")): + bridge = self.session.create_client("events", region_name="us-east-1", endpoint_url="http://localhost:4566") + bridge.create_event_bus(Name="a-test-bus") + + entries = [ + { + "Source": "another-event-source", + "DetailType": "a-different-event-detail-type", + "Detail": json.dumps({"abc": "xyz"}), + "EventBusName": "a-test-bus", + }, + { + "Source": "some-event-source", + "DetailType": "some-event-detail-type", + "Detail": json.dumps({"foo": "bar"}), + "EventBusName": "a-test-bus", + }, + ] + bridge.put_rule( + Name="a-test-bus-rule", + EventBusName="a-test-bus", + EventPattern="""{"source": [{"prefix": ""}]}""", + State="ENABLED", + ) + + bridge.list_rules() + queue_url = self.sqs_test_queue["QueueUrl"] + bridge.put_targets( + Rule="a-test-bus-rule", + Targets=[{"Id": "a-test-bus-rule-target", "Arn": "arn:aws:sqs:us-east-1:000000000000:Test"}], + ) + + Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(bridge) + bridge.put_events(Entries=entries) + + self.sqs_client.receive_message(QueueUrl=queue_url, WaitTimeSeconds=2) + + bridge.delete_event_bus(Name="a-test-bus") + + @TracerTestCase.run_in_subprocess(env_overrides=dict()) + @pytest.mark.snapshot(ignores=snapshot_ignores) + @mock_kinesis + def test_aws_payload_tagging_kinesis(self): + with self.override_config("botocore", dict(payload_tagging_request="all", payload_tagging_response="all")): + client = self.session.create_client("kinesis", region_name="us-east-1") + stream_name = "test" + + partition_key = "1234" + data = [ + {"Data": json.dumps({"Hello": "World"}), "PartitionKey": partition_key}, + {"Data": json.dumps({"foo": "bar"}), "PartitionKey": partition_key}, + ] + + 
Pin.get_from(client).clone(tracer=self.tracer).onto(client) + + with self.tracer.trace("kinesis.manual_span"): + client.create_stream(StreamName=stream_name, ShardCount=1) + client.put_records(StreamName=stream_name, Records=data) diff --git a/tests/contrib/gunicorn/test_gunicorn.py b/tests/contrib/gunicorn/test_gunicorn.py index 1a1d7367a01..5906e5bfbf3 100644 --- a/tests/contrib/gunicorn/test_gunicorn.py +++ b/tests/contrib/gunicorn/test_gunicorn.py @@ -10,9 +10,7 @@ import pytest -from ddtrace.internal import compat from ddtrace.internal.utils.retry import RetryError # noqa:F401 -from tests.utils import flaky from tests.utils import snapshot_context from tests.webclient import Client @@ -111,7 +109,7 @@ def post_worker_init(worker): def gunicorn_server(gunicorn_server_settings, tmp_path): cfg_file = tmp_path / "gunicorn.conf.py" cfg = build_config_file(gunicorn_server_settings) - cfg_file.write_text(compat.stringify(cfg)) + cfg_file.write_text(cfg) cmd = [] if gunicorn_server_settings.use_ddtracerun: cmd = ["ddtrace-run"] @@ -167,7 +165,6 @@ def gunicorn_server(gunicorn_server_settings, tmp_path): ) -@flaky(until=1706677200) @pytest.mark.skipif(sys.version_info >= (3, 11), reason="Gunicorn is only supported up to 3.10") def test_no_known_errors_occur(tmp_path): for gunicorn_server_settings in [ @@ -185,7 +182,6 @@ def test_no_known_errors_occur(tmp_path): assert payload["profiler"]["is_active"] is True -@flaky(until=1706677200) @pytest.mark.skipif(sys.version_info >= (3, 11), reason="Gunicorn is only supported up to 3.10") def test_span_schematization(tmp_path): for schema_version in [None, "v0", "v1"]: diff --git a/tests/contrib/jobspec.yml b/tests/contrib/jobspec.yml new file mode 100644 index 00000000000..32f05864383 --- /dev/null +++ b/tests/contrib/jobspec.yml @@ -0,0 +1,360 @@ +aiohttp: + runner: riot + is_snapshot: true + services: + - httpbin_local + env: + SUITE_NAME: aiohttp + parallelism: 3 +asgi: + runner: riot + is_snapshot: true + env: + SUITE_NAME: "asgi$" +tornado: + runner: riot + is_snapshot: true + env: + SUITE_NAME: tornado +bottle: + runner: riot + is_snapshot: true + env: + SUITE_NAME: bottle + parallelism: 1 +celery: + runner: riot + is_snapshot: true + services: + - rabbitmq + - redis + env: + SUITE_NAME: celery + LOG_LEVEL: DEBUG + SNAPSHOT_DIR: /snapshots + PORT: 9126 + SNAPSHOT_CI: 1 + DD_POOL_TRACE_CHECK_FAILURES: true + DD_DISABLE_ERROR_RESPONSES: true + ENABLED_CHECKS: trace_stall,meta_tracer_version_header,trace_content_length,trace_peer_service,trace_dd_service # disable flaky content length check +cherrypy: + runner: riot + is_snapshot: true + env: + SUITE_NAME: cherrypy +dogpile_cache: + runner: riot + is_snapshot: true + env: + SUITE_NAME: dogpile_cache +elasticsearch: + runner: riot + is_snapshot: true + services: + - opensearch + - elasticsearch + env: + SUITE_NAME: elasticsearch + TEST_ELASTICSEARCH_HOST: "elasticsearch" + TEST_OPENSEARCH_HOST: "opensearch" + parallelism: 17 +falcon: + runner: riot + is_snapshot: true + env: + SUITE_NAME: falcon +django: + runner: riot + is_snapshot: true + services: + - postgres + - memcached + - redis + env: + SUITE_NAME: "django($|_celery)" + TEST_POSTGRES_HOST: "postgres" + TEST_MEMCACHED_HOST: "memcached" + TEST_REDIS_HOST: "redis" +django_hosts: + runner: riot + is_snapshot: true + env: + SUITE_NAME: "django_hosts$" +djangorestframework: + runner: riot + is_snapshot: true + services: + - memcached + - redis + env: + SUITE_NAME: djangorestframework + TEST_MEMCACHED_HOST: memcached + TEST_REDIS_HOST: redis 
+dramatiq: + runner: riot + is_snapshot: true + services: + - redis + env: + SUITE_NAME: dramatiq + TEST_REDIS_HOST: redis + parallelism: 2 +fastapi: + runner: riot + is_snapshot: true + env: + SUITE_NAME: fastapi +flask: + runner: riot + is_snapshot: true + services: + - memcached + - redis + env: + SUITE_NAME: flask + TEST_MEMCACHED_HOST: memcached + TEST_REDIS_HOST: redis + parallelism: 14 +gevent: + runner: riot + is_snapshot: true + env: + SUITE_NAME: gevent +graphene: + runner: riot + is_snapshot: true + env: + SUITE_NAME: graphene +graphql: + runner: riot + is_snapshot: true + env: + SUITE_NAME: graphql + parallelism: 6 +grpc: + runner: riot + is_snapshot: true + env: + SUITE_NAME: grpc +gunicorn: + runner: riot + is_snapshot: true + env: + SUITE_NAME: gunicorn + parallelism: 12 +httplib: + runner: riot + is_snapshot: true + services: + - httpbin_local + env: + SUITE_NAME: httplib +httpx: + runner: riot + is_snapshot: true + services: + - httpbin_local + env: + SUITE_NAME: httpx + parallelism: 2 +molten: + runner: riot + is_snapshot: true + env: + SUITE_NAME: molten +pylibmc: + runner: riot + is_snapshot: true + services: + - memcached + env: + SUITE_NAME: pylibmc +asynctest: + runner: riot + env: + SUITE_NAME: "asynctest$" + parallelism: 3 +pymemcache: + runner: riot + is_snapshot: true + services: + - memcached + env: + SUITE_NAME: pymemcache +pymongo: + runner: riot + is_snapshot: true + services: + - mongo + env: + SUITE_NAME: pymongo +pynamodb: + runner: riot + is_snapshot: true + env: + SUITE_NAME: pynamodb +pyodbc: + runner: riot + is_snapshot: true + env: + SUITE_NAME: pyodbc +pyramid: + runner: riot + is_snapshot: true + env: + SUITE_NAME: pyramid +requests: + runner: riot + is_snapshot: true + services: + - httpbin_local + env: + SUITE_NAME: requests +sanic: + runner: riot + is_snapshot: true + env: + SUITE_NAME: sanic +snowflake: + runner: riot + is_snapshot: true + env: + SUITE_NAME: snowflake +starlette: + runner: riot + is_snapshot: true + env: + SUITE_NAME: starlette +structlog: + runner: riot + is_snapshot: true + env: + SUITE_NAME: structlog +aredis: + runner: riot + is_snapshot: true + services: + - redis + env: + SUITE_NAME: "aredis$" + parallelism: 3 +yaaredis: + runner: riot + is_snapshot: true + services: + - redis + env: + SUITE_NAME: "yaaredis$" +redis: + runner: riot + is_snapshot: true + services: + - rediscluster + - redis + env: + SUITE_NAME: "^redis$" + parallelism: 5 +rediscluster: + runner: riot + is_snapshot: true + services: + - redis + - rediscluster + env: + SUITE_NAME: rediscluster +rq: + runner: riot + is_snapshot: true + services: + - redis + env: + SUITE_NAME: rq + parallelism: 2 +urllib3: + runner: riot + is_snapshot: true + services: + - httpbin_local + env: + SUITE_NAME: urllib3 + TEST_HTTPBIN_HOST: "httpbin-local" + TEST_HTTPBIN_PORT: "8001" +wsgi: + runner: riot + is_snapshot: true + env: + SUITE_NAME: wsgi +kafka: + runner: riot + is_snapshot: true + services: + - kafka + env: + SUITE_NAME: kafka + TEST_KAFKA_HOST: "kafka" + TEST_KAFKA_PORT: "29092" + parallelism: 4 +kombu: + runner: riot + is_snapshot: true + services: + - rabbitmq + env: + SUITE_NAME: kombu +jinja2: + runner: riot + is_snapshot: true + env: + SUITE_NAME: jinja2 +mako: + runner: riot + is_snapshot: true + env: + SUITE_NAME: mako +algoliasearch: + runner: riot + is_snapshot: true + env: + SUITE_NAME: algoliasearch +logbook: + runner: riot + is_snapshot: true + env: + SUITE_NAME: logbook +loguru: + runner: riot + is_snapshot: true + env: + SUITE_NAME: loguru +stdlib: + 
runner: riot + is_snapshot: true + env: + SUITE_NAME: 'asyncio$|sqlite3$|futures$|dbapi$|dbapi_async$' +test_logging: + runner: riot + is_snapshot: true + env: + SUITE_NAME: test_logging +subprocess: + runner: riot + env: + SUITE_NAME: subprocess +sourcecode: + runner: riot + env: + SUITE_NAME: sourcecode +opentracer: + runner: riot + env: + SUITE_NAME: opentracer +protobuf: + runner: riot + is_snapshot: true + env: + SUITE_NAME: protobuf +avro: + runner: riot + is_snapshot: true + env: + SUITE_NAME: avro diff --git a/tests/coverage/included_path/async_code.py b/tests/coverage/included_path/async_code.py new file mode 100644 index 00000000000..e21d694ff02 --- /dev/null +++ b/tests/coverage/included_path/async_code.py @@ -0,0 +1,14 @@ +import asyncio +import sys + + +async def get_line_number(): + try: + raise Exception("this is line 7") + except Exception: + exception_type, exception, traceback = sys.exc_info() + return traceback.tb_lineno + + +def call_async_function_and_report_line_number(): + return asyncio.run(get_line_number()) diff --git a/tests/coverage/test_coverage_async.py b/tests/coverage/test_coverage_async.py new file mode 100644 index 00000000000..059da55fc7d --- /dev/null +++ b/tests/coverage/test_coverage_async.py @@ -0,0 +1,52 @@ +import pytest + + +@pytest.mark.subprocess +def test_coverage_async_function(): + """ + Async functions in Python 3.10 have an initial GEN_START instruction with no corresponding line number. + This test ensures we can handle this case correctly and build the line number table correctly in the instrumented + functions. + """ + import os + from pathlib import Path + + from ddtrace.internal.coverage.code import ModuleCodeCollector + from ddtrace.internal.coverage.installer import install + from tests.coverage.utils import _get_relpath_dict + + cwd_path = os.getcwd() + include_path = Path(cwd_path + "/tests/coverage/included_path/") + + install(include_paths=[include_path], collect_import_time_coverage=True) + + from tests.coverage.included_path.async_code import call_async_function_and_report_line_number + + ModuleCodeCollector.start_coverage() + line_number = call_async_function_and_report_line_number() + ModuleCodeCollector.stop_coverage() + + executable = _get_relpath_dict(cwd_path, ModuleCodeCollector._instance.lines) + covered = _get_relpath_dict(cwd_path, ModuleCodeCollector._instance._get_covered_lines(include_imported=False)) + covered_with_imports = _get_relpath_dict( + cwd_path, ModuleCodeCollector._instance._get_covered_lines(include_imported=True) + ) + + expected_executable = { + "tests/coverage/included_path/async_code.py": {1, 2, 5, 6, 7, 8, 9, 10, 13, 14}, + } + expected_covered = { + "tests/coverage/included_path/async_code.py": {6, 7, 8, 9, 10, 14}, + } + expected_covered_with_imports = { + "tests/coverage/included_path/async_code.py": {1, 2, 5, 6, 7, 8, 9, 10, 13, 14}, + } + + assert ( + executable == expected_executable + ), f"Executable lines mismatch: expected={expected_executable} vs actual={executable}" + assert covered == expected_covered, f"Covered lines mismatch: expected={expected_covered} vs actual={covered}" + assert ( + covered_with_imports == expected_covered_with_imports + ), f"Covered lines with imports mismatch: expected={expected_covered_with_imports} vs actual={covered_with_imports}" + assert line_number == 7 diff --git a/tests/debugging/exception/test_replay.py b/tests/debugging/exception/test_replay.py index e2c75ff051e..8b9a2a7d830 100644 --- a/tests/debugging/exception/test_replay.py +++ 
b/tests/debugging/exception/test_replay.py @@ -1,3 +1,4 @@ +from collections import Counter from contextlib import contextmanager import sys @@ -31,6 +32,46 @@ def test_exception_replay_config_enabled_deprecated(monkeypatch): assert not er_config.enabled +def test_exception_chain_ident(): + def a(v, d=None): + if not v: + raise ValueError("hello", v) + + def b_chain(bar): + m = 4 + try: + a(bar % m) + except ValueError: + raise KeyError("chain it") + + def c(foo=42): + sh = 3 + b_chain(foo << sh) + + exc_idents = Counter() + + for _ in range(100): + try: + c() + except Exception as e: + chain, _ = replay.unwind_exception_chain(e, e.__traceback__) + exc_idents[replay.exception_chain_ident(chain)] += 1 + + assert len(exc_idents) == 1 + + # This generates a different exception chain since c is called at a + # different line number + for _ in range(100): + try: + c() + except Exception as e: + chain, _ = replay.unwind_exception_chain(e, e.__traceback__) + exc_idents[replay.exception_chain_ident(chain)] += 1 + + assert len(exc_idents) == 2 + assert all(v == 100 for v in exc_idents.values()) + + @contextmanager def with_rate_limiter(limiter): original_limiter = replay.GLOBAL_RATE_LIMITER @@ -212,3 +253,44 @@ def b(): assert span_a.name == "a" assert span_a.get_tag(replay.DEBUG_INFO_TAG) == "true" assert span_b.get_tag(replay.DEBUG_INFO_TAG) is None + + def test_debugger_exception_ident_limit(self): + def a(v, d=None): + with self.trace("a"): + if not v: + raise ValueError("hello", v) + + def b_chain(bar): + with self.trace("b"): + m = 4 + try: + a(bar % m) + except ValueError: + raise KeyError("chain it") + + def c(foo=42): + with self.trace("c"): + sh = 3 + b_chain(foo << sh) + + with exception_replay() as uploader: + rate_limiter = RateLimiter( + limit_rate=float("inf"), # no rate limit + raise_on_exceed=False, + ) + with with_rate_limiter(rate_limiter): + with pytest.raises(KeyError): + c() + + self.assert_span_count(3) + assert len(uploader.collector.queue) == 3 + + # Invoke again. 
The same exception won't be captured again in quick + # succession + with with_rate_limiter(rate_limiter): + with pytest.raises(KeyError): + c() + + self.assert_span_count(6) + # no new snapshots + assert len(uploader.collector.queue) == 3 diff --git a/tests/debugging/jobspec.yml b/tests/debugging/jobspec.yml new file mode 100644 index 00000000000..0e306aa642b --- /dev/null +++ b/tests/debugging/jobspec.yml @@ -0,0 +1,5 @@ +debugger: + runner: riot + env: + SUITE_NAME: debugger + parallelism: 2 diff --git a/tests/jobspec.yml b/tests/jobspec.yml new file mode 100644 index 00000000000..0005344a045 --- /dev/null +++ b/tests/jobspec.yml @@ -0,0 +1,46 @@ +internal: + runner: riot + is_snapshot: true + env: + SUITE_NAME: internal +tracer: + runner: riot + env: + SUITE_NAME: tracer + DD_TRACE_AGENT_URL: "http://localhost:8126" + parallelism: 9 + retry: 2 +telemetry: + runner: riot + is_snapshot: true + env: + SUITE_NAME: telemetry + parallelism: 6 +profile: + runner: riot + env: + SUITE_NAME: "profile$|profile-v2" + DD_TRACE_AGENT_URL: "" + parallelism: 20 + retry: 2 +integration_testagent: + runner: riot + is_snapshot: true + env: + SUITE_NAME: "integration-snapshot*" +integration_agent: + runner: riot + env: + SUITE_NAME: "integration-latest*" +vendor: + runner: riot + env: + SUITE_NAME: vendor + parallelism: 1 +ddtracerun: + runner: riot + services: + - redis + env: + SUITE_NAME: ddtracerun + parallelism: 6 diff --git a/tests/llmobs/jobspec.yml b/tests/llmobs/jobspec.yml new file mode 100644 index 00000000000..f039ce8302c --- /dev/null +++ b/tests/llmobs/jobspec.yml @@ -0,0 +1,27 @@ +llmobs: + runner: riot + env: + SUITE_NAME: llmobs +openai: + runner: riot + is_snapshot: true + env: + SUITE_NAME: openai + parallelism: 10 +langchain: + runner: riot + is_snapshot: true + env: + SUITE_NAME: langchain + parallelism: 6 +anthropic: + runner: riot + is_snapshot: true + env: + SUITE_NAME: anthropic + parallelism: 3 +google_generativeai: + runner: riot + is_snapshot: true + env: + SUITE_NAME: google_generativeai diff --git a/tests/profiling/collector/pprof_utils.py b/tests/profiling/collector/pprof_utils.py index 59f2795b6d6..95f865a4cdf 100644 --- a/tests/profiling/collector/pprof_utils.py +++ b/tests/profiling/collector/pprof_utils.py @@ -281,7 +281,7 @@ def assert_sample_has_locations(profile, sample, expected_locations: Optional[Li if expected_locations_idx < len(expected_locations): if ( function_name.endswith(expected_locations[expected_locations_idx].function_name) - and filename == expected_locations[expected_locations_idx].filename + and re.fullmatch(expected_locations[expected_locations_idx].filename, filename) and line_no == expected_locations[expected_locations_idx].line_no ): expected_locations_idx += 1 diff --git a/tests/profiling_v2/collector/test_stack.py b/tests/profiling_v2/collector/test_stack.py index d7e59a9266c..5d9007248bc 100644 --- a/tests/profiling_v2/collector/test_stack.py +++ b/tests/profiling_v2/collector/test_stack.py @@ -16,6 +16,15 @@ from tests.profiling.collector import pprof_utils +# Python 3.11.9 is not compatible with gevent, https://github.com/gevent/gevent/issues/2040 +# https://github.com/python/cpython/issues/117983 +# The fix was not backported to 3.11. The fix was first released in 3.12.5 for +# Python 3.12. Tested with Python 3.11.8 and 3.12.5 to confirm the issue. 
+TESTING_GEVENT = os.getenv("DD_PROFILE_TEST_GEVENT", False) and ( + sys.version_info < (3, 11, 9) or sys.version_info >= (3, 12, 5) +) + + @pytest.mark.parametrize("stack_v2_enabled", [True, False]) def test_stack_locations(stack_v2_enabled, tmp_path): if sys.version_info[:2] == (3, 7) and stack_v2_enabled: @@ -30,7 +39,7 @@ def test_stack_locations(stack_v2_enabled, tmp_path): ddup.start() def baz(): - time.sleep(0.01) + time.sleep(0.1) def bar(): baz() @@ -39,7 +48,7 @@ def foo(): bar() with stack.StackCollector(None, _stack_collector_v2_enabled=stack_v2_enabled): - for _ in range(5): + for _ in range(10): foo() ddup.upload() @@ -100,8 +109,8 @@ def test_push_span(stack_v2_enabled, tmp_path): with tracer.trace("foobar", resource=resource, span_type=span_type) as span: span_id = span.span_id local_root_span_id = span._local_root.span_id - for _ in range(5): - time.sleep(0.01) + for _ in range(10): + time.sleep(0.1) ddup.upload() profile = pprof_utils.parse_profile(output_filename) @@ -140,8 +149,8 @@ def test_push_span_unregister_thread(tmp_path, monkeypatch): span_type = ext.SpanTypes.WEB def target_fun(): - for _ in range(5): - time.sleep(0.01) + for _ in range(10): + time.sleep(0.1) with stack.StackCollector( None, @@ -205,8 +214,8 @@ def test_push_non_web_span(stack_v2_enabled, tmp_path): with tracer.trace("foobar", resource=resource, span_type=span_type) as span: span_id = span.span_id local_root_span_id = span._local_root.span_id - for _ in range(5): - time.sleep(0.01) + for _ in range(10): + time.sleep(0.1) ddup.upload() profile = pprof_utils.parse_profile(output_filename) @@ -255,8 +264,8 @@ def test_push_span_none_span_type(stack_v2_enabled, tmp_path): with tracer.trace("foobar", resource=resource, span_type=None) as span: span_id = span.span_id local_root_span_id = span._local_root.span_id - for _ in range(5): - time.sleep(0.01) + for _ in range(10): + time.sleep(0.1) ddup.upload() profile = pprof_utils.parse_profile(output_filename) @@ -348,7 +357,7 @@ def target_fun(): time.sleep(1) threads = [] - for _ in range(5): + for _ in range(10): t = threading.Thread(target=target_fun) threads.append(t) t.start() @@ -451,8 +460,8 @@ def sleep_class(cls): return cls().sleep_instance() def sleep_instance(self): - for _ in range(5): - time.sleep(0.01) + for _ in range(10): + time.sleep(0.1) test_name = "test_collect_once_with_class" pprof_prefix = str(tmp_path / test_name) @@ -510,8 +519,8 @@ def sleep_class(foobar, cls): return foobar().sleep_instance(cls) def sleep_instance(foobar, self): - for _ in range(5): - time.sleep(0.01) + for _ in range(10): + time.sleep(0.1) test_name = "test_collect_once_with_class" pprof_prefix = str(tmp_path / test_name) @@ -558,3 +567,128 @@ def sleep_instance(foobar, self): ], ), ) + + +def _fib(n): + if n == 1: + return 1 + elif n == 0: + return 0 + else: + return _fib(n - 1) + _fib(n - 2) + + +@pytest.mark.skipif(not TESTING_GEVENT, reason="Not testing gevent") +@pytest.mark.subprocess(ddtrace_run=True) +def test_collect_gevent_thread_task(): + # TODO(taegyunkim): update echion to support gevent and test with stack v2 + + from gevent import monkey + + monkey.patch_all() + + import os + import threading + import time + + from ddtrace.internal.datadog.profiling import ddup + from ddtrace.profiling.collector import stack + from tests.profiling.collector import pprof_utils + from tests.profiling_v2.collector.test_stack import _fib + + test_name = "test_collect_gevent_thread_task" + pprof_prefix = "/tmp/" + test_name + output_filename = pprof_prefix + "." 
+ str(os.getpid()) + + assert ddup.is_available + ddup.config(env="test", service=test_name, version="my_version", output_filename=pprof_prefix) + ddup.start() + + # Start some (green)threads + def _dofib(): + for _ in range(5): + # spend some time in CPU so the profiler can catch something + # On a Mac w/ Apple M3 MAX with Python 3.11 it takes about 200ms to calculate _fib(32) + # And _fib() is called 5 times so it should take about 1 second + # We use 5 threads below so it should take about 5 seconds + _fib(32) + # Just make sure gevent switches threads/greenlets + time.sleep(0) + + threads = [] + + with stack.StackCollector(None, _stack_collector_v2_enabled=False): + for i in range(5): + t = threading.Thread(target=_dofib, name="TestThread %d" % i) + t.start() + threads.append(t) + for t in threads: + t.join() + + ddup.upload() + + expected_task_ids = {thread.ident for thread in threads} + + profile = pprof_utils.parse_profile(output_filename) + samples = pprof_utils.get_samples_with_label_key(profile, "task id") + assert len(samples) > 0 + + checked_thread = False + + for sample in samples: + task_id_label = pprof_utils.get_label_with_key(profile.string_table, sample, "task id") + task_id = int(task_id_label.num) + if task_id in expected_task_ids: + pprof_utils.assert_stack_event( + profile, + sample, + pprof_utils.StackEvent( + task_name=r"TestThread \d+$", + task_id=task_id, + ), + ) + checked_thread = True + + assert checked_thread, "No samples found for the expected threads" + + +@pytest.mark.parametrize( + ("stack_v2_enabled", "ignore_profiler"), [(True, True), (True, False), (False, True), (False, False)] +) +def test_ignore_profiler(stack_v2_enabled, ignore_profiler, tmp_path): + if sys.version_info[:2] == (3, 7) and stack_v2_enabled: + pytest.skip("stack_v2 is not supported on Python 3.7") + + test_name = "test_ignore_profiler" + pprof_prefix = str(tmp_path / test_name) + output_filename = pprof_prefix + "." + str(os.getpid()) + + assert ddup.is_available + ddup.config(env="test", service=test_name, version="my_version", output_filename=pprof_prefix) + ddup.start() + + s = stack.StackCollector(None, ignore_profiler=ignore_profiler, _stack_collector_v2_enabled=stack_v2_enabled) + collector_worker_thread_id = None + + with s: + for _ in range(10): + time.sleep(0.1) + collector_worker_thread_id = s._worker.ident + + ddup.upload() + + profile = pprof_utils.parse_profile(output_filename) + samples = pprof_utils.get_samples_with_label_key(profile, "thread id") + + thread_ids = set() + + for sample in samples: + thread_id_label = pprof_utils.get_label_with_key(profile.string_table, sample, "thread id") + thread_id = int(thread_id_label.num) + thread_ids.add(thread_id) + + # TODO(taegyunkim): update echion to support ignore_profiler and test with stack v2 + if stack_v2_enabled or not ignore_profiler: + assert collector_worker_thread_id in thread_ids + else: + assert collector_worker_thread_id not in thread_ids diff --git a/tests/profiling_v2/test_gunicorn.py b/tests/profiling_v2/test_gunicorn.py index 79c33777a59..ef97d9683f8 100644 --- a/tests/profiling_v2/test_gunicorn.py +++ b/tests/profiling_v2/test_gunicorn.py @@ -93,15 +93,17 @@ def _test_gunicorn(gunicorn, tmp_path, monkeypatch, *args): samples = pprof_utils.get_samples_with_value_type(profile, "cpu-time") assert len(samples) > 0 + # DEV: somehow the filename is reported as either __init__.py or gunicorn-app.py + # when run on GitLab CI. We need to match either of these two. 
+ filename_regex = r"^(?:__init__\.py|gunicorn-app\.py)$" + + expected_location = pprof_utils.StackLocation(function_name="fib", filename=filename_regex, line_no=8) + pprof_utils.assert_profile_has_sample( profile, samples=samples, - expected_sample=pprof_utils.StackEvent( - locations=[ - pprof_utils.StackLocation(function_name="fib", filename="gunicorn-app.py", line_no=8), - pprof_utils.StackLocation(function_name="fib", filename="gunicorn-app.py", line_no=8), - ] - ), + # DEV: we expect multiple locations as fibonacci is recursive + expected_sample=pprof_utils.StackEvent(locations=[expected_location, expected_location]), ) diff --git a/tests/snapshots/tests.contrib.botocore.test.BotocoreTest.test_aws_payload_tagging_eventbridge.json b/tests/snapshots/tests.contrib.botocore.test.BotocoreTest.test_aws_payload_tagging_eventbridge.json new file mode 100644 index 00000000000..4a9752f5e7b --- /dev/null +++ b/tests/snapshots/tests.contrib.botocore.test.BotocoreTest.test_aws_payload_tagging_eventbridge.json @@ -0,0 +1,358 @@ +[[ + { + "name": "sqs.command", + "service": "aws.sqs", + "resource": "sqs.receivemessage", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "6725219200000000", + "aws.agent": "botocore", + "aws.operation": "ReceiveMessage", + "aws.region": "us-east-1", + "aws.request.body.MessageAttributeNames.0": "_datadog", + "aws.request.body.QueueUrl": "http://localhost:4566/000000000000/Test", + "aws.request.body.WaitTimeSeconds": "2", + "aws.requestid": "16AU1RXT4RWPY1HXDGGNCCRF5GUXUVSG8R1Y8WTJYHHHBGPOXQK0", + "aws.response.body.HTTPHeaders.access-control-allow-headers": "authorization,cache-control,content-length,content-md5,content-type,etag,location,x-amz-acl,x-amz-content-sha256,x-amz-date,x-amz-request-id,x-amz-security-token,x-amz-tagging,x-amz-target,x-amz-user-agent,x-amz-version-id,x-amzn-requestid,x-localstack-target,amz-sdk-invocation-id,amz-sdk-request", + "aws.response.body.HTTPHeaders.access-control-allow-methods": "HEAD,GET,PUT,POST,DELETE,OPTIONS,PATCH", + "aws.response.body.HTTPHeaders.access-control-allow-origin": "*", + "aws.response.body.HTTPHeaders.access-control-expose-headers": "etag,x-amz-version-id", + "aws.response.body.HTTPHeaders.connection": "close", + "aws.response.body.HTTPHeaders.content-length": "1577", + "aws.response.body.HTTPHeaders.content-type": "text/xml", + "aws.response.body.HTTPHeaders.date": "Fri, 01 Nov 2024 18:44:34 GMT", + "aws.response.body.HTTPHeaders.server": "hypercorn-h11", + "aws.response.body.HTTPStatusCode": "200", + "aws.response.body.RequestId": "16AU1RXT4RWPY1HXDGGNCCRF5GUXUVSG8R1Y8WTJYHHHBGPOXQK0", + "aws.response.body.RetryAttempts": "0", + "aws.sqs.queue_name": "Test", + "aws_account": "000000000000", + "aws_service": "sqs", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "queuename": "Test", + "region": "us-east-1", + "runtime-id": "45abaca6e35b47d4bbcab4d19d47895a", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 5659, + "retry_attempts": 0 + }, + "duration": 1347317707, + "start": 1730486674014275278 + }], +[ + { + "name": "events.command", + "service": "test-botocore-tracing.events", + "resource": "events.deleteeventbus", + "trace_id": 1, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": 
"6725219300000000", + "aws.agent": "botocore", + "aws.operation": "DeleteEventBus", + "aws.region": "us-east-1", + "aws.requestid": "EVXSBTVJ91PNTZP5MRXLAMCQTJ8AM6OD93HW9Y71W0QTQQ8FKBY1", + "aws_service": "events", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "region": "us-east-1", + "rulename": "a-test-bus", + "runtime-id": "45abaca6e35b47d4bbcab4d19d47895a", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 5659, + "retry_attempts": 0 + }, + "duration": 9324846, + "start": 1730486675361990473 + }], +[ + { + "name": "events.command", + "service": "test-botocore-tracing.events", + "resource": "events.putevents", + "trace_id": 2, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "6725219100000000", + "aws.agent": "botocore", + "aws.operation": "PutEvents", + "aws.region": "us-east-1", + "aws.requestid": "60SMT4RD398SJX3QGMNKLNJWVJ2QO0PR21XZPXIUFDOLFU026ZIF", + "aws_service": "events", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "region": "us-east-1", + "rulename": "", + "runtime-id": "45abaca6e35b47d4bbcab4d19d47895a", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 5659, + "retry_attempts": 0 + }, + "duration": 148280179, + "start": 1730486673839166140 + }], +[ + { + "name": "sqs.command", + "service": "aws.sqs", + "resource": "sqs.listqueues", + "trace_id": 3, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "6725219100000000", + "aws.agent": "botocore", + "aws.operation": "ListQueues", + "aws.region": "us-east-1", + "aws.requestid": "AIJI264DD6Q3YCHTEZTMNT0J9H7XHWUHB0ZUCYQNA1MFIB3B6M29", + "aws_service": "sqs", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "region": "us-east-1", + "runtime-id": "45abaca6e35b47d4bbcab4d19d47895a", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 5659, + "retry_attempts": 0 + }, + "duration": 9381944, + "start": 1730486673072395949 + }], +[ + { + "name": "sqs.command", + "service": "aws.sqs", + "resource": "sqs.createqueue", + "trace_id": 4, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "6725219100000000", + "aws.agent": "botocore", + "aws.operation": "CreateQueue", + "aws.region": "us-east-1", + "aws.requestid": "9CR6ZK55HZEO5DH880VV6MQM1VBIMWOY6TVI8X0UNC1IEFETHG33", + "aws.sqs.queue_name": "Test", + "aws_service": "sqs", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "queuename": "Test", + "region": "us-east-1", + "runtime-id": "45abaca6e35b47d4bbcab4d19d47895a", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 5659, + "retry_attempts": 0 + }, + "duration": 7090898, + "start": 1730486673082518808 + }], +[ + { + "name": "events.command", + "service": "aws.events", + "resource": "events.createeventbus", + "trace_id": 5, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", 
+ "_dd.p.dm": "-0", + "_dd.p.tid": "6725219100000000", + "aws.agent": "botocore", + "aws.operation": "CreateEventBus", + "aws.region": "us-east-1", + "aws.requestid": "3JM3A50OTY7PFQ6JJJ1O1YJWZ3D17LS1ZDL4NML7J8FKUEOTTYSI", + "aws_service": "events", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "region": "us-east-1", + "rulename": "a-test-bus", + "runtime-id": "45abaca6e35b47d4bbcab4d19d47895a", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 5659, + "retry_attempts": 0 + }, + "duration": 698383864, + "start": 1730486673115255701 + }], +[ + { + "name": "events.command", + "service": "aws.events", + "resource": "events.putrule", + "trace_id": 6, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "6725219100000000", + "aws.agent": "botocore", + "aws.operation": "PutRule", + "aws.region": "us-east-1", + "aws.requestid": "KNF5UFDRA15DP2B69OWT2144PWDEEGTL1TPD5JCY0VJR82J4T0O0", + "aws_service": "events", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "region": "us-east-1", + "rulename": "a-test-bus-rule", + "runtime-id": "45abaca6e35b47d4bbcab4d19d47895a", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 5659, + "retry_attempts": 0 + }, + "duration": 7914310, + "start": 1730486673814005493 + }], +[ + { + "name": "events.command", + "service": "aws.events", + "resource": "events.listrules", + "trace_id": 7, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "6725219100000000", + "aws.agent": "botocore", + "aws.operation": "ListRules", + "aws.region": "us-east-1", + "aws.requestid": "RSVB2I8YU33IJJTMR0OJFDC6PTNJ4YN70PBALOMPO4CEIUVO40RM", + "aws_service": "events", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "region": "us-east-1", + "runtime-id": "45abaca6e35b47d4bbcab4d19d47895a", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 5659, + "retry_attempts": 0 + }, + "duration": 8146264, + "start": 1730486673822260931 + }], +[ + { + "name": "events.command", + "service": "aws.events", + "resource": "events.puttargets", + "trace_id": 8, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "6725219100000000", + "aws.agent": "botocore", + "aws.operation": "PutTargets", + "aws.region": "us-east-1", + "aws.requestid": "B9WMS96T4C4CO40IRKN8NE6RKATC8FU7W5I9EQNE71WQZE7S3ZD2", + "aws_service": "events", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "region": "us-east-1", + "rulename": "", + "runtime-id": "45abaca6e35b47d4bbcab4d19d47895a", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 5659, + "retry_attempts": 0 + }, + "duration": 7987546, + "start": 1730486673830745107 + }]] diff --git a/tests/snapshots/tests.contrib.botocore.test.BotocoreTest.test_aws_payload_tagging_kinesis.json b/tests/snapshots/tests.contrib.botocore.test.BotocoreTest.test_aws_payload_tagging_kinesis.json new 
file mode 100644 index 00000000000..5747ae5cb7f --- /dev/null +++ b/tests/snapshots/tests.contrib.botocore.test.BotocoreTest.test_aws_payload_tagging_kinesis.json @@ -0,0 +1,176 @@ +[[ + { + "name": "kinesis.manual_span", + "service": "", + "resource": "kinesis.manual_span", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "", + "error": 0, + "meta": { + "_dd.p.dm": "-0", + "_dd.p.tid": "6725212f00000000", + "language": "python", + "runtime-id": "2139211528f64010b06b4e04ea9d4202" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 4315 + }, + "duration": 2128653547, + "start": 1730486575665558320 + }, + { + "name": "kinesis.command", + "service": "aws.kinesis", + "resource": "kinesis.createstream", + "trace_id": 0, + "span_id": 2, + "parent_id": 1, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "aws.agent": "botocore", + "aws.kinesis.stream_name": "test", + "aws.operation": "CreateStream", + "aws.region": "us-east-1", + "aws.request.body.ShardCount": "1", + "aws.request.body.StreamName": "test", + "aws.response.body.HTTPHeaders.date": "Fri, 01 Nov 2024 18:42:56 GMT", + "aws.response.body.HTTPHeaders.server": "amazon.com", + "aws.response.body.HTTPStatusCode": "200", + "aws.response.body.RetryAttempts": "0", + "aws_service": "kinesis", + "component": "botocore", + "http.status_code": "200", + "region": "us-east-1", + "span.kind": "client", + "streamname": "test" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "retry_attempts": 0 + }, + "duration": 1222098056, + "start": 1730486575665790837 + }, + { + "name": "kinesis.command", + "service": "aws.kinesis", + "resource": "kinesis.putrecords", + "trace_id": 0, + "span_id": 3, + "parent_id": 1, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "aws.agent": "botocore", + "aws.kinesis.stream_name": "test", + "aws.operation": "PutRecords", + "aws.region": "us-east-1", + "aws.request.body.Records.0.Data.Hello": "World", + "aws.request.body.Records.0.PartitionKey": "1234", + "aws.request.body.Records.1.Data.foo": "bar", + "aws.request.body.Records.1.PartitionKey": "1234", + "aws.request.body.StreamName": "test", + "aws.response.body.HTTPHeaders.date": "Fri, 01 Nov 2024 18:42:57 GMT", + "aws.response.body.HTTPHeaders.server": "amazon.com", + "aws.response.body.HTTPStatusCode": "200", + "aws.response.body.RetryAttempts": "0", + "aws_service": "kinesis", + "component": "botocore", + "http.status_code": "200", + "region": "us-east-1", + "span.kind": "client", + "streamname": "test" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "retry_attempts": 0 + }, + "duration": 905954737, + "start": 1730486576888145627 + }], +[ + { + "name": "sqs.command", + "service": "aws.sqs", + "resource": "sqs.listqueues", + "trace_id": 1, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "6725212f00000000", + "aws.agent": "botocore", + "aws.operation": "ListQueues", + "aws.region": "us-east-1", + "aws.requestid": "S0Y391B6GM02XQHP7AAXP8BV0IZ0PTOUQY82NR7SHN651ERF2H8W", + "aws_service": "sqs", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "region": "us-east-1", + "runtime-id": "2139211528f64010b06b4e04ea9d4202", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 4315, + "retry_attempts": 0 + }, + 
"duration": 9310053, + "start": 1730486575625781470 + }], +[ + { + "name": "sqs.command", + "service": "aws.sqs", + "resource": "sqs.createqueue", + "trace_id": 2, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "6725212f00000000", + "aws.agent": "botocore", + "aws.operation": "CreateQueue", + "aws.region": "us-east-1", + "aws.requestid": "HZLL7XGXSPWJD740TO4PBM4MN8CCVOL1UJRHUSV05ANLZCC0OEHN", + "aws.sqs.queue_name": "Test", + "aws_service": "sqs", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "queuename": "Test", + "region": "us-east-1", + "runtime-id": "2139211528f64010b06b4e04ea9d4202", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 4315, + "retry_attempts": 0 + }, + "duration": 6980220, + "start": 1730486575636068298 + }]] diff --git a/tests/snapshots/tests.contrib.botocore.test.BotocoreTest.test_aws_payload_tagging_s3.json b/tests/snapshots/tests.contrib.botocore.test.BotocoreTest.test_aws_payload_tagging_s3.json new file mode 100644 index 00000000000..123984f0989 --- /dev/null +++ b/tests/snapshots/tests.contrib.botocore.test.BotocoreTest.test_aws_payload_tagging_s3.json @@ -0,0 +1,196 @@ +[[ + { + "name": "s3.command", + "service": "test-botocore-tracing.s3", + "resource": "s3.listobjects", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 1, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "67251ffe00000000", + "aws.agent": "botocore", + "aws.operation": "ListObjects", + "aws.region": "us-west-2", + "aws.request.body.bucket": "mybucket", + "aws_service": "s3", + "component": "botocore", + "error.message": "Parameter validation failed:\nMissing required parameter in input: \"Bucket\"\nUnknown parameter in input: \"bucket\", must be one of: Bucket, Delimiter, EncodingType, Marker, MaxKeys, Prefix, RequestPayer, ExpectedBucketOwner, OptionalObjectAttributes", + "error.stack": "Traceback (most recent call last):\n File \"/root/project/ddtrace/contrib/internal/botocore/patch.py\", line 253, in patched_api_call_fallback\n result = original_func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/project/.riot/venv_py3119_mock_pytest_pytest-mock_coverage_pytest-cov_opentracing_hypothesis6451_moto[all]50_pytest-randomly_vcrpy601_botocore13449_boto313449/lib/python3.11/site-packages/botocore/client.py\", line 962, in _make_api_call\n request_dict = self._convert_to_request_dict(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/project/.riot/venv_py3119_mock_pytest_pytest-mock_coverage_pytest-cov_opentracing_hypothesis6451_moto[all]50_pytest-randomly_vcrpy601_botocore13449_boto313449/lib/python3.11/site-packages/botocore/client.py\", line 1036, in _convert_to_request_dict\n request_dict = self._serializer.serialize_to_request(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/project/.riot/venv_py3119_mock_pytest_pytest-mock_coverage_pytest-cov_opentracing_hypothesis6451_moto[all]50_pytest-randomly_vcrpy601_botocore13449_boto313449/lib/python3.11/site-packages/botocore/validate.py\", line 381, in serialize_to_request\n raise ParamValidationError(report=report.generate_report())\nbotocore.exceptions.ParamValidationError: Parameter validation failed:\nMissing required parameter in input: \"Bucket\"\nUnknown parameter in input: \"bucket\", must be one of: Bucket, Delimiter, EncodingType, 
Marker, MaxKeys, Prefix, RequestPayer, ExpectedBucketOwner, OptionalObjectAttributes\n", + "error.type": "botocore.exceptions.ParamValidationError", + "language": "python", + "region": "us-west-2", + "runtime-id": "010e1ba41dc54c378179fd51445afc77", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 3979 + }, + "duration": 457826385, + "start": 1730486270132291421 + }], +[ + { + "name": "s3.command", + "service": "test-botocore-tracing.s3", + "resource": "s3.listbuckets", + "trace_id": 1, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "67251ffc00000000", + "aws.agent": "botocore", + "aws.operation": "ListBuckets", + "aws.region": "us-west-2", + "aws.requestid": "niWe5HO1MgcT2vq21MtdLzhY2xGs3kiyEzq7yR8ShXhvU6qQLAKa", + "aws.response.body.HTTPHeaders.x-amzn-requestid": "niWe5HO1MgcT2vq21MtdLzhY2xGs3kiyEzq7yR8ShXhvU6qQLAKa", + "aws.response.body.HTTPStatusCode": "200", + "aws.response.body.RequestId": "niWe5HO1MgcT2vq21MtdLzhY2xGs3kiyEzq7yR8ShXhvU6qQLAKa", + "aws.response.body.RetryAttempts": "0", + "aws_service": "s3", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "region": "us-west-2", + "runtime-id": "010e1ba41dc54c378179fd51445afc77", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 3979, + "retry_attempts": 0 + }, + "duration": 943066360, + "start": 1730486268691244191 + }], +[ + { + "name": "s3.command", + "service": "test-botocore-tracing.s3", + "resource": "s3.listbuckets", + "trace_id": 2, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "67251ffd00000000", + "aws.agent": "botocore", + "aws.operation": "ListBuckets", + "aws.region": "us-west-2", + "aws.requestid": "JaXNUT8bsHmE8bhkTRBiExKb1QWSI4qU3TnOHrfJX102cbxUDJ6S", + "aws.response.body.HTTPHeaders.x-amzn-requestid": "JaXNUT8bsHmE8bhkTRBiExKb1QWSI4qU3TnOHrfJX102cbxUDJ6S", + "aws.response.body.HTTPStatusCode": "200", + "aws.response.body.RequestId": "JaXNUT8bsHmE8bhkTRBiExKb1QWSI4qU3TnOHrfJX102cbxUDJ6S", + "aws.response.body.RetryAttempts": "0", + "aws_service": "s3", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "region": "us-west-2", + "runtime-id": "010e1ba41dc54c378179fd51445afc77", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 3979, + "retry_attempts": 0 + }, + "duration": 493884956, + "start": 1730486269635719446 + }], +[ + { + "name": "sqs.command", + "service": "aws.sqs", + "resource": "sqs.listqueues", + "trace_id": 3, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "67251ffc00000000", + "aws.agent": "botocore", + "aws.operation": "ListQueues", + "aws.region": "us-east-1", + "aws.requestid": "BSU0QP7O8U7LB7A9GTEKB4PZBU80TJP477RS4QH8O4NT83DY5ZV2", + "aws_service": "sqs", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "region": "us-east-1", + "runtime-id": "010e1ba41dc54c378179fd51445afc77", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + 
"process_id": 3979, + "retry_attempts": 0 + }, + "duration": 19671972, + "start": 1730486268555991113 + }], +[ + { + "name": "sqs.command", + "service": "aws.sqs", + "resource": "sqs.createqueue", + "trace_id": 4, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "67251ffc00000000", + "aws.agent": "botocore", + "aws.operation": "CreateQueue", + "aws.region": "us-east-1", + "aws.requestid": "XYUX43DOF9DK8T4FVR9ANS4XBO6F7T25X5WR6UUNPIR9JBA1EYH5", + "aws.sqs.queue_name": "Test", + "aws_service": "sqs", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "queuename": "Test", + "region": "us-east-1", + "runtime-id": "010e1ba41dc54c378179fd51445afc77", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 3979, + "retry_attempts": 0 + }, + "duration": 25330622, + "start": 1730486268578039557 + }]] diff --git a/tests/snapshots/tests.contrib.botocore.test.BotocoreTest.test_aws_payload_tagging_s3_invalid_config.json b/tests/snapshots/tests.contrib.botocore.test.BotocoreTest.test_aws_payload_tagging_s3_invalid_config.json new file mode 100644 index 00000000000..12d47eb2b2e --- /dev/null +++ b/tests/snapshots/tests.contrib.botocore.test.BotocoreTest.test_aws_payload_tagging_s3_invalid_config.json @@ -0,0 +1,196 @@ +[[ + { + "name": "s3.command", + "service": "test-botocore-tracing.s3", + "resource": "s3.listobjects", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 1, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "672ce68500000000", + "aws.agent": "botocore", + "aws.operation": "ListObjects", + "aws.region": "us-west-2", + "aws.request.body.bucket": "mybucket", + "aws_service": "s3", + "component": "botocore", + "error.message": "Parameter validation failed:\nMissing required parameter in input: \"Bucket\"\nUnknown parameter in input: \"bucket\", must be one of: Bucket, Delimiter, EncodingType, Marker, MaxKeys, Prefix, RequestPayer, ExpectedBucketOwner, OptionalObjectAttributes", + "error.stack": "Traceback (most recent call last):\n File \"/root/project/ddtrace/contrib/internal/botocore/patch.py\", line 260, in patched_api_call_fallback\n result = original_func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/project/.riot/venv_py3119_mock_pytest_pytest-mock_coverage_pytest-cov_opentracing_hypothesis6451_moto[all]50_pytest-randomly_vcrpy601_botocore13449_boto313449/lib/python3.11/site-packages/botocore/client.py\", line 962, in _make_api_call\n request_dict = self._convert_to_request_dict(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/project/.riot/venv_py3119_mock_pytest_pytest-mock_coverage_pytest-cov_opentracing_hypothesis6451_moto[all]50_pytest-randomly_vcrpy601_botocore13449_boto313449/lib/python3.11/site-packages/botocore/client.py\", line 1036, in _convert_to_request_dict\n request_dict = self._serializer.serialize_to_request(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/project/.riot/venv_py3119_mock_pytest_pytest-mock_coverage_pytest-cov_opentracing_hypothesis6451_moto[all]50_pytest-randomly_vcrpy601_botocore13449_boto313449/lib/python3.11/site-packages/botocore/validate.py\", line 381, in serialize_to_request\n raise ParamValidationError(report=report.generate_report())\nbotocore.exceptions.ParamValidationError: Parameter validation failed:\nMissing required parameter in input: 
\"Bucket\"\nUnknown parameter in input: \"bucket\", must be one of: Bucket, Delimiter, EncodingType, Marker, MaxKeys, Prefix, RequestPayer, ExpectedBucketOwner, OptionalObjectAttributes\n", + "error.type": "botocore.exceptions.ParamValidationError", + "language": "python", + "region": "us-west-2", + "runtime-id": "04e00e3356e2405a80487efdd62ed083", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 15909 + }, + "duration": 258360186, + "start": 1730995845585860675 + }], +[ + { + "name": "sqs.command", + "service": "aws.sqs", + "resource": "sqs.listqueues", + "trace_id": 1, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "672ce68400000000", + "aws.agent": "botocore", + "aws.operation": "ListQueues", + "aws.region": "us-east-1", + "aws.requestid": "WH66MOGDZOAHLB9IDH1YENVWQWHNIFBCEYY6WXS90XPY62O5G9VQ", + "aws_service": "sqs", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "region": "us-east-1", + "runtime-id": "04e00e3356e2405a80487efdd62ed083", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 15909, + "retry_attempts": 0 + }, + "duration": 10536457, + "start": 1730995844726668252 + }], +[ + { + "name": "sqs.command", + "service": "aws.sqs", + "resource": "sqs.createqueue", + "trace_id": 2, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "672ce68400000000", + "aws.agent": "botocore", + "aws.operation": "CreateQueue", + "aws.region": "us-east-1", + "aws.requestid": "Y3NGMJ72PNIQFJYS5Q7ZBP1WOQOS66HXSC9EVWZV6S0FT38QGYK4", + "aws.sqs.queue_name": "Test", + "aws_service": "sqs", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "queuename": "Test", + "region": "us-east-1", + "runtime-id": "04e00e3356e2405a80487efdd62ed083", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 15909, + "retry_attempts": 0 + }, + "duration": 8369723, + "start": 1730995844738366514 + }], +[ + { + "name": "s3.command", + "service": "test-botocore-tracing.s3", + "resource": "s3.listbuckets", + "trace_id": 3, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "672ce68400000000", + "aws.agent": "botocore", + "aws.operation": "ListBuckets", + "aws.region": "us-west-2", + "aws.requestid": "QKK7SrxEnTcIlaipjYlzX0PtU7L57aCZZDk0FeqDwc1n5o2kTHch", + "aws.response.body.HTTPHeaders.x-amzn-requestid": "QKK7SrxEnTcIlaipjYlzX0PtU7L57aCZZDk0FeqDwc1n5o2kTHch", + "aws.response.body.HTTPStatusCode": "200", + "aws.response.body.RequestId": "QKK7SrxEnTcIlaipjYlzX0PtU7L57aCZZDk0FeqDwc1n5o2kTHch", + "aws.response.body.RetryAttempts": "0", + "aws_service": "s3", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "region": "us-west-2", + "runtime-id": "04e00e3356e2405a80487efdd62ed083", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 15909, + "retry_attempts": 0 + }, + "duration": 511793254, + "start": 1730995844812363498 + }], +[ + { + "name": "s3.command", + 
"service": "test-botocore-tracing.s3", + "resource": "s3.listbuckets", + "trace_id": 4, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "672ce68500000000", + "aws.agent": "botocore", + "aws.operation": "ListBuckets", + "aws.region": "us-west-2", + "aws.requestid": "6K6tOztZCjgQlL0jLXtNBRrRYUlYdzY28c5oBJ68sPm8V2puTSf1", + "aws.response.body.HTTPHeaders.x-amzn-requestid": "6K6tOztZCjgQlL0jLXtNBRrRYUlYdzY28c5oBJ68sPm8V2puTSf1", + "aws.response.body.HTTPStatusCode": "200", + "aws.response.body.RequestId": "6K6tOztZCjgQlL0jLXtNBRrRYUlYdzY28c5oBJ68sPm8V2puTSf1", + "aws.response.body.RetryAttempts": "0", + "aws_service": "s3", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "region": "us-west-2", + "runtime-id": "04e00e3356e2405a80487efdd62ed083", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 15909, + "retry_attempts": 0 + }, + "duration": 258140931, + "start": 1730995845324937218 + }]] diff --git a/tests/snapshots/tests.contrib.botocore.test.BotocoreTest.test_aws_payload_tagging_s3_valid_config.json b/tests/snapshots/tests.contrib.botocore.test.BotocoreTest.test_aws_payload_tagging_s3_valid_config.json new file mode 100644 index 00000000000..e33bc31123e --- /dev/null +++ b/tests/snapshots/tests.contrib.botocore.test.BotocoreTest.test_aws_payload_tagging_s3_valid_config.json @@ -0,0 +1,196 @@ +[[ + { + "name": "s3.command", + "service": "test-botocore-tracing.s3", + "resource": "s3.listobjects", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 1, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "672ce67a00000000", + "aws.agent": "botocore", + "aws.operation": "ListObjects", + "aws.region": "us-west-2", + "aws.request.body.bucket": "redacted", + "aws_service": "s3", + "component": "botocore", + "error.message": "Parameter validation failed:\nMissing required parameter in input: \"Bucket\"\nUnknown parameter in input: \"bucket\", must be one of: Bucket, Delimiter, EncodingType, Marker, MaxKeys, Prefix, RequestPayer, ExpectedBucketOwner, OptionalObjectAttributes", + "error.stack": "Traceback (most recent call last):\n File \"/root/project/ddtrace/contrib/internal/botocore/patch.py\", line 260, in patched_api_call_fallback\n result = original_func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/project/.riot/venv_py3119_mock_pytest_pytest-mock_coverage_pytest-cov_opentracing_hypothesis6451_moto[all]50_pytest-randomly_vcrpy601_botocore13449_boto313449/lib/python3.11/site-packages/botocore/client.py\", line 962, in _make_api_call\n request_dict = self._convert_to_request_dict(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/project/.riot/venv_py3119_mock_pytest_pytest-mock_coverage_pytest-cov_opentracing_hypothesis6451_moto[all]50_pytest-randomly_vcrpy601_botocore13449_boto313449/lib/python3.11/site-packages/botocore/client.py\", line 1036, in _convert_to_request_dict\n request_dict = self._serializer.serialize_to_request(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/project/.riot/venv_py3119_mock_pytest_pytest-mock_coverage_pytest-cov_opentracing_hypothesis6451_moto[all]50_pytest-randomly_vcrpy601_botocore13449_boto313449/lib/python3.11/site-packages/botocore/validate.py\", line 381, in serialize_to_request\n raise 
ParamValidationError(report=report.generate_report())\nbotocore.exceptions.ParamValidationError: Parameter validation failed:\nMissing required parameter in input: \"Bucket\"\nUnknown parameter in input: \"bucket\", must be one of: Bucket, Delimiter, EncodingType, Marker, MaxKeys, Prefix, RequestPayer, ExpectedBucketOwner, OptionalObjectAttributes\n", + "error.type": "botocore.exceptions.ParamValidationError", + "language": "python", + "region": "us-west-2", + "runtime-id": "759db61f971e4e51b3876c2b0771e2b6", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 15573 + }, + "duration": 425924882, + "start": 1730995834381654112 + }], +[ + { + "name": "s3.command", + "service": "test-botocore-tracing.s3", + "resource": "s3.listbuckets", + "trace_id": 1, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "672ce67900000000", + "aws.agent": "botocore", + "aws.operation": "ListBuckets", + "aws.region": "us-west-2", + "aws.requestid": "B7mZ00jPCOhjiOfnfZFCXJR1vlFYkR6uX51Sme72vnNfFzaHVB7D", + "aws.response.body.HTTPHeaders": "redacted", + "aws.response.body.HTTPStatusCode": "200", + "aws.response.body.RequestId": "B7mZ00jPCOhjiOfnfZFCXJR1vlFYkR6uX51Sme72vnNfFzaHVB7D", + "aws.response.body.RetryAttempts": "0", + "aws_service": "s3", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "region": "us-west-2", + "runtime-id": "759db61f971e4e51b3876c2b0771e2b6", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 15573, + "retry_attempts": 0 + }, + "duration": 711895059, + "start": 1730995833210793741 + }], +[ + { + "name": "s3.command", + "service": "test-botocore-tracing.s3", + "resource": "s3.listbuckets", + "trace_id": 2, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "672ce67900000000", + "aws.agent": "botocore", + "aws.operation": "ListBuckets", + "aws.region": "us-west-2", + "aws.requestid": "6rKSkfcYHp8uqW54aYbfS9f5l3nDFA2Xw0dgU65FZF6zAtCQLZa3", + "aws.response.body.HTTPHeaders": "redacted", + "aws.response.body.HTTPStatusCode": "200", + "aws.response.body.RequestId": "6rKSkfcYHp8uqW54aYbfS9f5l3nDFA2Xw0dgU65FZF6zAtCQLZa3", + "aws.response.body.RetryAttempts": "0", + "aws_service": "s3", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "region": "us-west-2", + "runtime-id": "759db61f971e4e51b3876c2b0771e2b6", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 15573, + "retry_attempts": 0 + }, + "duration": 454908141, + "start": 1730995833924042857 + }], +[ + { + "name": "sqs.command", + "service": "aws.sqs", + "resource": "sqs.listqueues", + "trace_id": 3, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "672ce67900000000", + "aws.agent": "botocore", + "aws.operation": "ListQueues", + "aws.region": "us-east-1", + "aws.requestid": "UZPT9ML68W82KW95PYMN61QF3HRNBY44SH0V1PPSG8W3QH1FL7OX", + "aws_service": "sqs", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "region": "us-east-1", + "runtime-id": 
"759db61f971e4e51b3876c2b0771e2b6", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 15573, + "retry_attempts": 0 + }, + "duration": 12420784, + "start": 1730995833133328246 + }], +[ + { + "name": "sqs.command", + "service": "aws.sqs", + "resource": "sqs.createqueue", + "trace_id": 4, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "672ce67900000000", + "aws.agent": "botocore", + "aws.operation": "CreateQueue", + "aws.region": "us-east-1", + "aws.requestid": "PUCYX1ZISWAFZSMWH7WZ5YZXJIM7PGIN2WHWKSSNEZUDYYCX6ZWW", + "aws.sqs.queue_name": "Test", + "aws_service": "sqs", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "queuename": "Test", + "region": "us-east-1", + "runtime-id": "759db61f971e4e51b3876c2b0771e2b6", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 15573, + "retry_attempts": 0 + }, + "duration": 8048502, + "start": 1730995833146606461 + }]] diff --git a/tests/snapshots/tests.contrib.botocore.test.BotocoreTest.test_aws_payload_tagging_sns.json b/tests/snapshots/tests.contrib.botocore.test.BotocoreTest.test_aws_payload_tagging_sns.json new file mode 100644 index 00000000000..25f05615402 --- /dev/null +++ b/tests/snapshots/tests.contrib.botocore.test.BotocoreTest.test_aws_payload_tagging_sns.json @@ -0,0 +1,386 @@ +[[ + { + "name": "sns.command", + "service": "test-botocore-tracing.sns", + "resource": "sns.deletetopic", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "6725229500000000", + "aws.agent": "botocore", + "aws.operation": "DeleteTopic", + "aws.region": "us-east-1", + "aws.request.body.TopicArn": "arn:aws:sns:us-east-1:000000000000:testTopic", + "aws.requestid": "Q2KSL71R5UZ98W8K82EI1N5K6NU6JQW1A4ELBFTWNT56EQ1YP460", + "aws.response.body.HTTPHeaders.access-control-allow-headers": "authorization,cache-control,content-length,content-md5,content-type,etag,location,x-amz-acl,x-amz-content-sha256,x-amz-date,x-amz-request-id,x-amz-security-token,x-amz-tagging,x-amz-target,x-amz-user-agent,x-amz-version-id,x-amzn-requestid,x-localstack-target,amz-sdk-invocation-id,amz-sdk-request", + "aws.response.body.HTTPHeaders.access-control-allow-methods": "HEAD,GET,PUT,POST,DELETE,OPTIONS,PATCH", + "aws.response.body.HTTPHeaders.access-control-allow-origin": "*", + "aws.response.body.HTTPHeaders.access-control-expose-headers": "etag,x-amz-version-id", + "aws.response.body.HTTPHeaders.connection": "close", + "aws.response.body.HTTPHeaders.content-length": "243", + "aws.response.body.HTTPHeaders.content-type": "text/xml", + "aws.response.body.HTTPHeaders.date": "Fri, 01 Nov 2024 18:48:53 GMT", + "aws.response.body.HTTPHeaders.server": "hypercorn-h11", + "aws.response.body.HTTPStatusCode": "200", + "aws.response.body.RequestId": "Q2KSL71R5UZ98W8K82EI1N5K6NU6JQW1A4ELBFTWNT56EQ1YP460", + "aws.response.body.RetryAttempts": "0", + "aws.sns.topic_arn": "arn:aws:sns:us-east-1:000000000000:testTopic", + "aws_account": "000000000000", + "aws_service": "sns", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "region": "us-east-1", + "runtime-id": "7fd35701caac4cdcaad996ae75408bfc", + "span.kind": "client", + "topicname": 
"testTopic" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 0.0, + "_sampling_priority_v1": 1, + "process_id": 8683, + "retry_attempts": 0 + }, + "duration": 914034299, + "start": 1730486933261601823 + }], +[ + { + "name": "sqs.command", + "service": "aws.sqs", + "resource": "sqs.receivemessage", + "trace_id": 1, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "6725229400000000", + "aws.agent": "botocore", + "aws.operation": "ReceiveMessage", + "aws.region": "us-east-1", + "aws.request.body.MessageAttributeNames.0": "_datadog", + "aws.request.body.QueueUrl": "http://localhost:4566/000000000000/Test", + "aws.request.body.WaitTimeSeconds": "2", + "aws.requestid": "NKWPYHQNVPLJRXAAPSDUJJ9NCY5AIQZZE7E9HJZX9NCWDDCWUFZX", + "aws.response.body.HTTPHeaders.access-control-allow-headers": "authorization,cache-control,content-length,content-md5,content-type,etag,location,x-amz-acl,x-amz-content-sha256,x-amz-date,x-amz-request-id,x-amz-security-token,x-amz-tagging,x-amz-target,x-amz-user-agent,x-amz-version-id,x-amzn-requestid,x-localstack-target,amz-sdk-invocation-id,amz-sdk-request", + "aws.response.body.HTTPHeaders.access-control-allow-methods": "HEAD,GET,PUT,POST,DELETE,OPTIONS,PATCH", + "aws.response.body.HTTPHeaders.access-control-allow-origin": "*", + "aws.response.body.HTTPHeaders.access-control-expose-headers": "etag,x-amz-version-id", + "aws.response.body.HTTPHeaders.connection": "close", + "aws.response.body.HTTPHeaders.content-length": "2342", + "aws.response.body.HTTPHeaders.content-type": "text/xml", + "aws.response.body.HTTPHeaders.date": "Fri, 01 Nov 2024 18:48:52 GMT", + "aws.response.body.HTTPHeaders.server": "hypercorn-h11", + "aws.response.body.HTTPStatusCode": "200", + "aws.response.body.RequestId": "NKWPYHQNVPLJRXAAPSDUJJ9NCY5AIQZZE7E9HJZX9NCWDDCWUFZX", + "aws.response.body.RetryAttempts": "0", + "aws.sqs.queue_name": "Test", + "aws_account": "000000000000", + "aws_service": "sqs", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "queuename": "Test", + "region": "us-east-1", + "runtime-id": "7fd35701caac4cdcaad996ae75408bfc", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 8683, + "retry_attempts": 0 + }, + "duration": 877443506, + "start": 1730486932383733772 + }], +[ + { + "name": "sns.command", + "service": "test-botocore-tracing.sns", + "resource": "sns.publishbatch", + "trace_id": 2, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "6725229300000000", + "aws.agent": "botocore", + "aws.operation": "PublishBatch", + "aws.region": "us-east-1", + "aws.request.body.PublishBatchRequestEntries.0.Id": "1", + "aws.request.body.PublishBatchRequestEntries.0.Message": "ironmaiden", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.eight.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.eight.StringValue": "eight", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.five.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.five.StringValue": "five", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.four.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.four.StringValue": "four", + 
"aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.nine.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.nine.StringValue": "nine", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.one.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.one.StringValue": "one", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.seven.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.seven.StringValue": "seven", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.six.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.six.StringValue": "six", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.ten.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.ten.StringValue": "ten", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.three.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.three.StringValue": "three", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.two.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.two.StringValue": "two", + "aws.request.body.PublishBatchRequestEntries.1.Id": "2", + "aws.request.body.PublishBatchRequestEntries.1.Message": "megadeth", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.eight.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.eight.StringValue": "eight", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.five.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.five.StringValue": "five", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.four.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.four.StringValue": "four", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.nine.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.nine.StringValue": "nine", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.one.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.one.StringValue": "one", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.seven.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.seven.StringValue": "seven", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.six.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.six.StringValue": "six", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.ten.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.ten.StringValue": "ten", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.three.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.three.StringValue": "three", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.two.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.two.StringValue": "two", + "aws.request.body.TopicArn": "arn:aws:sns:us-east-1:000000000000:testTopic", + "aws.requestid": "TNF9YUHZNEE2DH09T351ETMYYBHDO7IVF6AHLDHWCTGW8N1NALS9", + "aws.response.body.HTTPHeaders.access-control-allow-headers": 
"authorization,cache-control,content-length,content-md5,content-type,etag,location,x-amz-acl,x-amz-content-sha256,x-amz-date,x-amz-request-id,x-amz-security-token,x-amz-tagging,x-amz-target,x-amz-user-agent,x-amz-version-id,x-amzn-requestid,x-localstack-target,amz-sdk-invocation-id,amz-sdk-request", + "aws.response.body.HTTPHeaders.access-control-allow-methods": "HEAD,GET,PUT,POST,DELETE,OPTIONS,PATCH", + "aws.response.body.HTTPHeaders.access-control-allow-origin": "*", + "aws.response.body.HTTPHeaders.access-control-expose-headers": "etag,x-amz-version-id", + "aws.response.body.HTTPHeaders.connection": "close", + "aws.response.body.HTTPHeaders.content-length": "493", + "aws.response.body.HTTPHeaders.content-type": "text/xml", + "aws.response.body.HTTPHeaders.date": "Fri, 01 Nov 2024 18:48:51 GMT", + "aws.response.body.HTTPHeaders.server": "hypercorn-h11", + "aws.response.body.HTTPStatusCode": "200", + "aws.response.body.RequestId": "TNF9YUHZNEE2DH09T351ETMYYBHDO7IVF6AHLDHWCTGW8N1NALS9", + "aws.response.body.RetryAttempts": "0", + "aws.sns.topic_arn": "arn:aws:sns:us-east-1:000000000000:testTopic", + "aws_account": "000000000000", + "aws_service": "sns", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "region": "us-east-1", + "runtime-id": "7fd35701caac4cdcaad996ae75408bfc", + "span.kind": "client", + "topicname": "testTopic" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 8683, + "retry_attempts": 0 + }, + "duration": 930994951, + "start": 1730486931437714538 + }], +[ + { + "name": "sns.command", + "service": "aws.sns", + "resource": "sns.createtopic", + "trace_id": 3, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "6725229100000000", + "aws.agent": "botocore", + "aws.operation": "CreateTopic", + "aws.region": "us-east-1", + "aws.request.body.Name": "testTopic", + "aws.requestid": "2SP1AKIYVI5YTSKQBPCT46FFRB3EHSPEF9HI80FKC5BOITPMG21L", + "aws.response.body.HTTPHeaders.access-control-allow-headers": "authorization,cache-control,content-length,content-md5,content-type,etag,location,x-amz-acl,x-amz-content-sha256,x-amz-date,x-amz-request-id,x-amz-security-token,x-amz-tagging,x-amz-target,x-amz-user-agent,x-amz-version-id,x-amzn-requestid,x-localstack-target,amz-sdk-invocation-id,amz-sdk-request", + "aws.response.body.HTTPHeaders.access-control-allow-methods": "HEAD,GET,PUT,POST,DELETE,OPTIONS,PATCH", + "aws.response.body.HTTPHeaders.access-control-allow-origin": "*", + "aws.response.body.HTTPHeaders.access-control-expose-headers": "etag,x-amz-version-id", + "aws.response.body.HTTPHeaders.connection": "close", + "aws.response.body.HTTPHeaders.content-length": "347", + "aws.response.body.HTTPHeaders.content-type": "text/xml", + "aws.response.body.HTTPHeaders.date": "Fri, 01 Nov 2024 18:48:50 GMT", + "aws.response.body.HTTPHeaders.server": "hypercorn-h11", + "aws.response.body.HTTPStatusCode": "200", + "aws.response.body.RequestId": "2SP1AKIYVI5YTSKQBPCT46FFRB3EHSPEF9HI80FKC5BOITPMG21L", + "aws.response.body.RetryAttempts": "0", + "aws_service": "sns", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "region": "us-east-1", + "runtime-id": "7fd35701caac4cdcaad996ae75408bfc", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 8683, + "retry_attempts": 0 
+ }, + "duration": 1156027576, + "start": 1730486929411636687 + }], +[ + { + "name": "sns.command", + "service": "aws.sns", + "resource": "sns.subscribe", + "trace_id": 4, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "6725229200000000", + "aws.agent": "botocore", + "aws.operation": "Subscribe", + "aws.region": "us-east-1", + "aws.request.body.Endpoint": "redacted", + "aws.request.body.Protocol": "sqs", + "aws.request.body.TopicArn": "arn:aws:sns:us-east-1:000000000000:testTopic", + "aws.requestid": "DRZH8BAT8J1JR5XVGQ39DH5R0UC7T0SFOAP4EG28LXF9T0B38UIL", + "aws.response.body.HTTPHeaders.access-control-allow-headers": "authorization,cache-control,content-length,content-md5,content-type,etag,location,x-amz-acl,x-amz-content-sha256,x-amz-date,x-amz-request-id,x-amz-security-token,x-amz-tagging,x-amz-target,x-amz-user-agent,x-amz-version-id,x-amzn-requestid,x-localstack-target,amz-sdk-invocation-id,amz-sdk-request", + "aws.response.body.HTTPHeaders.access-control-allow-methods": "HEAD,GET,PUT,POST,DELETE,OPTIONS,PATCH", + "aws.response.body.HTTPHeaders.access-control-allow-origin": "*", + "aws.response.body.HTTPHeaders.access-control-expose-headers": "etag,x-amz-version-id", + "aws.response.body.HTTPHeaders.connection": "close", + "aws.response.body.HTTPHeaders.content-length": "390", + "aws.response.body.HTTPHeaders.content-type": "text/xml", + "aws.response.body.HTTPHeaders.date": "Fri, 01 Nov 2024 18:48:50 GMT", + "aws.response.body.HTTPHeaders.server": "hypercorn-h11", + "aws.response.body.HTTPStatusCode": "200", + "aws.response.body.RequestId": "DRZH8BAT8J1JR5XVGQ39DH5R0UC7T0SFOAP4EG28LXF9T0B38UIL", + "aws.response.body.RetryAttempts": "0", + "aws.sns.topic_arn": "arn:aws:sns:us-east-1:000000000000:testTopic", + "aws_account": "000000000000", + "aws_service": "sns", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "region": "us-east-1", + "runtime-id": "7fd35701caac4cdcaad996ae75408bfc", + "span.kind": "client", + "topicname": "testTopic" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 8683, + "retry_attempts": 0 + }, + "duration": 869311702, + "start": 1730486930568014008 + }], +[ + { + "name": "sqs.command", + "service": "aws.sqs", + "resource": "sqs.listqueues", + "trace_id": 5, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "6725229100000000", + "aws.agent": "botocore", + "aws.operation": "ListQueues", + "aws.region": "us-east-1", + "aws.requestid": "OZZ8QUD0LZ2VTC35GMOFDAOEC1QYXTHUXCOHQDE3FSHVTUIBPOC9", + "aws_service": "sqs", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "region": "us-east-1", + "runtime-id": "7fd35701caac4cdcaad996ae75408bfc", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 8683, + "retry_attempts": 0 + }, + "duration": 9532531, + "start": 1730486929363170092 + }], +[ + { + "name": "sqs.command", + "service": "aws.sqs", + "resource": "sqs.createqueue", + "trace_id": 6, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "6725229100000000", + "aws.agent": "botocore", + "aws.operation": "CreateQueue", + "aws.region": "us-east-1", + 
"aws.requestid": "VICJ8RVZKMBFGEHY06P9E28AOEZKCU5R66AE00RSB229D8Z8Y8TR", + "aws.sqs.queue_name": "Test", + "aws_service": "sqs", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "queuename": "Test", + "region": "us-east-1", + "runtime-id": "7fd35701caac4cdcaad996ae75408bfc", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 8683, + "retry_attempts": 0 + }, + "duration": 7289635, + "start": 1730486929373616666 + }]] diff --git a/tests/snapshots/tests.contrib.botocore.test.BotocoreTest.test_aws_payload_tagging_sns_valid_config.json b/tests/snapshots/tests.contrib.botocore.test.BotocoreTest.test_aws_payload_tagging_sns_valid_config.json new file mode 100644 index 00000000000..095f8179618 --- /dev/null +++ b/tests/snapshots/tests.contrib.botocore.test.BotocoreTest.test_aws_payload_tagging_sns_valid_config.json @@ -0,0 +1,386 @@ +[[ + { + "name": "sns.command", + "service": "test-botocore-tracing.sns", + "resource": "sns.deletetopic", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "672ce8ac00000000", + "aws.agent": "botocore", + "aws.operation": "DeleteTopic", + "aws.region": "us-east-1", + "aws.request.body.TopicArn": "arn:aws:sns:us-east-1:000000000000:testTopic", + "aws.requestid": "JVBHL3D33EU9ESFUBT6UKMVYQZSIF8BUYJOUNISNJE7722YAZCXZ", + "aws.response.body.HTTPHeaders.access-control-allow-headers": "redacted", + "aws.response.body.HTTPHeaders.access-control-allow-methods": "redacted", + "aws.response.body.HTTPHeaders.access-control-allow-origin": "redacted", + "aws.response.body.HTTPHeaders.access-control-expose-headers": "redacted", + "aws.response.body.HTTPHeaders.connection": "redacted", + "aws.response.body.HTTPHeaders.content-length": "redacted", + "aws.response.body.HTTPHeaders.content-type": "redacted", + "aws.response.body.HTTPHeaders.date": "redacted", + "aws.response.body.HTTPHeaders.server": "redacted", + "aws.response.body.HTTPStatusCode": "200", + "aws.response.body.RequestId": "JVBHL3D33EU9ESFUBT6UKMVYQZSIF8BUYJOUNISNJE7722YAZCXZ", + "aws.response.body.RetryAttempts": "0", + "aws.sns.topic_arn": "arn:aws:sns:us-east-1:000000000000:testTopic", + "aws_account": "000000000000", + "aws_service": "sns", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "region": "us-east-1", + "runtime-id": "cb8f28ac358e4a2a93617272fb46ba0a", + "span.kind": "client", + "topicname": "testTopic" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 0.0, + "_sampling_priority_v1": 1, + "process_id": 17253, + "retry_attempts": 0 + }, + "duration": 950339368, + "start": 1730996396594477506 + }], +[ + { + "name": "sqs.command", + "service": "aws.sqs", + "resource": "sqs.receivemessage", + "trace_id": 1, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "672ce8ab00000000", + "aws.agent": "botocore", + "aws.operation": "ReceiveMessage", + "aws.region": "us-east-1", + "aws.request.body.MessageAttributeNames.0": "_datadog", + "aws.request.body.QueueUrl": "http://localhost:4566/000000000000/Test", + "aws.request.body.WaitTimeSeconds": "2", + "aws.requestid": "MIWENHGLXX91BMDPEB6MCEP2SKAJ75EERGWKOZIMLK73DBIP2Y2M", + "aws.response.body.HTTPHeaders.access-control-allow-headers": "redacted", + 
"aws.response.body.HTTPHeaders.access-control-allow-methods": "redacted", + "aws.response.body.HTTPHeaders.access-control-allow-origin": "redacted", + "aws.response.body.HTTPHeaders.access-control-expose-headers": "redacted", + "aws.response.body.HTTPHeaders.connection": "redacted", + "aws.response.body.HTTPHeaders.content-length": "redacted", + "aws.response.body.HTTPHeaders.content-type": "redacted", + "aws.response.body.HTTPHeaders.date": "redacted", + "aws.response.body.HTTPHeaders.server": "redacted", + "aws.response.body.HTTPStatusCode": "200", + "aws.response.body.RequestId": "MIWENHGLXX91BMDPEB6MCEP2SKAJ75EERGWKOZIMLK73DBIP2Y2M", + "aws.response.body.RetryAttempts": "0", + "aws.sqs.queue_name": "Test", + "aws_account": "000000000000", + "aws_service": "sqs", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "queuename": "Test", + "region": "us-east-1", + "runtime-id": "cb8f28ac358e4a2a93617272fb46ba0a", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 17253, + "retry_attempts": 0 + }, + "duration": 936840871, + "start": 1730996395657282340 + }], +[ + { + "name": "sns.command", + "service": "test-botocore-tracing.sns", + "resource": "sns.publishbatch", + "trace_id": 2, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "672ce8aa00000000", + "aws.agent": "botocore", + "aws.operation": "PublishBatch", + "aws.region": "us-east-1", + "aws.request.body.PublishBatchRequestEntries.0.Id": "redacted", + "aws.request.body.PublishBatchRequestEntries.0.Message": "redacted", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.eight.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.eight.StringValue": "eight", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.f\\.our.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.f\\.our.StringValue": "four", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.five.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.five.StringValue": "five", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.nine.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.nine.StringValue": "nine", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.one.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.one.StringValue": "one", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.seven.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.seven.StringValue": "seven", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.six.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.six.StringValue": "six", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.ten.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.ten.StringValue": "ten", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.three.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.three.StringValue": "three", + "aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.two.DataType": "String", + 
"aws.request.body.PublishBatchRequestEntries.0.MessageAttributes.two.StringValue": "two", + "aws.request.body.PublishBatchRequestEntries.1.Id": "redacted", + "aws.request.body.PublishBatchRequestEntries.1.Message": "redacted", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.eight.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.eight.StringValue": "eight", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.f\\.our.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.f\\.our.StringValue": "four", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.five.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.five.StringValue": "five", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.nine.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.nine.StringValue": "nine", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.one.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.one.StringValue": "one", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.seven.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.seven.StringValue": "seven", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.six.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.six.StringValue": "six", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.ten.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.ten.StringValue": "ten", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.three.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.three.StringValue": "three", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.two.DataType": "String", + "aws.request.body.PublishBatchRequestEntries.1.MessageAttributes.two.StringValue": "two", + "aws.request.body.TopicArn": "arn:aws:sns:us-east-1:000000000000:testTopic", + "aws.requestid": "O6X3QA4Q41MYQ6XSGNDZCLA0PSKUKJ045M1LZW3JROSYD5XUFDUT", + "aws.response.body.HTTPHeaders.access-control-allow-headers": "redacted", + "aws.response.body.HTTPHeaders.access-control-allow-methods": "redacted", + "aws.response.body.HTTPHeaders.access-control-allow-origin": "redacted", + "aws.response.body.HTTPHeaders.access-control-expose-headers": "redacted", + "aws.response.body.HTTPHeaders.connection": "redacted", + "aws.response.body.HTTPHeaders.content-length": "redacted", + "aws.response.body.HTTPHeaders.content-type": "redacted", + "aws.response.body.HTTPHeaders.date": "redacted", + "aws.response.body.HTTPHeaders.server": "redacted", + "aws.response.body.HTTPStatusCode": "200", + "aws.response.body.RequestId": "O6X3QA4Q41MYQ6XSGNDZCLA0PSKUKJ045M1LZW3JROSYD5XUFDUT", + "aws.response.body.RetryAttempts": "0", + "aws.sns.topic_arn": "arn:aws:sns:us-east-1:000000000000:testTopic", + "aws_account": "000000000000", + "aws_service": "sns", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "region": "us-east-1", + "runtime-id": "cb8f28ac358e4a2a93617272fb46ba0a", + "span.kind": "client", + "topicname": "testTopic" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 17253, + "retry_attempts": 0 + }, + "duration": 947785220, + 
"start": 1730996394696414263 + }], +[ + { + "name": "sns.command", + "service": "aws.sns", + "resource": "sns.createtopic", + "trace_id": 3, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "672ce8a800000000", + "aws.agent": "botocore", + "aws.operation": "CreateTopic", + "aws.region": "us-east-1", + "aws.request.body.Name": "testTopic", + "aws.requestid": "AMFHK8CWR6R3KC6JT993EINBSHRUAKJ0K8DDMQPFSKVG83GGCZFM", + "aws.response.body.HTTPHeaders.access-control-allow-headers": "redacted", + "aws.response.body.HTTPHeaders.access-control-allow-methods": "redacted", + "aws.response.body.HTTPHeaders.access-control-allow-origin": "redacted", + "aws.response.body.HTTPHeaders.access-control-expose-headers": "redacted", + "aws.response.body.HTTPHeaders.connection": "redacted", + "aws.response.body.HTTPHeaders.content-length": "redacted", + "aws.response.body.HTTPHeaders.content-type": "redacted", + "aws.response.body.HTTPHeaders.date": "redacted", + "aws.response.body.HTTPHeaders.server": "redacted", + "aws.response.body.HTTPStatusCode": "200", + "aws.response.body.RequestId": "AMFHK8CWR6R3KC6JT993EINBSHRUAKJ0K8DDMQPFSKVG83GGCZFM", + "aws.response.body.RetryAttempts": "0", + "aws_service": "sns", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "region": "us-east-1", + "runtime-id": "cb8f28ac358e4a2a93617272fb46ba0a", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 17253, + "retry_attempts": 0 + }, + "duration": 1002298135, + "start": 1730996392768290024 + }], +[ + { + "name": "sns.command", + "service": "aws.sns", + "resource": "sns.subscribe", + "trace_id": 4, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "672ce8a900000000", + "aws.agent": "botocore", + "aws.operation": "Subscribe", + "aws.region": "us-east-1", + "aws.request.body.Endpoint": "redacted", + "aws.request.body.Protocol": "sqs", + "aws.request.body.TopicArn": "arn:aws:sns:us-east-1:000000000000:testTopic", + "aws.requestid": "4CICZ8EQUUOACM1IORQNZEDQ33KF9L1RN77E4HL844IK3HR67905", + "aws.response.body.HTTPHeaders.access-control-allow-headers": "redacted", + "aws.response.body.HTTPHeaders.access-control-allow-methods": "redacted", + "aws.response.body.HTTPHeaders.access-control-allow-origin": "redacted", + "aws.response.body.HTTPHeaders.access-control-expose-headers": "redacted", + "aws.response.body.HTTPHeaders.connection": "redacted", + "aws.response.body.HTTPHeaders.content-length": "redacted", + "aws.response.body.HTTPHeaders.content-type": "redacted", + "aws.response.body.HTTPHeaders.date": "redacted", + "aws.response.body.HTTPHeaders.server": "redacted", + "aws.response.body.HTTPStatusCode": "200", + "aws.response.body.RequestId": "4CICZ8EQUUOACM1IORQNZEDQ33KF9L1RN77E4HL844IK3HR67905", + "aws.response.body.RetryAttempts": "0", + "aws.sns.topic_arn": "arn:aws:sns:us-east-1:000000000000:testTopic", + "aws_account": "000000000000", + "aws_service": "sns", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "region": "us-east-1", + "runtime-id": "cb8f28ac358e4a2a93617272fb46ba0a", + "span.kind": "client", + "topicname": "testTopic" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 17253, + "retry_attempts": 
0 + }, + "duration": 925091785, + "start": 1730996393770937800 + }], +[ + { + "name": "sqs.command", + "service": "aws.sqs", + "resource": "sqs.listqueues", + "trace_id": 5, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "672ce8a800000000", + "aws.agent": "botocore", + "aws.operation": "ListQueues", + "aws.region": "us-east-1", + "aws.requestid": "1R7ANT6AC71E49ONC2STF3NHNM8YGOWXDNYGBPX1QGLXEB6MXAZ3", + "aws_service": "sqs", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "region": "us-east-1", + "runtime-id": "cb8f28ac358e4a2a93617272fb46ba0a", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 17253, + "retry_attempts": 0 + }, + "duration": 9878649, + "start": 1730996392724547367 + }], +[ + { + "name": "sqs.command", + "service": "aws.sqs", + "resource": "sqs.createqueue", + "trace_id": 6, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "672ce8a800000000", + "aws.agent": "botocore", + "aws.operation": "CreateQueue", + "aws.region": "us-east-1", + "aws.requestid": "I4Y5CAECJIZRS8C7GVJK7GQG0Y1NDGA95JPTBSWNCXIQGRNIOZS3", + "aws.sqs.queue_name": "Test", + "aws_service": "sqs", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "queuename": "Test", + "region": "us-east-1", + "runtime-id": "cb8f28ac358e4a2a93617272fb46ba0a", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 17253, + "retry_attempts": 0 + }, + "duration": 7524614, + "start": 1730996392735210004 + }]] diff --git a/tests/snapshots/tests.contrib.botocore.test.BotocoreTest.test_aws_payload_tagging_sqs.json b/tests/snapshots/tests.contrib.botocore.test.BotocoreTest.test_aws_payload_tagging_sqs.json new file mode 100644 index 00000000000..af0f6cc4ee0 --- /dev/null +++ b/tests/snapshots/tests.contrib.botocore.test.BotocoreTest.test_aws_payload_tagging_sqs.json @@ -0,0 +1,205 @@ +[[ + { + "name": "sqs.command", + "service": "test-botocore-tracing.sqs", + "resource": "sqs.receivemessage", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "672522b300000000", + "aws.agent": "botocore", + "aws.operation": "ReceiveMessage", + "aws.region": "us-east-1", + "aws.request.body.MessageAttributeNames.0": "_datadog", + "aws.request.body.QueueUrl": "http://localhost:4566/000000000000/Test", + "aws.request.body.WaitTimeSeconds": "2", + "aws.requestid": "WJ6C1QMRXCMYSO918PTT0Y8488VH2RZHPKHZZXQZ1E3L079PE2PE", + "aws.response.body.HTTPHeaders.access-control-allow-headers": "authorization,cache-control,content-length,content-md5,content-type,etag,location,x-amz-acl,x-amz-content-sha256,x-amz-date,x-amz-request-id,x-amz-security-token,x-amz-tagging,x-amz-target,x-amz-user-agent,x-amz-version-id,x-amzn-requestid,x-localstack-target,amz-sdk-invocation-id,amz-sdk-request", + "aws.response.body.HTTPHeaders.access-control-allow-methods": "HEAD,GET,PUT,POST,DELETE,OPTIONS,PATCH", + "aws.response.body.HTTPHeaders.access-control-allow-origin": "*", + "aws.response.body.HTTPHeaders.access-control-expose-headers": "etag,x-amz-version-id", + "aws.response.body.HTTPHeaders.connection": "close", + 
"aws.response.body.HTTPHeaders.content-length": "654", + "aws.response.body.HTTPHeaders.content-type": "text/xml", + "aws.response.body.HTTPHeaders.date": "Fri, 01 Nov 2024 18:49:24 GMT", + "aws.response.body.HTTPHeaders.server": "hypercorn-h11", + "aws.response.body.HTTPStatusCode": "200", + "aws.response.body.RequestId": "WJ6C1QMRXCMYSO918PTT0Y8488VH2RZHPKHZZXQZ1E3L079PE2PE", + "aws.response.body.RetryAttempts": "0", + "aws.sqs.queue_name": "Test", + "aws_account": "000000000000", + "aws_service": "sqs", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "queuename": "Test", + "region": "us-east-1", + "runtime-id": "e45a6c451bfb47bfbb0f4f36bd0803a3", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 9691, + "retry_attempts": 0 + }, + "duration": 895390012, + "start": 1730486964009311666 + }], +[ + { + "name": "sqs.command", + "service": "aws.sqs", + "resource": "sqs.listqueues", + "trace_id": 1, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "672522b200000000", + "aws.agent": "botocore", + "aws.operation": "ListQueues", + "aws.region": "us-east-1", + "aws.requestid": "GCXA1QNSIT8ZCO2ER8SPI6U6QPTINWZKK1J0QLJE4FV89SOAG7VY", + "aws_service": "sqs", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "region": "us-east-1", + "runtime-id": "e45a6c451bfb47bfbb0f4f36bd0803a3", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 9691, + "retry_attempts": 0 + }, + "duration": 9492759, + "start": 1730486963003404555 + }], +[ + { + "name": "sqs.command", + "service": "aws.sqs", + "resource": "sqs.createqueue", + "trace_id": 2, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "672522b300000000", + "aws.agent": "botocore", + "aws.operation": "CreateQueue", + "aws.region": "us-east-1", + "aws.requestid": "A3R2CT5ZWO1XH9GEAT7L232IPRFJSCTBRRGO9LCSBEUX0T8KHC1O", + "aws.sqs.queue_name": "Test", + "aws_service": "sqs", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "queuename": "Test", + "region": "us-east-1", + "runtime-id": "e45a6c451bfb47bfbb0f4f36bd0803a3", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 9691, + "retry_attempts": 0 + }, + "duration": 7553649, + "start": 1730486963013740697 + }], +[ + { + "name": "sqs.command", + "service": "test-botocore-tracing.sqs", + "resource": "sqs.sendmessage", + "trace_id": 3, + "span_id": 1, + "parent_id": 0, + "type": "http", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "_dd.p.tid": "672522b300000000", + "aws.agent": "botocore", + "aws.operation": "SendMessage", + "aws.region": "us-east-1", + "aws.request.body.MessageAttributes.eight.DataType": "String", + "aws.request.body.MessageAttributes.eight.StringValue": "eight", + "aws.request.body.MessageAttributes.five.DataType": "String", + "aws.request.body.MessageAttributes.five.StringValue": "five", + "aws.request.body.MessageAttributes.four.DataType": "String", + "aws.request.body.MessageAttributes.four.StringValue": "four", + "aws.request.body.MessageAttributes.nine.DataType": "String", + 
"aws.request.body.MessageAttributes.nine.StringValue": "nine", + "aws.request.body.MessageAttributes.one.DataType": "String", + "aws.request.body.MessageAttributes.one.StringValue": "one", + "aws.request.body.MessageAttributes.seven.DataType": "String", + "aws.request.body.MessageAttributes.seven.StringValue": "seven", + "aws.request.body.MessageAttributes.six.DataType": "String", + "aws.request.body.MessageAttributes.six.StringValue": "six", + "aws.request.body.MessageAttributes.ten.DataType": "String", + "aws.request.body.MessageAttributes.ten.StringValue": "ten", + "aws.request.body.MessageAttributes.three.DataType": "String", + "aws.request.body.MessageAttributes.three.StringValue": "three", + "aws.request.body.MessageAttributes.two.DataType": "String", + "aws.request.body.MessageAttributes.two.StringValue": "two", + "aws.request.body.MessageBody": "world", + "aws.request.body.QueueUrl": "http://localhost:4566/000000000000/Test", + "aws.requestid": "NLK4TDG9YGTYRAMQ6QHN6NWC6QKRSSIZ8IBF0DS4RTY2Y3YWA7XC", + "aws.response.body.HTTPHeaders.access-control-allow-headers": "authorization,cache-control,content-length,content-md5,content-type,etag,location,x-amz-acl,x-amz-content-sha256,x-amz-date,x-amz-request-id,x-amz-security-token,x-amz-tagging,x-amz-target,x-amz-user-agent,x-amz-version-id,x-amzn-requestid,x-localstack-target,amz-sdk-invocation-id,amz-sdk-request", + "aws.response.body.HTTPHeaders.access-control-allow-methods": "HEAD,GET,PUT,POST,DELETE,OPTIONS,PATCH", + "aws.response.body.HTTPHeaders.access-control-allow-origin": "*", + "aws.response.body.HTTPHeaders.access-control-expose-headers": "etag,x-amz-version-id", + "aws.response.body.HTTPHeaders.connection": "close", + "aws.response.body.HTTPHeaders.content-length": "493", + "aws.response.body.HTTPHeaders.content-type": "text/xml", + "aws.response.body.HTTPHeaders.date": "Fri, 01 Nov 2024 18:49:23 GMT", + "aws.response.body.HTTPHeaders.server": "hypercorn-h11", + "aws.response.body.HTTPStatusCode": "200", + "aws.response.body.RequestId": "NLK4TDG9YGTYRAMQ6QHN6NWC6QKRSSIZ8IBF0DS4RTY2Y3YWA7XC", + "aws.response.body.RetryAttempts": "0", + "aws.sqs.queue_name": "Test", + "aws_account": "000000000000", + "aws_service": "sqs", + "component": "botocore", + "http.status_code": "200", + "language": "python", + "queuename": "Test", + "region": "us-east-1", + "runtime-id": "e45a6c451bfb47bfbb0f4f36bd0803a3", + "span.kind": "client" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 9691, + "retry_attempts": 0 + }, + "duration": 964509306, + "start": 1730486963031752521 + }]] diff --git a/tests/snapshots/tests.contrib.django.test_django_snapshots.test_psycopg2_query_default.json b/tests/snapshots/tests.contrib.django.test_django_snapshots.test_psycopg2_query_default.json index 9e6545e4228..de9cdf41eb5 100644 --- a/tests/snapshots/tests.contrib.django.test_django_snapshots.test_psycopg2_query_default.json +++ b/tests/snapshots/tests.contrib.django.test_django_snapshots.test_psycopg2_query_default.json @@ -48,6 +48,7 @@ "db.system": "postgresql", "db.user": "postgres", "out.host": "127.0.0.1", + "server.address": "127.0.0.1", "span.kind": "client" }, "metrics": { diff --git a/tests/snapshots/tests.contrib.django.test_django_snapshots.test_psycopg3_query_default.json b/tests/snapshots/tests.contrib.django.test_django_snapshots.test_psycopg3_query_default.json index 6d4d06cfc64..c0d50873757 100644 --- 
a/tests/snapshots/tests.contrib.django.test_django_snapshots.test_psycopg3_query_default.json +++ b/tests/snapshots/tests.contrib.django.test_django_snapshots.test_psycopg3_query_default.json @@ -48,6 +48,7 @@ "db.system": "postgresql", "db.user": "postgres", "out.host": "127.0.0.1", + "server.address": "127.0.0.1", "span.kind": "client" }, "metrics": { diff --git a/tests/tracer/test_trace_utils.py b/tests/tracer/test_trace_utils.py index 1fefc505d7f..42cb77fe0b2 100644 --- a/tests/tracer/test_trace_utils.py +++ b/tests/tracer/test_trace_utils.py @@ -595,6 +595,27 @@ def test_set_http_meta_case_sensitive_headers_notfound(mock_store_headers, span, mock_store_headers.assert_called() +ALL_IP_HEADERS = ( + ("x-forwarded-for", "1.1.1.1"), + ("x-real-ip", "2.2.2.2"), + ("true-client-ip", "3.3.3.3"), + ("x-client-ip", "4.4.4.4"), + ("x-forwarded", "5.5.5.5"), + ("forwarded-for", "6.6.6.6"), + ("x-cluster-client-ip", "7.7.7.7"), + ("fastly-client-ip", "8.8.8.8"), + ("cf-connecting-ip", "9.9.9.9"), + ("cf-connecting-ipv6", "10.10.10.10"), +) + +# testing priority order +ALL_TESTS = [ + ["", dict(ALL_IP_HEADERS[-1 : -i - 2 : -1]), ALL_IP_HEADERS[-1 - i][1]] for i in range(len(ALL_IP_HEADERS)) +] +# x-forwarded is now ignored so we fall back to forwarded-for +ALL_TESTS[5][2] = "6.6.6.6" + + @pytest.mark.parametrize( "header_env_var,headers_dict,expected", [ @@ -636,7 +657,8 @@ def test_set_http_meta_case_sensitive_headers_notfound(mock_store_headers, span, {"x-forwarded-for": "4.4.4.4", "x-real-ip": "8.8.4.4"}, "8.8.4.4", ), - ], + ] + + ALL_TESTS, ) def test_get_request_header_ip(header_env_var, headers_dict, expected, span): with override_global_config(dict(_asm_enabled=True, _client_ip_header=header_env_var)): diff --git a/tests/tracer/test_utils.py b/tests/tracer/test_utils.py index 3603c9105b2..b24396fde7b 100644 --- a/tests/tracer/test_utils.py +++ b/tests/tracer/test_utils.py @@ -589,3 +589,8 @@ def test_hourglass_turn(): while hg.trickling(): sleep(0.1) assert sw.elapsed() > 1.1 + + +def test_hourglass_sorting(): + """Test that we can sort hourglasses.""" + sorted(time.HourGlass(1) for _ in range(100))
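For reference only (not part of the patch): a minimal sketch of what the `ALL_TESTS` comprehension in `tests/tracer/test_trace_utils.py` expands to, assuming the ten `ALL_IP_HEADERS` entries shown in the diff. Each generated case supplies the i+1 lowest-priority headers and expects the highest-priority one present to be chosen, with the `x-forwarded` case overridden because that header is now ignored.

```python
# Hypothetical illustration of the priority-order cases the comprehension builds.
ALL_IP_HEADERS = (
    ("x-forwarded-for", "1.1.1.1"),
    ("x-real-ip", "2.2.2.2"),
    ("true-client-ip", "3.3.3.3"),
    ("x-client-ip", "4.4.4.4"),
    ("x-forwarded", "5.5.5.5"),
    ("forwarded-for", "6.6.6.6"),
    ("x-cluster-client-ip", "7.7.7.7"),
    ("fastly-client-ip", "8.8.8.8"),
    ("cf-connecting-ip", "9.9.9.9"),
    ("cf-connecting-ipv6", "10.10.10.10"),
)

# For each i, the slice ALL_IP_HEADERS[-1 : -i - 2 : -1] takes the last i+1
# headers (lowest priority first), and the expected client IP is the
# highest-priority header among them, i.e. ALL_IP_HEADERS[-1 - i].
for i in range(len(ALL_IP_HEADERS)):
    headers = dict(ALL_IP_HEADERS[-1 : -i - 2 : -1])
    expected = ALL_IP_HEADERS[-1 - i][1]
    print(i, expected, headers)

# i == 5 would naively expect "5.5.5.5" (x-forwarded); since x-forwarded is
# ignored, the patch overrides that case to "6.6.6.6" (forwarded-for), which
# is what ALL_TESTS[5][2] = "6.6.6.6" does in the diff above.
```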