From 0d228eba1d7c129842b3f65a3a01506478726f8c Mon Sep 17 00:00:00 2001
From: ZStriker19
Date: Tue, 7 Jan 2025 14:07:02 -0500
Subject: [PATCH] add notes for changes and where to make changes

---
 ddtrace/_trace/trace_handlers.py        | 14 ++++++++++----
 ddtrace/contrib/internal/flask/patch.py |  1 +
 ddtrace/contrib/internal/wsgi/wsgi.py   |  6 +++++-
 ddtrace/contrib/trace_utils.py          |  2 ++
 ddtrace/internal/core/__init__.py       |  2 ++
 5 files changed, 20 insertions(+), 5 deletions(-)

diff --git a/ddtrace/_trace/trace_handlers.py b/ddtrace/_trace/trace_handlers.py
index 7c2ba02d6b4..40d75b640e5 100644
--- a/ddtrace/_trace/trace_handlers.py
+++ b/ddtrace/_trace/trace_handlers.py
@@ -106,14 +106,20 @@ def _get_parameters_for_new_span_directly_from_context(ctx: core.ExecutionContex
     return span_kwargs
 
 
-def _start_span(ctx: core.ExecutionContext, call_trace: bool = True, **kwargs) -> "Span":
+def _start_span(
+    ctx: core.ExecutionContext, call_trace: bool = True, activate_distributed_headers: bool = False, **kwargs
+) -> "Span":
     span_kwargs = _get_parameters_for_new_span_directly_from_context(ctx)
     call_trace = ctx.get_item("call_trace", call_trace)
     tracer = (ctx.get_item("middleware") or ctx["pin"]).tracer
-    distributed_headers_config = ctx.get_item("distributed_headers_config")
-    if distributed_headers_config:
+    # NOTE: this item is a ddtrace.settings.integration.IntegrationConfig, so the key
+    # should be renamed from distributed_headers_config to integration_config at every
+    # call site that sets it; fall back to the old key until that rename lands.
+    integration_config = ctx.get_item("integration_config") or ctx.get_item("distributed_headers_config")
+    activate_distributed_headers = ctx.get_item("activate_distributed_headers", activate_distributed_headers)
+    if integration_config and activate_distributed_headers:
         trace_utils.activate_distributed_headers(
-            tracer, int_config=distributed_headers_config, request_headers=ctx["distributed_headers"]
+            tracer, int_config=integration_config, request_headers=ctx["distributed_headers"]
         )
     distributed_context = ctx.get_item("distributed_context")
     if distributed_context and not call_trace:
diff --git a/ddtrace/contrib/internal/flask/patch.py b/ddtrace/contrib/internal/flask/patch.py
index 429a9d05667..023a9e899ef 100644
--- a/ddtrace/contrib/internal/flask/patch.py
+++ b/ddtrace/contrib/internal/flask/patch.py
@@ -532,6 +532,7 @@ def _patched_request(pin, wrapped, instance, args, kwargs):
         block_request_callable=_block_request_callable,
         ignored_exception_type=NotFound,
         tags={COMPONENT: config.flask.integration_name},
+        activate_distributed_headers=True,
     ) as ctx, ctx.span:
         core.dispatch("flask._patched_request", (ctx,))
         return wrapped(*args, **kwargs)
diff --git a/ddtrace/contrib/internal/wsgi/wsgi.py b/ddtrace/contrib/internal/wsgi/wsgi.py
index da86aa8f21e..0b227b390b5 100644
--- a/ddtrace/contrib/internal/wsgi/wsgi.py
+++ b/ddtrace/contrib/internal/wsgi/wsgi.py
@@ -95,6 +95,9 @@ def __call__(self, environ: Iterable, start_response: Callable) -> wrapt.ObjectP
         headers = get_request_headers(environ)
         closing_iterable = ()
         not_blocked = True
+        # This is the fix: add an activate_distributed_headers kwarg to the
+        # context, then have _start_span in core check it. Spans that leave it
+        # unset (every non local root span) skip activate_distributed_headers.
         with core.context_with_data(
             "wsgi.__call__",
             remote_addr=environ.get("REMOTE_ADDR"),
@@ -104,8 +107,9 @@ def __call__(self, environ: Iterable, start_response: Callable) -> wrapt.ObjectP
             span_type=SpanTypes.WEB,
             span_name=(self._request_call_name if hasattr(self, "_request_call_name") else self._request_span_name),
             middleware_config=self._config,
-            distributed_headers_config=self._config,
+            integration_config=self._config,
             distributed_headers=environ,
+            activate_distributed_headers=True,
             environ=environ,
             middleware=self,
             span_key="req_span",
diff --git a/ddtrace/contrib/trace_utils.py b/ddtrace/contrib/trace_utils.py
index db8509d8c35..6254b64ff2a 100644
--- a/ddtrace/contrib/trace_utils.py
+++ b/ddtrace/contrib/trace_utils.py
@@ -576,7 +576,9 @@ def activate_distributed_headers(tracer, int_config=None, request_headers=None,
         #
         # app = Flask(__name__)  # Traced via Flask instrumentation
         # app = DDWSGIMiddleware(app)  # Extra layer on top for WSGI
+        # This check is what saves us from re-activating the same context over and over.
         current_context = tracer.current_trace_context()
+        # NOTE: this comparison errors when the extracted context is missing a trace_id etc.
         if current_context and current_context.trace_id == context.trace_id:
             log.debug(
                 "will not activate extracted Context(trace_id=%r, span_id=%r), a context with that trace id is already active",  # noqa: E501
diff --git a/ddtrace/internal/core/__init__.py b/ddtrace/internal/core/__init__.py
index da31218f73c..3ab9871b34a 100644
--- a/ddtrace/internal/core/__init__.py
+++ b/ddtrace/internal/core/__init__.py
@@ -175,7 +175,9 @@ def __init__(
 
     def __enter__(self) -> "ExecutionContext":
         if self._span is None and "_CURRENT_CONTEXT" in globals():
             self._token: contextvars.Token["ExecutionContext"] = _CURRENT_CONTEXT.set(self)
+        # e.g. "context.started.flask._patched_request"
         dispatch("context.started.%s" % self.identifier, (self,))
+        # e.g. "context.started.start_span.flask._patched_request"
         dispatch("context.started.start_span.%s" % self.identifier, (self,))
         return self