From 2be94ca479e1a46a7ee053f0f6e6d733093a463e Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Wed, 15 Apr 2026 13:14:52 +0200 Subject: [PATCH 01/36] feat: Send GenAI spans as V2 envelope items --- sentry_sdk/client.py | 105 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 103 insertions(+), 2 deletions(-) diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index 9f795d2489..ed58104ec7 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -27,6 +27,7 @@ get_before_send_metric, has_logs_enabled, has_metrics_enabled, + serialize_attribute, ) from sentry_sdk.serializer import serialize from sentry_sdk.tracing import trace @@ -56,6 +57,74 @@ ) from sentry_sdk.scrubber import EventScrubber from sentry_sdk.monitor import Monitor +from sentry_sdk.envelope import Item, PayloadRef + + +_ISO_TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ" + + +def _iso_to_epoch(iso_str: str) -> float: + return ( + datetime.strptime(iso_str, _ISO_TIMESTAMP_FORMAT) + .replace(tzinfo=timezone.utc) + .timestamp() + ) + + +def _v1_span_to_v2(span: "Dict[str, Any]", event: "Dict[str, Any]") -> "Dict[str, Any]": + rv: "Dict[str, Any]" = { + "trace_id": span["trace_id"], + "span_id": span["span_id"], + "name": span.get("description") or "", + "is_segment": False, + "start_timestamp": _iso_to_epoch(span["start_timestamp"]), + "status": "ok", + } + + if span.get("timestamp"): + rv["end_timestamp"] = _iso_to_epoch(span["timestamp"]) + + if span.get("parent_span_id"): + rv["parent_span_id"] = span["parent_span_id"] + + status = span.get("status") + if status and status != "ok": + rv["status"] = "error" + + attributes: "Dict[str, Any]" = {} + + if span.get("op"): + attributes["sentry.op"] = span["op"] + if span.get("origin"): + attributes["sentry.origin"] = span["origin"] + + for key, value in (span.get("data") or {}).items(): + attributes[key] = value + for key, value in (span.get("tags") or {}).items(): + attributes[key] = value + + trace_context = event.get("contexts", 
{}).get("trace", {}) + sdk_info = event.get("sdk", {}) + + if event.get("release"): + attributes["sentry.release"] = event["release"] + if event.get("environment"): + attributes["sentry.environment"] = event["environment"] + if event.get("transaction"): + attributes["sentry.segment.name"] = event["transaction"] + + if trace_context.get("span_id"): + attributes["sentry.segment.id"] = trace_context["span_id"] + if sdk_info.get("name"): + attributes["sentry.sdk.name"] = sdk_info["name"] + if sdk_info.get("version"): + attributes["sentry.sdk.version"] = sdk_info["version"] + + if attributes: + rv["attributes"] = {k: serialize_attribute(v) for k, v in attributes.items()} + + return rv + if TYPE_CHECKING: from typing import Any @@ -72,7 +141,7 @@ from sentry_sdk.session import Session from sentry_sdk.spotlight import SpotlightClient from sentry_sdk.traces import StreamedSpan - from sentry_sdk.transport import Transport, Item + from sentry_sdk.transport import Transport, Item, PayloadRef from sentry_sdk._log_batcher import LogBatcher from sentry_sdk._metrics_batcher import MetricsBatcher from sentry_sdk.utils import Dsn @@ -912,7 +981,39 @@ def capture_event( if is_transaction: if isinstance(profile, Profile): envelope.add_profile(profile.to_json(event_opt, self.options)) - envelope.add_transaction(event_opt) + + nonstreamed_spans = [] + streamed_spans = [] + for span in event_opt.get("spans") or []: + span_op = span.get("op") + if span_op is not None and span_op.startswith("gen_ai."): + streamed_spans.append(span) + else: + nonstreamed_spans.append(span) + + if nonstreamed_spans: + event_opt["spans"] = nonstreamed_spans + envelope.add_transaction(event_opt) + + if streamed_spans: + envelope.add_item( + Item( + type=SpanBatcher.TYPE, + content_type=SpanBatcher.CONTENT_TYPE, + headers={ + "item_count": len(streamed_spans), + }, + payload=PayloadRef( + json={ + "items": [ + _v1_span_to_v2(span, event) + for span in streamed_spans + ] + }, + ), + ) + ) + elif is_checkin: 
envelope.add_checkin(event_opt) else: From 01f479a09e4791082da604ba0f57cc4b74f1bf2f Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Wed, 15 Apr 2026 15:42:59 +0200 Subject: [PATCH 02/36] . --- sentry_sdk/client.py | 213 ++++++++++++++++++++++++++----------------- 1 file changed, 130 insertions(+), 83 deletions(-) diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index ed58104ec7..8667c2b194 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -2,7 +2,7 @@ import uuid import random import socket -from collections.abc import Mapping +from collections.abc import Mapping, Iterable from datetime import datetime, timezone from importlib import import_module from typing import TYPE_CHECKING, List, Dict, cast, overload @@ -58,104 +58,156 @@ from sentry_sdk.scrubber import EventScrubber from sentry_sdk.monitor import Monitor from sentry_sdk.envelope import Item, PayloadRef +from sentry_sdk.utils import datetime_from_isoformat +if TYPE_CHECKING: + from typing import Any + from typing import Callable + from typing import Optional + from typing import Sequence + from typing import Type + from typing import Union + from typing import TypeVar + + from sentry_sdk._types import Event, Hint, SDKInfo, Log, Metric, EventDataCategory + from sentry_sdk.integrations import Integration + from sentry_sdk.scope import Scope + from sentry_sdk.session import Session + from sentry_sdk.spotlight import SpotlightClient + from sentry_sdk.traces import StreamedSpan + from sentry_sdk.transport import Transport, Item, PayloadRef + from sentry_sdk._log_batcher import LogBatcher + from sentry_sdk._metrics_batcher import MetricsBatcher + from sentry_sdk.utils import Dsn -_ISO_TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ" + I = TypeVar("I", bound=Integration) # noqa: E741 +_client_init_debug = ContextVar("client_init_debug") -def _iso_to_epoch(iso_str: str) -> float: - return ( - datetime.strptime(iso_str, _ISO_TIMESTAMP_FORMAT) - .replace(tzinfo=timezone.utc) - .timestamp() 
- ) +SDK_INFO: "SDKInfo" = { + "name": "sentry.python", # SDK name will be overridden after integrations have been loaded with sentry_sdk.integrations.setup_integrations() + "version": VERSION, + "packages": [{"name": "pypi:sentry-sdk", "version": VERSION}], +} -def _v1_span_to_v2(span: "Dict[str, Any]", event: "Dict[str, Any]") -> "Dict[str, Any]": - rv: "Dict[str, Any]" = { - "trace_id": span["trace_id"], - "span_id": span["span_id"], - "name": span.get("description") or "", - "is_segment": False, - "start_timestamp": _iso_to_epoch(span["start_timestamp"]), +def _serialized_v1_span_to_serialized_v2_span( + span: "Dict[str, Any]", event: "Event" +) -> "dict[str, Any]": + # See SpanBatcher._to_transport_format() for analogous population of all entries except "attributes". + res: "Dict[str, Any]" = { "status": "ok", + "is_segment": False, } - if span.get("timestamp"): - rv["end_timestamp"] = _iso_to_epoch(span["timestamp"]) + if "trace_id" in span: + res["trace_id"] = span["trace_id"] + + if "span_id" in span: + res["span_id"] = span["span_id"] + + if "description" in span: + res["name"] = span["description"] - if span.get("parent_span_id"): - rv["parent_span_id"] = span["parent_span_id"] + if "start_timestamp" in span: + start_timestamp = None + try: + start_timestamp = datetime_from_isoformat(span["start_timestamp"]) + except Exception: + pass + + if start_timestamp is not None: + res["start_timestamp"] = start_timestamp.timestamp() + + if "timestamp" in span: + end_timestamp = None + try: + end_timestamp = datetime_from_isoformat(span["timestamp"]) + except Exception: + pass - status = span.get("status") - if status and status != "ok": - rv["status"] = "error" + if end_timestamp is not None: + res["end_timestamp"] = end_timestamp.timestamp() + + if "parent_span_id" in span: + res["parent_span_id"] = span["parent_span_id"] + + if "status" in span and span["status"] != "ok": + res["status"] = "error" attributes: "Dict[str, Any]" = {} - if span.get("op"): + if "op" 
in span: attributes["sentry.op"] = span["op"] - if span.get("origin"): + if "origin" in span: attributes["sentry.origin"] = span["origin"] - for key, value in (span.get("data") or {}).items(): - attributes[key] = value - for key, value in (span.get("tags") or {}).items(): - attributes[key] = value - - trace_context = event.get("contexts", {}).get("trace", {}) - sdk_info = event.get("sdk", {}) - - if event.get("release"): + span_data = span.get("data") + if isinstance(span_data, dict): + attributes.update(span_data) + + span_tags = span.get("tags") + if isinstance(span_tags, dict): + attributes.update(span_tags) + + # See Scope._apply_user_attributes_to_telemetry() for user attributes. + user = event.get("user") + if isinstance(user, dict): + if "id" in user: + attributes["user.id"] = user["id"] + if "username" in user: + attributes["user.name"] = user["username"] + if "email" in user: + attributes["user.email"] = user["email"] + + # See Scope.set_global_attributes() for release, environment, and SDK metadata. 
+ if "release" in event: attributes["sentry.release"] = event["release"] - if event.get("environment"): + if "environment" in event: attributes["sentry.environment"] = event["environment"] - if event.get("transaction"): + if "transaction" in event: attributes["sentry.segment.name"] = event["transaction"] - if trace_context.get("span_id"): + trace_context = event.get("contexts", {}).get("trace", {}) + if "span_id" in trace_context: attributes["sentry.segment.id"] = trace_context["span_id"] - if sdk_info.get("name"): - attributes["sentry.sdk.name"] = sdk_info["name"] - if sdk_info.get("version"): - attributes["sentry.sdk.version"] = sdk_info["version"] + + sdk_info = event.get("sdk") + if isinstance(sdk_info, dict): + if "name" in sdk_info: + attributes["sentry.sdk.name"] = sdk_info["name"] + if "version" in sdk_info: + attributes["sentry.sdk.version"] = sdk_info["version"] if attributes: - rv["attributes"] = {k: serialize_attribute(v) for k, v in attributes.items()} + res["attributes"] = {k: serialize_attribute(v) for k, v in attributes.items()} - return rv + return res -if TYPE_CHECKING: - from typing import Any - from typing import Callable - from typing import Optional - from typing import Sequence - from typing import Type - from typing import Union - from typing import TypeVar +def _split_gen_ai_spans( + event_opt: "Event", +) -> "tuple[List[Dict[str, object]], List[Dict[str, object]]]": + if "spans" not in event_opt: + return [], [] - from sentry_sdk._types import Event, Hint, SDKInfo, Log, Metric, EventDataCategory - from sentry_sdk.integrations import Integration - from sentry_sdk.scope import Scope - from sentry_sdk.session import Session - from sentry_sdk.spotlight import SpotlightClient - from sentry_sdk.traces import StreamedSpan - from sentry_sdk.transport import Transport, Item, PayloadRef - from sentry_sdk._log_batcher import LogBatcher - from sentry_sdk._metrics_batcher import MetricsBatcher - from sentry_sdk.utils import Dsn + spans = 
event_opt["spans"] + if isinstance(spans, AnnotatedValue): + spans = spans.value - I = TypeVar("I", bound=Integration) # noqa: E741 - -_client_init_debug = ContextVar("client_init_debug") + if not isinstance(spans, Iterable): + return [], [] + non_gen_ai_spans = [] + gen_ai_spans = [] + for span in spans: + span_op = span.get("op") + if isinstance(span_op, str) and span_op.startswith("gen_ai."): + gen_ai_spans.append(span) + else: + non_gen_ai_spans.append(span) -SDK_INFO: "SDKInfo" = { - "name": "sentry.python", # SDK name will be overridden after integrations have been loaded with sentry_sdk.integrations.setup_integrations() - "version": VERSION, - "packages": [{"name": "pypi:sentry-sdk", "version": VERSION}], -} + return non_gen_ai_spans, gen_ai_spans def _get_options(*args: "Optional[str]", **kwargs: "Any") -> "Dict[str, Any]": @@ -982,32 +1034,27 @@ def capture_event( if isinstance(profile, Profile): envelope.add_profile(profile.to_json(event_opt, self.options)) - nonstreamed_spans = [] - streamed_spans = [] - for span in event_opt.get("spans") or []: - span_op = span.get("op") - if span_op is not None and span_op.startswith("gen_ai."): - streamed_spans.append(span) - else: - nonstreamed_spans.append(span) + non_gen_ai_spans, gen_ai_spans = _split_gen_ai_spans(event_opt) - if nonstreamed_spans: - event_opt["spans"] = nonstreamed_spans - envelope.add_transaction(event_opt) + event_opt["spans"] = non_gen_ai_spans + envelope.add_transaction(event_opt) - if streamed_spans: + if gen_ai_spans: envelope.add_item( Item( type=SpanBatcher.TYPE, content_type=SpanBatcher.CONTENT_TYPE, headers={ - "item_count": len(streamed_spans), + "item_count": len(gen_ai_spans), }, payload=PayloadRef( json={ "items": [ - _v1_span_to_v2(span, event) - for span in streamed_spans + _serialized_v1_span_to_serialized_v2_span( + span, event + ) + for span in gen_ai_spans + if isinstance(span, dict) ] }, ), From 80e6a106b8472f6a6984ab254ca56646f0d51e59 Mon Sep 17 00:00:00 2001 From: Alexander 
Alderman Webb Date: Wed, 15 Apr 2026 15:43:59 +0200 Subject: [PATCH 03/36] . --- sentry_sdk/client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index 8667c2b194..41ab81c58e 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -75,7 +75,7 @@ from sentry_sdk.session import Session from sentry_sdk.spotlight import SpotlightClient from sentry_sdk.traces import StreamedSpan - from sentry_sdk.transport import Transport, Item, PayloadRef + from sentry_sdk.transport import Transport, Item from sentry_sdk._log_batcher import LogBatcher from sentry_sdk._metrics_batcher import MetricsBatcher from sentry_sdk.utils import Dsn From 0622cf410d9c6496d81d50ce163f52fa1d97eaee Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Wed, 15 Apr 2026 15:44:35 +0200 Subject: [PATCH 04/36] . --- sentry_sdk/client.py | 1 + 1 file changed, 1 insertion(+) diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index 41ab81c58e..2895f23436 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -84,6 +84,7 @@ _client_init_debug = ContextVar("client_init_debug") + SDK_INFO: "SDKInfo" = { "name": "sentry.python", # SDK name will be overridden after integrations have been loaded with sentry_sdk.integrations.setup_integrations() "version": VERSION, From 7c75da105649abe57a6e32946507d97c85c86123 Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Wed, 15 Apr 2026 16:01:06 +0200 Subject: [PATCH 05/36] . 
--- sentry_sdk/client.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index 2895f23436..7bb2acf7dc 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -188,16 +188,16 @@ def _serialized_v1_span_to_serialized_v2_span( def _split_gen_ai_spans( event_opt: "Event", -) -> "tuple[List[Dict[str, object]], List[Dict[str, object]]]": +) -> "Optional[tuple[List[Dict[str, object]], List[Dict[str, object]]]]": if "spans" not in event_opt: - return [], [] + return None spans = event_opt["spans"] if isinstance(spans, AnnotatedValue): spans = spans.value if not isinstance(spans, Iterable): - return [], [] + return None non_gen_ai_spans = [] gen_ai_spans = [] @@ -1035,12 +1035,15 @@ def capture_event( if isinstance(profile, Profile): envelope.add_profile(profile.to_json(event_opt, self.options)) - non_gen_ai_spans, gen_ai_spans = _split_gen_ai_spans(event_opt) + split_spans = _split_gen_ai_spans(event_opt) + if split_spans is None or not split_spans[1]: + envelope.add_transaction(event_opt) + else: + non_gen_ai_spans, gen_ai_spans = split_spans - event_opt["spans"] = non_gen_ai_spans - envelope.add_transaction(event_opt) + event_opt["spans"] = non_gen_ai_spans + envelope.add_transaction(event_opt) - if gen_ai_spans: envelope.add_item( Item( type=SpanBatcher.TYPE, From 54a9b073a5887cdc51bd2d23253014e1bcb55c0f Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Wed, 15 Apr 2026 16:08:42 +0200 Subject: [PATCH 06/36] update --- sentry_sdk/client.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index 7bb2acf7dc..9ee225150d 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -93,10 +93,10 @@ def _serialized_v1_span_to_serialized_v2_span( - span: "Dict[str, Any]", event: "Event" + span: "dict[str, Any]", event: "Event" ) -> "dict[str, Any]": # See SpanBatcher._to_transport_format() for analogous population 
of all entries except "attributes". - res: "Dict[str, Any]" = { + res: "dict[str, Any]" = { "status": "ok", "is_segment": False, } From d1aa07cb2c201ab69a130e9b1b3705f2330d629b Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Wed, 15 Apr 2026 16:48:38 +0200 Subject: [PATCH 07/36] . --- sentry_sdk/client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index 9ee225150d..e02841d5a3 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -192,7 +192,7 @@ def _split_gen_ai_spans( if "spans" not in event_opt: return None - spans = event_opt["spans"] + spans: "Any" = event_opt["spans"] if isinstance(spans, AnnotatedValue): spans = spans.value From 117a6c9bf47342883a8cd4546582be97d39ad996 Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Wed, 15 Apr 2026 18:17:04 +0200 Subject: [PATCH 08/36] . --- sentry_sdk/client.py | 62 ++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 57 insertions(+), 5 deletions(-) diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index e02841d5a3..7c1eb64cff 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -7,6 +7,7 @@ from importlib import import_module from typing import TYPE_CHECKING, List, Dict, cast, overload import warnings +import json from sentry_sdk._compat import check_uwsgi_thread_support from sentry_sdk._metrics_batcher import MetricsBatcher @@ -27,10 +28,10 @@ get_before_send_metric, has_logs_enabled, has_metrics_enabled, - serialize_attribute, ) from sentry_sdk.serializer import serialize from sentry_sdk.tracing import trace +from sentry_sdk.traces import SpanStatus from sentry_sdk.tracing_utils import has_span_streaming_enabled from sentry_sdk.transport import ( HttpTransportCore, @@ -39,6 +40,7 @@ ) from sentry_sdk.consts import ( SPANDATA, + SPANSTATUS, DEFAULT_MAX_VALUE_LENGTH, DEFAULT_OPTIONS, INSTRUMENTER, @@ -97,7 +99,7 @@ def _serialized_v1_span_to_serialized_v2_span( ) -> "dict[str, Any]": # See 
SpanBatcher._to_transport_format() for analogous population of all entries except "attributes". res: "dict[str, Any]" = { - "status": "ok", + "status": SpanStatus.OK.value, "is_segment": False, } @@ -133,7 +135,7 @@ def _serialized_v1_span_to_serialized_v2_span( if "parent_span_id" in span: res["parent_span_id"] = span["parent_span_id"] - if "status" in span and span["status"] != "ok": + if "status" in span and span["status"] != SPANSTATUS.OK: res["status"] = "error" attributes: "Dict[str, Any]" = {} @@ -180,8 +182,58 @@ def _serialized_v1_span_to_serialized_v2_span( if "version" in sdk_info: attributes["sentry.sdk.version"] = sdk_info["version"] - if attributes: - res["attributes"] = {k: serialize_attribute(v) for k, v in attributes.items()} + for key, value in attributes.items(): + serialized_value = serialize(value) + if isinstance(serialized_value, bool): + res.setdefault("attributes", {})[key] = { + "value": serialized_value, + "type": "boolean", + } + continue + + if isinstance(serialized_value, int): + res.setdefault("attributes", {})[key] = { + "value": serialized_value, + "type": "integer", + } + continue + + if isinstance(serialized_value, float): + res.setdefault("attributes", {})[key] = { + "value": serialized_value, + "type": "double", + } + continue + + if isinstance(serialized_value, str): + res.setdefault("attributes", {})[key] = { + "value": serialized_value, + "type": "string", + } + continue + + if isinstance(serialized_value, list): + if not serialized_value: + res.setdefault("attributes", {})[key] = {"value": [], "type": "array"} + + ty = type(serialized_value[0]) + if ty in (int, str, bool, float) and all( + type(v) is ty for v in serialized_value + ): + res.setdefault("attributes", {})[key] = { + "value": serialized_value, + "type": "array", + } + + continue + + # Types returned when the serializer for V1 span attributes recurses into some container types. 
+ if isinstance(serialized_value, (dict, list)): + res.setdefault("attributes", {})[key] = { + "value": json.dumps(serialized_value), + "type": "string", + } + continue return res From 83c36b54c0c46847531db66f2ddc3d6d592d8a95 Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Wed, 15 Apr 2026 18:25:21 +0200 Subject: [PATCH 09/36] . --- sentry_sdk/client.py | 118 ++++++++++++++++++++++++------------------- 1 file changed, 66 insertions(+), 52 deletions(-) diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index 7c1eb64cff..c6df2f564b 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -71,7 +71,15 @@ from typing import Union from typing import TypeVar - from sentry_sdk._types import Event, Hint, SDKInfo, Log, Metric, EventDataCategory + from sentry_sdk._types import ( + Event, + Hint, + SDKInfo, + Log, + Metric, + EventDataCategory, + SerializedAttributeValue, + ) from sentry_sdk.integrations import Integration from sentry_sdk.scope import Scope from sentry_sdk.session import Session @@ -94,6 +102,56 @@ } +def _serialized_v1_attribute_to_serialized_v2_attribute( + attribute_value: "Any", +) -> "Optional[SerializedAttributeValue]": + if isinstance(attribute_value, bool): + return { + "value": attribute_value, + "type": "boolean", + } + + if isinstance(attribute_value, int): + return { + "value": attribute_value, + "type": "integer", + } + + if isinstance(attribute_value, float): + return { + "value": attribute_value, + "type": "double", + } + + if isinstance(attribute_value, str): + return { + "value": attribute_value, + "type": "string", + } + + if isinstance(attribute_value, list): + if not attribute_value: + return {"value": [], "type": "array"} + + ty = type(attribute_value[0]) + if ty in (int, str, bool, float) and all( + type(v) is ty for v in attribute_value + ): + return { + "value": attribute_value, + "type": "array", + } + + # Types returned when the serializer for V1 span attributes recurses into some container types. 
+ if isinstance(attribute_value, (dict, list)): + return { + "value": json.dumps(attribute_value), + "type": "string", + } + + return None + + def _serialized_v1_span_to_serialized_v2_span( span: "dict[str, Any]", event: "Event" ) -> "dict[str, Any]": @@ -182,58 +240,14 @@ def _serialized_v1_span_to_serialized_v2_span( if "version" in sdk_info: attributes["sentry.sdk.version"] = sdk_info["version"] - for key, value in attributes.items(): - serialized_value = serialize(value) - if isinstance(serialized_value, bool): - res.setdefault("attributes", {})[key] = { - "value": serialized_value, - "type": "boolean", - } - continue - - if isinstance(serialized_value, int): - res.setdefault("attributes", {})[key] = { - "value": serialized_value, - "type": "integer", - } - continue - - if isinstance(serialized_value, float): - res.setdefault("attributes", {})[key] = { - "value": serialized_value, - "type": "double", - } - continue - - if isinstance(serialized_value, str): - res.setdefault("attributes", {})[key] = { - "value": serialized_value, - "type": "string", - } - continue - - if isinstance(serialized_value, list): - if not serialized_value: - res.setdefault("attributes", {})[key] = {"value": [], "type": "array"} - - ty = type(serialized_value[0]) - if ty in (int, str, bool, float) and all( - type(v) is ty for v in serialized_value - ): - res.setdefault("attributes", {})[key] = { - "value": serialized_value, - "type": "array", - } - - continue + if not attributes: + return res - # Types returned when the serializer for V1 span attributes recurses into some container types. 
- if isinstance(serialized_value, (dict, list)): - res.setdefault("attributes", {})[key] = { - "value": json.dumps(serialized_value), - "type": "string", - } - continue + res["attributes"] = {} + for key, value in attributes.items(): + res["attributes"][key] = _serialized_v1_attribute_to_serialized_v2_attribute( + value + ) return res From f71e0ce84e3eacdbd46e0509f4f608c919778542 Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Thu, 16 Apr 2026 10:50:59 +0200 Subject: [PATCH 10/36] openai tests --- tests/integrations/openai/test_openai.py | 891 ++++++++++++----------- 1 file changed, 450 insertions(+), 441 deletions(-) diff --git a/tests/integrations/openai/test_openai.py b/tests/integrations/openai/test_openai.py index ada2e633de..e53f8e4f55 100644 --- a/tests/integrations/openai/test_openai.py +++ b/tests/integrations/openai/test_openai.py @@ -132,14 +132,14 @@ async def __call__(self, *args, **kwargs): ], ) def test_nonstreaming_chat_completion_no_prompts( - sentry_init, capture_events, send_default_pii, include_prompts + sentry_init, capture_items, send_default_pii, include_prompts ): sentry_init( integrations=[OpenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("span") client = OpenAI(api_key="z") client.chat.completions._post = mock.Mock(return_value=EXAMPLE_CHAT_COMPLETION) @@ -163,27 +163,26 @@ def test_nonstreaming_chat_completion_no_prompts( ) assert response == "the model response" - tx = events[0] - assert tx["type"] == "transaction" - span = tx["spans"][0] - assert span["op"] == "gen_ai.chat" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 - assert span["data"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 - assert 
span["data"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "openai" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["data"] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 - assert span["data"]["gen_ai.usage.output_tokens"] == 10 - assert span["data"]["gen_ai.usage.input_tokens"] == 20 - assert span["data"]["gen_ai.usage.total_tokens"] == 30 + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["attributes"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] + + assert span["attributes"]["gen_ai.usage.output_tokens"] == 10 + assert span["attributes"]["gen_ai.usage.input_tokens"] == 20 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 30 @pytest.mark.parametrize( @@ -229,13 +228,13 @@ def test_nonstreaming_chat_completion_no_prompts( ), ], ) -def test_nonstreaming_chat_completion(sentry_init, capture_events, messages, request): +def test_nonstreaming_chat_completion(sentry_init, capture_items, messages, request): sentry_init( integrations=[OpenAIIntegration(include_prompts=True)], 
traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") client = OpenAI(api_key="z") client.chat.completions._post = mock.Mock(return_value=EXAMPLE_CHAT_COMPLETION) @@ -256,30 +255,29 @@ def test_nonstreaming_chat_completion(sentry_init, capture_events, messages, req ) assert response == "the model response" - tx = events[0] - assert tx["type"] == "transaction" - span = tx["spans"][0] - assert span["op"] == "gen_ai.chat" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False - - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 - assert span["data"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 - assert span["data"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 + + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "openai" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False + + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 param_id = request.node.callspec.id if "blocks" in param_id: - assert json.loads(span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) == [ + assert json.loads(span["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) == [ { "type": "text", "content": "You are a helpful assistant.", } ] 
else: - assert json.loads(span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) == [ + assert json.loads(span["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) == [ { "type": "text", "content": "You are a helpful assistant.", @@ -290,12 +288,12 @@ def test_nonstreaming_chat_completion(sentry_init, capture_events, messages, req }, ] - assert "hello" in span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] - assert "the model response" in span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + assert "hello" in span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + assert "the model response" in span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] - assert span["data"]["gen_ai.usage.output_tokens"] == 10 - assert span["data"]["gen_ai.usage.input_tokens"] == 20 - assert span["data"]["gen_ai.usage.total_tokens"] == 30 + assert span["attributes"]["gen_ai.usage.output_tokens"] == 10 + assert span["attributes"]["gen_ai.usage.input_tokens"] == 20 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 30 @pytest.mark.asyncio @@ -308,14 +306,14 @@ def test_nonstreaming_chat_completion(sentry_init, capture_events, messages, req ], ) async def test_nonstreaming_chat_completion_async_no_prompts( - sentry_init, capture_events, send_default_pii, include_prompts + sentry_init, capture_items, send_default_pii, include_prompts ): sentry_init( integrations=[OpenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("span") client = AsyncOpenAI(api_key="z") client.chat.completions._post = mock.AsyncMock(return_value=EXAMPLE_CHAT_COMPLETION) @@ -336,27 +334,26 @@ async def test_nonstreaming_chat_completion_async_no_prompts( response = response.choices[0].message.content assert response == "the model response" - tx = events[0] - assert tx["type"] == "transaction" - span = tx["spans"][0] - assert span["op"] == "gen_ai.chat" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai" - assert 
span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 - assert span["data"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 - assert span["data"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "openai" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["data"] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 - assert span["data"]["gen_ai.usage.output_tokens"] == 10 - assert span["data"]["gen_ai.usage.input_tokens"] == 20 - assert span["data"]["gen_ai.usage.total_tokens"] == 30 + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["attributes"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] + + assert span["attributes"]["gen_ai.usage.output_tokens"] == 10 + assert span["attributes"]["gen_ai.usage.input_tokens"] == 20 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 30 @pytest.mark.asyncio @@ -404,14 +401,14 @@ async def 
test_nonstreaming_chat_completion_async_no_prompts( ], ) async def test_nonstreaming_chat_completion_async( - sentry_init, capture_events, messages, request + sentry_init, capture_items, messages, request ): sentry_init( integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") client = AsyncOpenAI(api_key="z") client.chat.completions._post = AsyncMock(return_value=EXAMPLE_CHAT_COMPLETION) @@ -429,30 +426,29 @@ async def test_nonstreaming_chat_completion_async( response = response.choices[0].message.content assert response == "the model response" - tx = events[0] - assert tx["type"] == "transaction" - span = tx["spans"][0] - assert span["op"] == "gen_ai.chat" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False - - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 - assert span["data"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 - assert span["data"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 + + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "openai" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False + + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 + assert 
span["attributes"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 param_id = request.node.callspec.id if "blocks" in param_id: - assert json.loads(span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) == [ + assert json.loads(span["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) == [ { "type": "text", "content": "You are a helpful assistant.", } ] else: - assert json.loads(span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) == [ + assert json.loads(span["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) == [ { "type": "text", "content": "You are a helpful assistant.", @@ -463,12 +459,12 @@ async def test_nonstreaming_chat_completion_async( }, ] - assert "hello" in span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] - assert "the model response" in span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + assert "hello" in span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + assert "the model response" in span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] - assert span["data"]["gen_ai.usage.output_tokens"] == 10 - assert span["data"]["gen_ai.usage.input_tokens"] == 20 - assert span["data"]["gen_ai.usage.total_tokens"] == 30 + assert span["attributes"]["gen_ai.usage.output_tokens"] == 10 + assert span["attributes"]["gen_ai.usage.input_tokens"] == 20 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 30 def tiktoken_encoding_if_installed(): @@ -491,7 +487,7 @@ def tiktoken_encoding_if_installed(): ) def test_streaming_chat_completion_no_prompts( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -507,7 +503,7 @@ def test_streaming_chat_completion_no_prompts( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("span") client = OpenAI(api_key="z") returned_stream = get_model_response( @@ -581,32 +577,31 @@ def test_streaming_chat_completion_no_prompts( ) assert response_string == "hello world" - tx = events[0] - assert tx["type"] == "transaction" - span = tx["spans"][0] - 
assert span["op"] == "gen_ai.chat" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True - - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 - assert span["data"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 - assert span["data"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 - - assert span["data"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "model-id" - - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["data"] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "openai" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 + + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "model-id" + + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["attributes"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] try: import tiktoken # type: ignore # noqa # pylint: disable=unused-import - assert span["data"]["gen_ai.usage.output_tokens"] == 2 - assert span["data"]["gen_ai.usage.input_tokens"] == 7 - 
assert span["data"]["gen_ai.usage.total_tokens"] == 9 + assert span["attributes"]["gen_ai.usage.output_tokens"] == 2 + assert span["attributes"]["gen_ai.usage.input_tokens"] == 7 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 9 except ImportError: pass # if tiktoken is not installed, we can't guarantee token usage will be calculated properly @@ -617,7 +612,7 @@ def test_streaming_chat_completion_no_prompts( ) def test_streaming_chat_completion_with_usage_in_stream( sentry_init, - capture_events, + capture_items, get_model_response, server_side_event_chunks, ): @@ -627,7 +622,7 @@ def test_streaming_chat_completion_with_usage_in_stream( traces_sample_rate=1.0, send_default_pii=False, ) - events = capture_events() + items = capture_items("span") client = OpenAI(api_key="z") returned_stream = get_model_response( @@ -684,13 +679,11 @@ def test_streaming_chat_completion_with_usage_in_stream( for _ in response_stream: pass - tx = events[0] - assert tx["type"] == "transaction" - span = tx["spans"][0] - assert span["op"] == "gen_ai.chat" - assert span["data"]["gen_ai.usage.input_tokens"] == 20 - assert span["data"]["gen_ai.usage.output_tokens"] == 10 - assert span["data"]["gen_ai.usage.total_tokens"] == 30 + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert span["attributes"]["gen_ai.usage.input_tokens"] == 20 + assert span["attributes"]["gen_ai.usage.output_tokens"] == 10 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 30 @pytest.mark.skipif( @@ -699,7 +692,7 @@ def test_streaming_chat_completion_with_usage_in_stream( ) def test_streaming_chat_completion_empty_content_preserves_token_usage( sentry_init, - capture_events, + capture_items, get_model_response, server_side_event_chunks, ): @@ -709,7 +702,7 @@ def test_streaming_chat_completion_empty_content_preserves_token_usage( traces_sample_rate=1.0, send_default_pii=False, ) - events = capture_events() + items = 
capture_items("span") client = OpenAI(api_key="z") returned_stream = get_model_response( @@ -747,13 +740,11 @@ def test_streaming_chat_completion_empty_content_preserves_token_usage( for _ in response_stream: pass - tx = events[0] - assert tx["type"] == "transaction" - span = tx["spans"][0] - assert span["op"] == "gen_ai.chat" - assert span["data"]["gen_ai.usage.input_tokens"] == 20 - assert "gen_ai.usage.output_tokens" not in span["data"] - assert span["data"]["gen_ai.usage.total_tokens"] == 20 + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert span["attributes"]["gen_ai.usage.input_tokens"] == 20 + assert "gen_ai.usage.output_tokens" not in span["attributes"] + assert span["attributes"]["gen_ai.usage.total_tokens"] == 20 @pytest.mark.skipif( @@ -763,7 +754,7 @@ def test_streaming_chat_completion_empty_content_preserves_token_usage( @pytest.mark.asyncio async def test_streaming_chat_completion_empty_content_preserves_token_usage_async( sentry_init, - capture_events, + capture_items, get_model_response, async_iterator, server_side_event_chunks, @@ -774,7 +765,7 @@ async def test_streaming_chat_completion_empty_content_preserves_token_usage_asy traces_sample_rate=1.0, send_default_pii=False, ) - events = capture_events() + items = capture_items("span") client = AsyncOpenAI(api_key="z") returned_stream = get_model_response( @@ -814,13 +805,11 @@ async def test_streaming_chat_completion_empty_content_preserves_token_usage_asy async for _ in response_stream: pass - tx = events[0] - assert tx["type"] == "transaction" - span = tx["spans"][0] - assert span["op"] == "gen_ai.chat" - assert span["data"]["gen_ai.usage.input_tokens"] == 20 - assert "gen_ai.usage.output_tokens" not in span["data"] - assert span["data"]["gen_ai.usage.total_tokens"] == 20 + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert 
span["attributes"]["gen_ai.usage.input_tokens"] == 20 + assert "gen_ai.usage.output_tokens" not in span["attributes"] + assert span["attributes"]["gen_ai.usage.total_tokens"] == 20 @pytest.mark.skipif( @@ -830,7 +819,7 @@ async def test_streaming_chat_completion_empty_content_preserves_token_usage_asy @pytest.mark.asyncio async def test_streaming_chat_completion_async_with_usage_in_stream( sentry_init, - capture_events, + capture_items, get_model_response, async_iterator, server_side_event_chunks, @@ -841,7 +830,7 @@ async def test_streaming_chat_completion_async_with_usage_in_stream( traces_sample_rate=1.0, send_default_pii=False, ) - events = capture_events() + items = capture_items("span") client = AsyncOpenAI(api_key="z") returned_stream = get_model_response( @@ -900,13 +889,11 @@ async def test_streaming_chat_completion_async_with_usage_in_stream( async for _ in response_stream: pass - tx = events[0] - assert tx["type"] == "transaction" - span = tx["spans"][0] - assert span["op"] == "gen_ai.chat" - assert span["data"]["gen_ai.usage.input_tokens"] == 20 - assert span["data"]["gen_ai.usage.output_tokens"] == 10 - assert span["data"]["gen_ai.usage.total_tokens"] == 30 + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert span["attributes"]["gen_ai.usage.input_tokens"] == 20 + assert span["attributes"]["gen_ai.usage.output_tokens"] == 10 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 30 # noinspection PyTypeChecker @@ -955,7 +942,7 @@ async def test_streaming_chat_completion_async_with_usage_in_stream( ) def test_streaming_chat_completion( sentry_init, - capture_events, + capture_items, messages, request, get_model_response, @@ -971,7 +958,7 @@ def test_streaming_chat_completion( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") client = OpenAI(api_key="z") returned_stream = get_model_response( @@ -1041,30 
+1028,29 @@ def test_streaming_chat_completion( map(lambda x: x.choices[0].delta.content, response_stream) ) assert response_string == "hello world" - tx = events[0] - assert tx["type"] == "transaction" - span = tx["spans"][0] - assert span["op"] == "gen_ai.chat" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True - - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 - assert span["data"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 - assert span["data"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 + + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "openai" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 param_id = request.node.callspec.id if "blocks" in param_id: - assert json.loads(span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) == [ + assert json.loads(span["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) == [ { "type": "text", "content": "You are a helpful assistant.", } ] else: - assert json.loads(span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) == [ + assert json.loads(span["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) == [ { "type": "text", "content": "You are a helpful 
assistant.", @@ -1075,22 +1061,22 @@ def test_streaming_chat_completion( }, ] - assert span["data"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "model-id" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "model-id" - assert "hello" in span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] - assert "hello world" in span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + assert "hello" in span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + assert "hello world" in span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] try: import tiktoken # type: ignore # noqa # pylint: disable=unused-import if "blocks" in param_id: - assert span["data"]["gen_ai.usage.output_tokens"] == 2 - assert span["data"]["gen_ai.usage.input_tokens"] == 7 - assert span["data"]["gen_ai.usage.total_tokens"] == 9 + assert span["attributes"]["gen_ai.usage.output_tokens"] == 2 + assert span["attributes"]["gen_ai.usage.input_tokens"] == 7 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 9 else: - assert span["data"]["gen_ai.usage.output_tokens"] == 2 - assert span["data"]["gen_ai.usage.input_tokens"] == 12 - assert span["data"]["gen_ai.usage.total_tokens"] == 14 + assert span["attributes"]["gen_ai.usage.output_tokens"] == 2 + assert span["attributes"]["gen_ai.usage.input_tokens"] == 12 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 14 except ImportError: pass # if tiktoken is not installed, we can't guarantee token usage will be calculated properly @@ -1107,7 +1093,7 @@ def test_streaming_chat_completion( ) async def test_streaming_chat_completion_async_no_prompts( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -1124,7 +1110,7 @@ async def test_streaming_chat_completion_async_no_prompts( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("span") client = AsyncOpenAI(api_key="z") returned_stream = get_model_response( @@ -1201,32 +1187,31 @@ async def 
test_streaming_chat_completion_async_no_prompts( response_string += x.choices[0].delta.content assert response_string == "hello world" - tx = events[0] - assert tx["type"] == "transaction" - span = tx["spans"][0] - assert span["op"] == "gen_ai.chat" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True - - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 - assert span["data"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 - assert span["data"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 - - assert span["data"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "model-id" - - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["data"] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "openai" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 + + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "model-id" + + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["attributes"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT 
not in span["attributes"] try: import tiktoken # type: ignore # noqa # pylint: disable=unused-import - assert span["data"]["gen_ai.usage.output_tokens"] == 2 - assert span["data"]["gen_ai.usage.input_tokens"] == 7 - assert span["data"]["gen_ai.usage.total_tokens"] == 9 + assert span["attributes"]["gen_ai.usage.output_tokens"] == 2 + assert span["attributes"]["gen_ai.usage.input_tokens"] == 7 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 9 except ImportError: pass # if tiktoken is not installed, we can't guarantee token usage will be calculated properly @@ -1279,7 +1264,7 @@ async def test_streaming_chat_completion_async_no_prompts( ) async def test_streaming_chat_completion_async( sentry_init, - capture_events, + capture_items, messages, request, get_model_response, @@ -1296,7 +1281,7 @@ async def test_streaming_chat_completion_async( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") client = AsyncOpenAI(api_key="z") @@ -1371,32 +1356,31 @@ async def test_streaming_chat_completion_async( response_string += x.choices[0].delta.content assert response_string == "hello world" - tx = events[0] - assert tx["type"] == "transaction" - span = tx["spans"][0] - assert span["op"] == "gen_ai.chat" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True - - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 - assert span["data"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 - assert span["data"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 - - assert span["data"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "model-id" + + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == 
"gen_ai.chat" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "openai" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 + + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "model-id" param_id = request.node.callspec.id if "blocks" in param_id: - assert json.loads(span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) == [ + assert json.loads(span["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) == [ { "type": "text", "content": "You are a helpful assistant.", } ] else: - assert json.loads(span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) == [ + assert json.loads(span["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) == [ { "type": "text", "content": "You are a helpful assistant.", @@ -1407,28 +1391,28 @@ async def test_streaming_chat_completion_async( }, ] - assert "hello" in span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] - assert "hello world" in span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + assert "hello" in span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + assert "hello world" in span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] try: import tiktoken # type: ignore # noqa # pylint: disable=unused-import if "blocks" in param_id: - assert span["data"]["gen_ai.usage.output_tokens"] == 2 - assert span["data"]["gen_ai.usage.input_tokens"] == 7 - assert span["data"]["gen_ai.usage.total_tokens"] == 9 + assert span["attributes"]["gen_ai.usage.output_tokens"] == 2 + assert span["attributes"]["gen_ai.usage.input_tokens"] == 7 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 9 
else: - assert span["data"]["gen_ai.usage.output_tokens"] == 2 - assert span["data"]["gen_ai.usage.input_tokens"] == 12 - assert span["data"]["gen_ai.usage.total_tokens"] == 14 + assert span["attributes"]["gen_ai.usage.output_tokens"] == 2 + assert span["attributes"]["gen_ai.usage.input_tokens"] == 12 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 14 except ImportError: pass # if tiktoken is not installed, we can't guarantee token usage will be calculated properly -def test_bad_chat_completion(sentry_init, capture_events): +def test_bad_chat_completion(sentry_init, capture_items): sentry_init(integrations=[OpenAIIntegration()], traces_sample_rate=1.0) - events = capture_events() + items = capture_items("event") client = OpenAI(api_key="z") client.chat.completions._post = mock.Mock( @@ -1440,13 +1424,13 @@ def test_bad_chat_completion(sentry_init, capture_events): messages=[{"role": "system", "content": "hello"}], ) - (event,) = events + (event,) = (item.payload for item in items if item.type == "event") assert event["level"] == "error" -def test_span_status_error(sentry_init, capture_events): +def test_span_status_error(sentry_init, capture_items): sentry_init(integrations=[OpenAIIntegration()], traces_sample_rate=1.0) - events = capture_events() + items = capture_items("event", "transaction", "span") with start_transaction(name="test"): client = OpenAI(api_key="z") @@ -1458,17 +1442,20 @@ def test_span_status_error(sentry_init, capture_events): model="some-model", messages=[{"role": "system", "content": "hello"}] ) - (error, transaction) = events - assert error["level"] == "error" - assert transaction["spans"][0]["status"] == "internal_error" - assert transaction["spans"][0]["tags"]["status"] == "internal_error" + (event,) = (item.payload for item in items if item.type == "event") + assert event["level"] == "error" + + spans = [item.payload for item in items if item.type == "span"] + assert spans[0]["status"] == "error" + + (transaction,) = 
(item.payload for item in items if item.type == "transaction") assert transaction["contexts"]["trace"]["status"] == "internal_error" @pytest.mark.asyncio -async def test_bad_chat_completion_async(sentry_init, capture_events): +async def test_bad_chat_completion_async(sentry_init, capture_items): sentry_init(integrations=[OpenAIIntegration()], traces_sample_rate=1.0) - events = capture_events() + items = capture_items("event") client = AsyncOpenAI(api_key="z") client.chat.completions._post = AsyncMock( @@ -1479,7 +1466,7 @@ async def test_bad_chat_completion_async(sentry_init, capture_events): model="some-model", messages=[{"role": "system", "content": "hello"}] ) - (event,) = events + (event,) = (item.payload for item in items if item.type == "event") assert event["level"] == "error" @@ -1492,14 +1479,14 @@ async def test_bad_chat_completion_async(sentry_init, capture_events): ], ) def test_embeddings_create_no_pii( - sentry_init, capture_events, send_default_pii, include_prompts + sentry_init, capture_items, send_default_pii, include_prompts ): sentry_init( integrations=[OpenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("span") client = OpenAI(api_key="z") @@ -1521,17 +1508,15 @@ def test_embeddings_create_no_pii( assert len(response.data[0].embedding) == 3 - tx = events[0] - assert tx["type"] == "transaction" - span = tx["spans"][0] - assert span["op"] == "gen_ai.embeddings" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "text-embedding-3-large" + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.embeddings" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "openai" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "text-embedding-3-large" - assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in 
span["data"] + assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in span["attributes"] - assert span["data"]["gen_ai.usage.input_tokens"] == 20 - assert span["data"]["gen_ai.usage.total_tokens"] == 30 + assert span["attributes"]["gen_ai.usage.input_tokens"] == 20 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 30 @pytest.mark.parametrize( @@ -1577,13 +1562,13 @@ def test_embeddings_create_no_pii( ), ], ) -def test_embeddings_create(sentry_init, capture_events, input, request): +def test_embeddings_create(sentry_init, capture_items, input, request): sentry_init( integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") client = OpenAI(api_key="z") @@ -1603,24 +1588,24 @@ def test_embeddings_create(sentry_init, capture_events, input, request): assert len(response.data[0].embedding) == 3 - tx = events[0] - assert tx["type"] == "transaction" - span = tx["spans"][0] - assert span["op"] == "gen_ai.embeddings" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "text-embedding-3-large" + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.embeddings" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "openai" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "text-embedding-3-large" param_id = request.node.callspec.id if param_id == "string": - assert json.loads(span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) == ["hello"] + assert json.loads(span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) == [ + "hello" + ] elif param_id == "string_sequence" or param_id == "string_iterable": - assert json.loads(span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) == [ + assert json.loads(span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) == [ "First text", "Second text", "Third text", ] elif param_id == "tokens" or param_id == 
"token_iterable": - assert json.loads(span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) == [ + assert json.loads(span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) == [ 5, 8, 13, @@ -1628,13 +1613,13 @@ def test_embeddings_create(sentry_init, capture_events, input, request): 34, ] else: - assert json.loads(span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) == [ + assert json.loads(span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) == [ [5, 8, 13, 21, 34], [8, 13, 21, 34, 55], ] - assert span["data"]["gen_ai.usage.input_tokens"] == 20 - assert span["data"]["gen_ai.usage.total_tokens"] == 30 + assert span["attributes"]["gen_ai.usage.input_tokens"] == 20 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 30 @pytest.mark.asyncio @@ -1647,14 +1632,14 @@ def test_embeddings_create(sentry_init, capture_events, input, request): ], ) async def test_embeddings_create_async_no_pii( - sentry_init, capture_events, send_default_pii, include_prompts + sentry_init, capture_items, send_default_pii, include_prompts ): sentry_init( integrations=[OpenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("span") client = AsyncOpenAI(api_key="z") @@ -1676,17 +1661,15 @@ async def test_embeddings_create_async_no_pii( assert len(response.data[0].embedding) == 3 - tx = events[0] - assert tx["type"] == "transaction" - span = tx["spans"][0] - assert span["op"] == "gen_ai.embeddings" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "text-embedding-3-large" + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.embeddings" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "openai" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "text-embedding-3-large" - assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in span["data"] + assert 
SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in span["attributes"] - assert span["data"]["gen_ai.usage.input_tokens"] == 20 - assert span["data"]["gen_ai.usage.total_tokens"] == 30 + assert span["attributes"]["gen_ai.usage.input_tokens"] == 20 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 30 @pytest.mark.asyncio @@ -1733,13 +1716,13 @@ async def test_embeddings_create_async_no_pii( ), ], ) -async def test_embeddings_create_async(sentry_init, capture_events, input, request): +async def test_embeddings_create_async(sentry_init, capture_items, input, request): sentry_init( integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") client = AsyncOpenAI(api_key="z") @@ -1761,24 +1744,24 @@ async def test_embeddings_create_async(sentry_init, capture_events, input, reque assert len(response.data[0].embedding) == 3 - tx = events[0] - assert tx["type"] == "transaction" - span = tx["spans"][0] - assert span["op"] == "gen_ai.embeddings" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "text-embedding-3-large" + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.embeddings" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "openai" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "text-embedding-3-large" param_id = request.node.callspec.id if param_id == "string": - assert json.loads(span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) == ["hello"] + assert json.loads(span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) == [ + "hello" + ] elif param_id == "string_sequence" or param_id == "string_iterable": - assert json.loads(span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) == [ + assert json.loads(span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) == [ "First text", "Second text", "Third text", ] elif param_id == 
"tokens" or param_id == "token_iterable": - assert json.loads(span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) == [ + assert json.loads(span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) == [ 5, 8, 13, @@ -1786,13 +1769,13 @@ async def test_embeddings_create_async(sentry_init, capture_events, input, reque 34, ] else: - assert json.loads(span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) == [ + assert json.loads(span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) == [ [5, 8, 13, 21, 34], [8, 13, 21, 34, 55], ] - assert span["data"]["gen_ai.usage.input_tokens"] == 20 - assert span["data"]["gen_ai.usage.total_tokens"] == 30 + assert span["attributes"]["gen_ai.usage.input_tokens"] == 20 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 30 @pytest.mark.parametrize( @@ -1800,14 +1783,14 @@ async def test_embeddings_create_async(sentry_init, capture_events, input, reque [(True, True), (True, False), (False, True), (False, False)], ) def test_embeddings_create_raises_error( - sentry_init, capture_events, send_default_pii, include_prompts + sentry_init, capture_items, send_default_pii, include_prompts ): sentry_init( integrations=[OpenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("event") client = OpenAI(api_key="z") @@ -1818,7 +1801,7 @@ def test_embeddings_create_raises_error( with pytest.raises(OpenAIError): client.embeddings.create(input="hello", model="text-embedding-3-large") - (event,) = events + (event,) = (item.payload for item in items if item.type == "event") assert event["level"] == "error" @@ -1828,14 +1811,14 @@ def test_embeddings_create_raises_error( [(True, True), (True, False), (False, True), (False, False)], ) async def test_embeddings_create_raises_error_async( - sentry_init, capture_events, send_default_pii, include_prompts + sentry_init, capture_items, send_default_pii, include_prompts ): sentry_init( 
integrations=[OpenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("event") client = AsyncOpenAI(api_key="z") @@ -1846,16 +1829,16 @@ async def test_embeddings_create_raises_error_async( with pytest.raises(OpenAIError): await client.embeddings.create(input="hello", model="text-embedding-3-large") - (event,) = events + (event,) = (item.payload for item in items if item.type == "event") assert event["level"] == "error" -def test_span_origin_nonstreaming_chat(sentry_init, capture_events): +def test_span_origin_nonstreaming_chat(sentry_init, capture_items): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") client = OpenAI(api_key="z") client.chat.completions._post = mock.Mock(return_value=EXAMPLE_CHAT_COMPLETION) @@ -1865,19 +1848,20 @@ def test_span_origin_nonstreaming_chat(sentry_init, capture_events): model="some-model", messages=[{"role": "system", "content": "hello"}] ) - (event,) = events - + (event,) = (item.payload for item in items if item.type == "transaction") assert event["contexts"]["trace"]["origin"] == "manual" - assert event["spans"][0]["origin"] == "auto.ai.openai" + + spans = [item.payload for item in items if item.type == "span"] + assert spans[0]["attributes"]["sentry.origin"] == "auto.ai.openai" @pytest.mark.asyncio -async def test_span_origin_nonstreaming_chat_async(sentry_init, capture_events): +async def test_span_origin_nonstreaming_chat_async(sentry_init, capture_items): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") client = AsyncOpenAI(api_key="z") client.chat.completions._post = AsyncMock(return_value=EXAMPLE_CHAT_COMPLETION) @@ -1887,18 +1871,19 @@ async def test_span_origin_nonstreaming_chat_async(sentry_init, capture_events): 
model="some-model", messages=[{"role": "system", "content": "hello"}] ) - (event,) = events - + (event,) = (item.payload for item in items if item.type == "transaction") assert event["contexts"]["trace"]["origin"] == "manual" - assert event["spans"][0]["origin"] == "auto.ai.openai" + + spans = [item.payload for item in items if item.type == "span"] + assert spans[0]["attributes"]["sentry.origin"] == "auto.ai.openai" -def test_span_origin_streaming_chat(sentry_init, capture_events): +def test_span_origin_streaming_chat(sentry_init, capture_items): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") client = OpenAI(api_key="z") returned_stream = Stream(cast_to=None, response=None, client=client) @@ -1946,21 +1931,22 @@ def test_span_origin_streaming_chat(sentry_init, capture_events): "".join(map(lambda x: x.choices[0].delta.content, response_stream)) - (event,) = events + (transaction,) = (item.payload for item in items if item.type == "transaction") + assert transaction["contexts"]["trace"]["origin"] == "manual" - assert event["contexts"]["trace"]["origin"] == "manual" - assert event["spans"][0]["origin"] == "auto.ai.openai" + spans = [item.payload for item in items if item.type == "span"] + assert spans[0]["attributes"]["sentry.origin"] == "auto.ai.openai" @pytest.mark.asyncio async def test_span_origin_streaming_chat_async( - sentry_init, capture_events, async_iterator + sentry_init, capture_items, async_iterator ): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") client = AsyncOpenAI(api_key="z") returned_stream = AsyncStream(cast_to=None, response=None, client=client) @@ -2014,18 +2000,19 @@ async def test_span_origin_streaming_chat_async( # "".join(map(lambda x: x.choices[0].delta.content, response_stream)) - (event,) = events - + (event,) = (item.payload for item 
in items if item.type == "transaction") assert event["contexts"]["trace"]["origin"] == "manual" - assert event["spans"][0]["origin"] == "auto.ai.openai" + spans = [item.payload for item in items if item.type == "span"] + assert spans[0]["attributes"]["sentry.origin"] == "auto.ai.openai" -def test_span_origin_embeddings(sentry_init, capture_events): + +def test_span_origin_embeddings(sentry_init, capture_items): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") client = OpenAI(api_key="z") @@ -2043,19 +2030,20 @@ def test_span_origin_embeddings(sentry_init, capture_events): with start_transaction(name="openai tx"): client.embeddings.create(input="hello", model="text-embedding-3-large") - (event,) = events - + (event,) = [item.payload for item in items if item.type == "transaction"] assert event["contexts"]["trace"]["origin"] == "manual" - assert event["spans"][0]["origin"] == "auto.ai.openai" + + spans = [item.payload for item in items if item.type == "span"] + assert spans[0]["attributes"]["sentry.origin"] == "auto.ai.openai" @pytest.mark.asyncio -async def test_span_origin_embeddings_async(sentry_init, capture_events): +async def test_span_origin_embeddings_async(sentry_init, capture_items): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") client = AsyncOpenAI(api_key="z") @@ -2073,10 +2061,11 @@ async def test_span_origin_embeddings_async(sentry_init, capture_events): with start_transaction(name="openai tx"): await client.embeddings.create(input="hello", model="text-embedding-3-large") - (event,) = events - + (event,) = [item.payload for item in items if item.type == "transaction"] assert event["contexts"]["trace"]["origin"] == "manual" - assert event["spans"][0]["origin"] == "auto.ai.openai" + + spans = [item.payload for item in items if item.type == "span"] + 
assert spans[0]["attributes"]["sentry.origin"] == "auto.ai.openai" def test_completions_token_usage_from_response(): @@ -2442,12 +2431,12 @@ def count_tokens(msg): @pytest.mark.skipif(SKIP_RESPONSES_TESTS, reason="Responses API not available") -def test_ai_client_span_responses_api_no_pii(sentry_init, capture_events): +def test_ai_client_span_responses_api_no_pii(sentry_init, capture_items): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span") client = OpenAI(api_key="z") client.responses._post = mock.Mock(return_value=EXAMPLE_RESPONSE) @@ -2462,13 +2451,10 @@ def test_ai_client_span_responses_api_no_pii(sentry_init, capture_events): top_p=0.9, ) - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] assert len(spans) == 1 - assert spans[0]["op"] == "gen_ai.responses" - assert spans[0]["origin"] == "auto.ai.openai" - assert spans[0]["data"] == { + assert spans[0]["attributes"] == { "gen_ai.operation.name": "responses", "gen_ai.request.max_tokens": 100, "gen_ai.request.temperature": 0.7, @@ -2482,13 +2468,21 @@ def test_ai_client_span_responses_api_no_pii(sentry_init, capture_events): "gen_ai.usage.output_tokens": 10, "gen_ai.usage.output_tokens.reasoning": 8, "gen_ai.usage.total_tokens": 30, + "sentry.environment": "production", + "sentry.op": "gen_ai.responses", + "sentry.origin": "auto.ai.openai", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": mock.ANY, + "sentry.segment.id": mock.ANY, + "sentry.segment.name": "openai tx", "thread.id": mock.ANY, "thread.name": mock.ANY, } - assert "gen_ai.system_instructions" not in spans[0]["data"] - assert "gen_ai.request.messages" not in spans[0]["data"] - assert "gen_ai.response.text" not in spans[0]["data"] + assert "gen_ai.system_instructions" not in spans[0]["attributes"] + assert "gen_ai.request.messages" not in 
spans[0]["attributes"] + assert "gen_ai.response.text" not in spans[0]["attributes"] @pytest.mark.parametrize( @@ -2557,14 +2551,14 @@ def test_ai_client_span_responses_api_no_pii(sentry_init, capture_events): ) @pytest.mark.skipif(SKIP_RESPONSES_TESTS, reason="Responses API not available") def test_ai_client_span_responses_api( - sentry_init, capture_events, instructions, input, request + sentry_init, capture_items, instructions, input, request ): sentry_init( integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") client = OpenAI(api_key="z") client.responses._post = mock.Mock(return_value=EXAMPLE_RESPONSE) @@ -2579,12 +2573,9 @@ def test_ai_client_span_responses_api( top_p=0.9, ) - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] assert len(spans) == 1 - assert spans[0]["op"] == "gen_ai.responses" - assert spans[0]["origin"] == "auto.ai.openai" expected_data = { "gen_ai.operation.name": "responses", @@ -2601,6 +2592,14 @@ def test_ai_client_span_responses_api( "gen_ai.usage.total_tokens": 30, "gen_ai.request.model": "gpt-4o", "gen_ai.response.text": "the model response", + "sentry.environment": "production", + "sentry.op": "gen_ai.responses", + "sentry.origin": "auto.ai.openai", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": mock.ANY, + "sentry.segment.id": mock.ANY, + "sentry.segment.name": "openai tx", "thread.id": mock.ANY, "thread.name": mock.ANY, } @@ -2759,17 +2758,17 @@ def test_ai_client_span_responses_api( } ) - assert spans[0]["data"] == expected_data + assert spans[0]["attributes"] == expected_data @pytest.mark.skipif(SKIP_RESPONSES_TESTS, reason="Responses API not available") -def test_error_in_responses_api(sentry_init, capture_events): +def test_error_in_responses_api(sentry_init, capture_items): sentry_init( 
integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("event", "transaction", "span") client = OpenAI(api_key="z") client.responses._post = mock.Mock( @@ -2784,15 +2783,17 @@ def test_error_in_responses_api(sentry_init, capture_events): input="How do I check if a Python object is an instance of a class?", ) - (error_event, transaction_event) = events - - assert transaction_event["type"] == "transaction" # make sure the span where the error occurred is captured - assert transaction_event["spans"][0]["op"] == "gen_ai.responses" + spans = [item.payload for item in items if item.type == "span"] + assert spans[0]["attributes"]["sentry.op"] == "gen_ai.responses" + (error_event,) = (item.payload for item in items if item.type == "event") assert error_event["level"] == "error" assert error_event["exception"]["values"][0]["type"] == "OpenAIError" + (transaction_event,) = ( + item.payload for item in items if item.type == "transaction" + ) assert ( error_event["contexts"]["trace"]["trace_id"] == transaction_event["contexts"]["trace"]["trace_id"] @@ -2866,14 +2867,14 @@ def test_error_in_responses_api(sentry_init, capture_events): ], ) async def test_ai_client_span_responses_async_api( - sentry_init, capture_events, instructions, input, request + sentry_init, capture_items, instructions, input, request ): sentry_init( integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") client = AsyncOpenAI(api_key="z") client.responses._post = AsyncMock(return_value=EXAMPLE_RESPONSE) @@ -2888,12 +2889,9 @@ async def test_ai_client_span_responses_async_api( top_p=0.9, ) - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] assert len(spans) == 1 - assert spans[0]["op"] == "gen_ai.responses" - assert 
spans[0]["origin"] == "auto.ai.openai" expected_data = { "gen_ai.operation.name": "responses", @@ -2911,6 +2909,14 @@ async def test_ai_client_span_responses_async_api( "gen_ai.usage.output_tokens.reasoning": 8, "gen_ai.usage.total_tokens": 30, "gen_ai.response.text": "the model response", + "sentry.environment": "production", + "sentry.op": "gen_ai.responses", + "sentry.origin": "auto.ai.openai", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": mock.ANY, + "sentry.segment.id": mock.ANY, + "sentry.segment.name": "openai tx", "thread.id": mock.ANY, "thread.name": mock.ANY, } @@ -3069,7 +3075,7 @@ async def test_ai_client_span_responses_async_api( } ) - assert spans[0]["data"] == expected_data + assert spans[0]["attributes"] == expected_data @pytest.mark.asyncio @@ -3140,7 +3146,7 @@ async def test_ai_client_span_responses_async_api( @pytest.mark.skipif(SKIP_RESPONSES_TESTS, reason="Responses API not available") async def test_ai_client_span_streaming_responses_async_api( sentry_init, - capture_events, + capture_items, instructions, input, request, @@ -3153,7 +3159,7 @@ async def test_ai_client_span_streaming_responses_async_api( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") client = AsyncOpenAI(api_key="z") returned_stream = get_model_response( @@ -3178,11 +3184,12 @@ async def test_ai_client_span_streaming_responses_async_api( async for _ in result: pass - (transaction,) = events - spans = [span for span in transaction["spans"] if span["op"] == OP.GEN_AI_RESPONSES] + spans = [item.payload for item in items if item.type == "span"] + spans = [ + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_RESPONSES + ] assert len(spans) == 1 - assert spans[0]["origin"] == "auto.ai.openai" expected_data = { "gen_ai.operation.name": "responses", @@ -3200,6 +3207,14 @@ async def test_ai_client_span_streaming_responses_async_api( 
"gen_ai.usage.total_tokens": 30, "gen_ai.request.model": "gpt-4o", "gen_ai.response.text": "hello world", + "sentry.environment": "production", + "sentry.op": "gen_ai.responses", + "sentry.origin": "auto.ai.openai", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": mock.ANY, + "sentry.segment.id": mock.ANY, + "sentry.segment.name": "openai tx", "thread.id": mock.ANY, "thread.name": mock.ANY, } @@ -3358,18 +3373,18 @@ async def test_ai_client_span_streaming_responses_async_api( } ) - assert spans[0]["data"] == expected_data + assert spans[0]["attributes"] == expected_data @pytest.mark.asyncio @pytest.mark.skipif(SKIP_RESPONSES_TESTS, reason="Responses API not available") -async def test_error_in_responses_async_api(sentry_init, capture_events): +async def test_error_in_responses_async_api(sentry_init, capture_items): sentry_init( integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("event", "transaction", "span") client = AsyncOpenAI(api_key="z") client.responses._post = AsyncMock( @@ -3384,15 +3399,17 @@ async def test_error_in_responses_async_api(sentry_init, capture_events): input="How do I check if a Python object is an instance of a class?", ) - (error_event, transaction_event) = events - - assert transaction_event["type"] == "transaction" # make sure the span where the error occurred is captured - assert transaction_event["spans"][0]["op"] == "gen_ai.responses" + spans = [item.payload for item in items if item.type == "span"] + assert spans[0]["attributes"]["sentry.op"] == "gen_ai.responses" + (error_event,) = (item.payload for item in items if item.type == "event") assert error_event["level"] == "error" assert error_event["exception"]["values"][0]["type"] == "OpenAIError" + (transaction_event,) = ( + item.payload for item in items if item.type == "transaction" + ) assert ( 
error_event["contexts"]["trace"]["trace_id"] == transaction_event["contexts"]["trace"]["trace_id"] @@ -3479,7 +3496,7 @@ async def test_error_in_responses_async_api(sentry_init, capture_events): @pytest.mark.skipif(SKIP_RESPONSES_TESTS, reason="Responses API not available") def test_streaming_responses_api( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -3494,7 +3511,7 @@ def test_streaming_responses_api( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("span") client = OpenAI(api_key="z") returned_stream = get_model_response( @@ -3525,26 +3542,25 @@ def test_streaming_responses_api( assert response_string == "hello world" - (transaction,) = events - (span,) = transaction["spans"] - assert span["op"] == "gen_ai.responses" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 + (span,) = (item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.responses" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "openai" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "response-model-id" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "response-model-id" if send_default_pii and include_prompts: - assert span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '["hello"]' - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "hello world" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '["hello"]' + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "hello 
world" else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"]["gen_ai.usage.input_tokens"] == 20 - assert span["data"]["gen_ai.usage.output_tokens"] == 10 - assert span["data"]["gen_ai.usage.total_tokens"] == 30 + assert span["attributes"]["gen_ai.usage.input_tokens"] == 20 + assert span["attributes"]["gen_ai.usage.output_tokens"] == 10 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 30 @pytest.mark.asyncio @@ -3555,7 +3571,7 @@ def test_streaming_responses_api( @pytest.mark.skipif(SKIP_RESPONSES_TESTS, reason="Responses API not available") async def test_streaming_responses_api_async( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -3571,7 +3587,7 @@ async def test_streaming_responses_api_async( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("span") client = AsyncOpenAI(api_key="z") returned_stream = get_model_response( @@ -3600,26 +3616,25 @@ async def test_streaming_responses_api_async( assert response_string == "hello world" - (transaction,) = events - (span,) = transaction["spans"] - assert span["op"] == "gen_ai.responses" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 + (span,) = (item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.responses" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "openai" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 + 
assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "response-model-id" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "response-model-id" if send_default_pii and include_prompts: - assert span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '["hello"]' - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "hello world" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '["hello"]' + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "hello world" else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"]["gen_ai.usage.input_tokens"] == 20 - assert span["data"]["gen_ai.usage.output_tokens"] == 10 - assert span["data"]["gen_ai.usage.total_tokens"] == 30 + assert span["attributes"]["gen_ai.usage.input_tokens"] == 20 + assert span["attributes"]["gen_ai.usage.output_tokens"] == 10 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 30 @pytest.mark.skipif( @@ -3630,12 +3645,12 @@ async def test_streaming_responses_api_async( "tools", [[], None, NOT_GIVEN, omit], ) -def test_empty_tools_in_chat_completion(sentry_init, capture_events, tools): +def test_empty_tools_in_chat_completion(sentry_init, capture_items, tools): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span") client = OpenAI(api_key="z") client.chat.completions._post = mock.Mock(return_value=EXAMPLE_CHAT_COMPLETION) @@ -3647,10 +3662,9 @@ def test_empty_tools_in_chat_completion(sentry_init, capture_events, tools): tools=tools, ) - (event,) = events - span = event["spans"][0] + span = next(item.payload for item in items if item.type == "span") - assert "gen_ai.request.available_tools" not in 
span["data"] + assert "gen_ai.request.available_tools" not in span["attributes"] # Test messages with mixed roles including "ai" that should be mapped to "assistant" @@ -3669,7 +3683,7 @@ def test_empty_tools_in_chat_completion(sentry_init, capture_events, tools): ], ) def test_openai_message_role_mapping( - sentry_init, capture_events, test_message, expected_role + sentry_init, capture_items, test_message, expected_role ): """Test that OpenAI integration properly maps message roles like 'ai' to 'assistant'""" @@ -3678,7 +3692,7 @@ def test_openai_message_role_mapping( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") client = OpenAI(api_key="z") client.chat.completions._post = mock.Mock(return_value=EXAMPLE_CHAT_COMPLETION) @@ -3688,28 +3702,27 @@ def test_openai_message_role_mapping( with start_transaction(name="openai tx"): client.chat.completions.create(model="test-model", messages=test_messages) # Verify that the span was created correctly - (event,) = events - span = event["spans"][0] - assert span["op"] == "gen_ai.chat" - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"] + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["attributes"] # Parse the stored messages import json - stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + stored_messages = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) assert len(stored_messages) == 1 assert stored_messages[0]["role"] == expected_role -def test_openai_message_truncation(sentry_init, capture_events): +def test_openai_message_truncation(sentry_init, capture_items): """Test that large messages are truncated properly in OpenAI integration.""" sentry_init( integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + 
items = capture_items("transaction", "span") client = OpenAI(api_key="z") client.chat.completions._post = mock.Mock(return_value=EXAMPLE_CHAT_COMPLETION) @@ -3730,17 +3743,17 @@ def test_openai_message_truncation(sentry_init, capture_events): messages=large_messages, ) - (event,) = events - span = event["spans"][0] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"] + span = next(item.payload for item in items if item.type == "span") + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["attributes"] - messages_data = span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + messages_data = span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] assert isinstance(messages_data, str) parsed_messages = json.loads(messages_data) assert isinstance(parsed_messages, list) assert len(parsed_messages) <= len(large_messages) + (event,) = (item.payload for item in items if item.type == "transaction") meta_path = event["_meta"] span_meta = meta_path["spans"]["0"]["data"] messages_meta = span_meta[SPANDATA.GEN_AI_REQUEST_MESSAGES] @@ -3749,7 +3762,7 @@ def test_openai_message_truncation(sentry_init, capture_events): # noinspection PyTypeChecker def test_streaming_chat_completion_ttft( - sentry_init, capture_events, get_model_response, server_side_event_chunks + sentry_init, capture_items, get_model_response, server_side_event_chunks ): """ Test that streaming chat completions capture time-to-first-token (TTFT). 
@@ -3758,7 +3771,7 @@ def test_streaming_chat_completion_ttft( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span") client = OpenAI(api_key="z") returned_stream = get_model_response( @@ -3810,13 +3823,12 @@ def test_streaming_chat_completion_ttft( for _ in response_stream: pass - (tx,) = events - span = tx["spans"][0] - assert span["op"] == "gen_ai.chat" + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.chat" # Verify TTFT is captured - assert SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN in span["data"] - ttft = span["data"][SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN] + assert SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN in span["attributes"] + ttft = span["attributes"][SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN] assert isinstance(ttft, float) assert ttft > 0 @@ -3825,7 +3837,7 @@ def test_streaming_chat_completion_ttft( @pytest.mark.asyncio async def test_streaming_chat_completion_ttft_async( sentry_init, - capture_events, + capture_items, get_model_response, async_iterator, server_side_event_chunks, @@ -3837,7 +3849,7 @@ async def test_streaming_chat_completion_ttft_async( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span") client = AsyncOpenAI(api_key="z") returned_stream = get_model_response( @@ -3891,13 +3903,12 @@ async def test_streaming_chat_completion_ttft_async( async for _ in response_stream: pass - (tx,) = events - span = tx["spans"][0] - assert span["op"] == "gen_ai.chat" + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.chat" # Verify TTFT is captured - assert SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN in span["data"] - ttft = span["data"][SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN] + assert SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN in span["attributes"] + ttft = 
span["attributes"][SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN] assert isinstance(ttft, float) assert ttft > 0 @@ -3905,7 +3916,7 @@ async def test_streaming_chat_completion_ttft_async( # noinspection PyTypeChecker @pytest.mark.skipif(SKIP_RESPONSES_TESTS, reason="Responses API not available") def test_streaming_responses_api_ttft( - sentry_init, capture_events, get_model_response, server_side_event_chunks + sentry_init, capture_items, get_model_response, server_side_event_chunks ): """ Test that streaming responses API captures time-to-first-token (TTFT). @@ -3914,7 +3925,7 @@ def test_streaming_responses_api_ttft( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span") client = OpenAI(api_key="z") returned_stream = get_model_response( @@ -3936,13 +3947,12 @@ def test_streaming_responses_api_ttft( for _ in response_stream: pass - (tx,) = events - span = tx["spans"][0] - assert span["op"] == "gen_ai.responses" + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.responses" # Verify TTFT is captured - assert SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN in span["data"] - ttft = span["data"][SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN] + assert SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN in span["attributes"] + ttft = span["attributes"][SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN] assert isinstance(ttft, float) assert ttft > 0 @@ -3952,7 +3962,7 @@ def test_streaming_responses_api_ttft( @pytest.mark.skipif(SKIP_RESPONSES_TESTS, reason="Responses API not available") async def test_streaming_responses_api_ttft_async( sentry_init, - capture_events, + capture_items, get_model_response, async_iterator, server_side_event_chunks, @@ -3964,7 +3974,7 @@ async def test_streaming_responses_api_ttft_async( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span") client = 
AsyncOpenAI(api_key="z") returned_stream = get_model_response( @@ -3986,12 +3996,11 @@ async def test_streaming_responses_api_ttft_async( async for _ in response_stream: pass - (tx,) = events - span = tx["spans"][0] - assert span["op"] == "gen_ai.responses" + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.responses" # Verify TTFT is captured - assert SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN in span["data"] - ttft = span["data"][SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN] + assert SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN in span["attributes"] + ttft = span["attributes"][SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN] assert isinstance(ttft, float) assert ttft > 0 From 1fab6321ef8a6eb80ecc8fc44c2c733c959a62b4 Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Thu, 16 Apr 2026 11:43:47 +0200 Subject: [PATCH 11/36] anthropic tests --- .../integrations/anthropic/test_anthropic.py | 1478 +++++++++-------- 1 file changed, 747 insertions(+), 731 deletions(-) diff --git a/tests/integrations/anthropic/test_anthropic.py b/tests/integrations/anthropic/test_anthropic.py index e86f7e1fa9..c7fc280b6c 100644 --- a/tests/integrations/anthropic/test_anthropic.py +++ b/tests/integrations/anthropic/test_anthropic.py @@ -91,14 +91,14 @@ async def __call__(self, *args, **kwargs): ], ) def test_nonstreaming_create_message( - sentry_init, capture_events, send_default_pii, include_prompts + sentry_init, capture_items, send_default_pii, include_prompts ): sentry_init( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") client = Anthropic(api_key="z") client.messages._post = mock.Mock(return_value=EXAMPLE_MESSAGE) @@ -120,37 +120,38 @@ def test_nonstreaming_create_message( assert usage.input_tokens == 10 assert usage.output_tokens == 20 - assert 
len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - assert len(event["spans"]) == 1 - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + (span,) = spans - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi, I'm Claude." + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi, I'm Claude." 
else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False - assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == ["end_turn"] + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False + assert ( + span["attributes"][SPANDATA.GEN_AI_RESPONSE_ID] + == "msg_01XFDUDYJgAACzvnptvVoYEL" + ) + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == ["end_turn"] @pytest.mark.asyncio @@ -164,14 +165,14 @@ def test_nonstreaming_create_message( ], ) async def test_nonstreaming_create_message_async( - sentry_init, capture_events, send_default_pii, include_prompts + sentry_init, capture_items, send_default_pii, include_prompts ): sentry_init( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") client = AsyncAnthropic(api_key="z") client.messages._post = AsyncMock(return_value=EXAMPLE_MESSAGE) @@ -193,36 +194,37 @@ async def test_nonstreaming_create_message_async( assert usage.input_tokens == 10 assert usage.output_tokens == 20 - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for 
item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - assert len(event["spans"]) == 1 - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + (span,) = spans - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi, I'm Claude." + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi, I'm Claude." 
else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False - assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL" + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False + assert ( + span["attributes"][SPANDATA.GEN_AI_RESPONSE_ID] + == "msg_01XFDUDYJgAACzvnptvVoYEL" + ) @pytest.mark.parametrize( @@ -236,7 +238,7 @@ async def test_nonstreaming_create_message_async( ) def test_streaming_create_message( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -286,7 +288,7 @@ def test_streaming_create_message( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -308,42 +310,45 @@ def test_streaming_create_message( for _ in message: pass - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - span = next(span for span in event["spans"] if span["op"] == OP.GEN_AI_CHAT) + spans = [item.payload for item in items if item.type == "span"] + span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + ) - assert span["op"] == OP.GEN_AI_CHAT 
- assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True - assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == ["max_tokens"] + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert ( + span["attributes"][SPANDATA.GEN_AI_RESPONSE_ID] + == "msg_01XFDUDYJgAACzvnptvVoYEL" + ) + assert 
span["attributes"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == ["max_tokens"] def test_streaming_create_message_close( sentry_init, - capture_events, + capture_items, get_model_response, server_side_event_chunks, ): @@ -391,7 +396,7 @@ def test_streaming_create_message_close( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -415,31 +420,34 @@ def test_streaming_create_message_close( messages.close() - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - span = next(span for span in event["spans"] if span["op"] == OP.GEN_AI_CHAT) + spans = [item.payload for item in items if item.type == "span"] + span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + ) - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi!" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi!" 
- assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True - assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL" + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert ( + span["attributes"][SPANDATA.GEN_AI_RESPONSE_ID] + == "msg_01XFDUDYJgAACzvnptvVoYEL" + ) @pytest.mark.skipif( @@ -448,7 +456,7 @@ def test_streaming_create_message_close( ) def test_streaming_create_message_api_error( sentry_init, - capture_events, + capture_items, get_model_response, server_side_event_chunks, ): @@ -491,7 +499,7 @@ def test_streaming_create_message_api_error( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -513,34 +521,36 @@ def test_streaming_create_message_api_error( for _ in message: pass - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - span = next(span for span in event["spans"] if span["op"] == OP.GEN_AI_CHAT) + spans = [item.payload for item in items if item.type == "span"] + span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + ) - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert 
span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi!" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi!" - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True - assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL" + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert ( + span["attributes"][SPANDATA.GEN_AI_RESPONSE_ID] + == "msg_01XFDUDYJgAACzvnptvVoYEL" + ) - assert span["status"] == "internal_error" - assert span["tags"]["status"] == "internal_error" + assert span["status"] == "error" assert event["contexts"]["trace"]["status"] == "internal_error" @@ -555,7 +565,7 @@ def test_streaming_create_message_api_error( ) def test_stream_messages( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -605,7 +615,7 @@ def test_stream_messages( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -628,42 +638,45 @@ def test_stream_messages( for event in stream: pass - assert 
len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - span = next(span for span in event["spans"] if span["op"] == OP.GEN_AI_CHAT) + spans = [item.payload for item in items if item.type == "span"] + span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + ) - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" 
else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True - assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == ["max_tokens"] + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert ( + span["attributes"][SPANDATA.GEN_AI_RESPONSE_ID] + == "msg_01XFDUDYJgAACzvnptvVoYEL" + ) + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == ["max_tokens"] def test_stream_messages_close( sentry_init, - capture_events, + capture_items, get_model_response, server_side_event_chunks, ): @@ -711,7 +724,7 @@ def test_stream_messages_close( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -740,31 +753,34 @@ def test_stream_messages_close( stream.close() - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - span = next(span for span in event["spans"] if span["op"] == OP.GEN_AI_CHAT) + spans = [item.payload for item in items if item.type == "span"] + span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + ) - 
assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi!" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi!" - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True - assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL" + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert ( + span["attributes"][SPANDATA.GEN_AI_RESPONSE_ID] + == "msg_01XFDUDYJgAACzvnptvVoYEL" + ) @pytest.mark.skipif( @@ -773,7 +789,7 @@ def test_stream_messages_close( ) def test_stream_messages_api_error( sentry_init, - capture_events, + capture_items, get_model_response, server_side_event_chunks, ): @@ -816,7 +832,7 @@ def test_stream_messages_api_error( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ 
{ @@ -839,34 +855,36 @@ def test_stream_messages_api_error( for event in stream: pass - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - span = next(span for span in event["spans"] if span["op"] == OP.GEN_AI_CHAT) + spans = [item.payload for item in items if item.type == "span"] + span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + ) - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi!" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi!" 
- assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True - assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL" + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert ( + span["attributes"][SPANDATA.GEN_AI_RESPONSE_ID] + == "msg_01XFDUDYJgAACzvnptvVoYEL" + ) - assert span["status"] == "internal_error" - assert span["tags"]["status"] == "internal_error" + assert span["status"] == "error" assert event["contexts"]["trace"]["status"] == "internal_error" @@ -882,7 +900,7 @@ def test_stream_messages_api_error( ) async def test_streaming_create_message_async( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -936,7 +954,7 @@ async def test_streaming_create_message_async( default_integrations=False, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -958,44 +976,45 @@ async def test_streaming_create_message_async( async for _ in message: pass - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - assert len(event["spans"]) == 1 - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + (span,) = spans - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert 
span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True - assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == ["max_tokens"] + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert ( + span["attributes"][SPANDATA.GEN_AI_RESPONSE_ID] + == "msg_01XFDUDYJgAACzvnptvVoYEL" + ) + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == ["max_tokens"] @pytest.mark.asyncio async def 
test_streaming_create_message_async_close( sentry_init, - capture_events, + capture_items, get_model_response, async_iterator, server_side_event_chunks, @@ -1046,7 +1065,7 @@ async def test_streaming_create_message_async_close( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -1069,31 +1088,34 @@ async def test_streaming_create_message_async_close( await messages.__anext__() await messages.close() - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - span = next(span for span in event["spans"] if span["op"] == OP.GEN_AI_CHAT) + spans = [item.payload for item in items if item.type == "span"] + span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + ) - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi!" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi!" 
- assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True - assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL" + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert ( + span["attributes"][SPANDATA.GEN_AI_RESPONSE_ID] + == "msg_01XFDUDYJgAACzvnptvVoYEL" + ) @pytest.mark.skipif( @@ -1103,7 +1125,7 @@ async def test_streaming_create_message_async_close( @pytest.mark.asyncio async def test_streaming_create_message_async_api_error( sentry_init, - capture_events, + capture_items, get_model_response, async_iterator, server_side_event_chunks, @@ -1149,7 +1171,7 @@ async def test_streaming_create_message_async_api_error( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -1171,34 +1193,36 @@ async def test_streaming_create_message_async_api_error( async for _ in message: pass - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - span = next(span for span in event["spans"] if span["op"] == OP.GEN_AI_CHAT) + spans = [item.payload for item in items if item.type == "span"] + span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + ) - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - 
assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi!" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi!" - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True - assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL" + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert ( + span["attributes"][SPANDATA.GEN_AI_RESPONSE_ID] + == "msg_01XFDUDYJgAACzvnptvVoYEL" + ) - assert span["status"] == "internal_error" - assert span["tags"]["status"] == "internal_error" + assert span["status"] == "error" assert event["contexts"]["trace"]["status"] == "internal_error" @@ -1214,7 +1238,7 @@ async def test_streaming_create_message_async_api_error( ) async def test_stream_message_async( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -1267,7 +1291,7 @@ async def test_stream_message_async( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", 
"span") messages = [ { @@ -1290,37 +1314,38 @@ async def test_stream_message_async( async for event in stream: pass - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - assert len(event["spans"]) == 1 - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + (span,) = spans - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" 
else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True - assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL" + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert ( + span["attributes"][SPANDATA.GEN_AI_RESPONSE_ID] + == "msg_01XFDUDYJgAACzvnptvVoYEL" + ) @pytest.mark.skipif( @@ -1330,7 +1355,7 @@ async def test_stream_message_async( @pytest.mark.asyncio async def test_stream_messages_async_api_error( sentry_init, - capture_events, + capture_items, get_model_response, async_iterator, server_side_event_chunks, @@ -1376,7 +1401,7 @@ async def test_stream_messages_async_api_error( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -1399,41 +1424,43 @@ async def test_stream_messages_async_api_error( async for event in stream: pass - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - span = next(span for span in event["spans"] if span["op"] == OP.GEN_AI_CHAT) + spans = [item.payload for item in items if item.type == "span"] + span = next( + span for span in spans if span["attributes"]["sentry.op"] == 
OP.GEN_AI_CHAT + ) - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi!" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi!" - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True - assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL" + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert ( + span["attributes"][SPANDATA.GEN_AI_RESPONSE_ID] + == "msg_01XFDUDYJgAACzvnptvVoYEL" + ) - assert span["status"] == "internal_error" - assert span["tags"]["status"] == "internal_error" + assert span["status"] == "error" assert event["contexts"]["trace"]["status"] == "internal_error" @pytest.mark.asyncio async def test_stream_messages_async_close( sentry_init, - capture_events, + capture_items, get_model_response, async_iterator, server_side_event_chunks, @@ 
-1484,7 +1511,7 @@ async def test_stream_messages_async_close( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -1515,31 +1542,34 @@ async def test_stream_messages_async_close( await stream.close() - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - span = next(span for span in event["spans"] if span["op"] == OP.GEN_AI_CHAT) + spans = [item.payload for item in items if item.type == "span"] + span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + ) - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi!" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi!" 
- assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True - assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL" + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert ( + span["attributes"][SPANDATA.GEN_AI_RESPONSE_ID] + == "msg_01XFDUDYJgAACzvnptvVoYEL" + ) @pytest.mark.skipif( @@ -1557,7 +1587,7 @@ async def test_stream_messages_async_close( ) def test_streaming_create_message_with_input_json_delta( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -1637,7 +1667,7 @@ def test_streaming_create_message_with_input_json_delta( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -1659,38 +1689,36 @@ def test_streaming_create_message_with_input_json_delta( for _ in message: pass - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - assert len(event["spans"]) == 1 - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + (span,) = spans - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert 
span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "What is the weather like in San Francisco?"}]' ) assert ( - span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == '{"location": "San Francisco, CA"}' ) else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 366 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 41 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 407 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 366 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 41 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 407 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True @pytest.mark.skipif( @@ -1708,7 +1736,7 @@ def test_streaming_create_message_with_input_json_delta( ) def test_stream_messages_with_input_json_delta( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -1788,7 +1816,7 @@ def test_stream_messages_with_input_json_delta( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -1811,38 +1839,36 @@ def 
test_stream_messages_with_input_json_delta( for event in stream: pass - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - assert len(event["spans"]) == 1 - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + (span,) = spans - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "What is the weather like in San Francisco?"}]' ) assert ( - span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == '{"location": "San Francisco, CA"}' ) else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 366 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 41 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 407 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 366 
+ assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 41 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 407 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True @pytest.mark.asyncio @@ -1861,7 +1887,7 @@ def test_stream_messages_with_input_json_delta( ) async def test_streaming_create_message_with_input_json_delta_async( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -1947,7 +1973,7 @@ async def test_streaming_create_message_with_input_json_delta_async( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -1969,39 +1995,37 @@ async def test_streaming_create_message_with_input_json_delta_async( async for _ in message: pass - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - assert len(event["spans"]) == 1 - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + (span,) = spans - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "What is the 
weather like in San Francisco?"}]' ) assert ( - span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == '{"location": "San Francisco, CA"}' ) else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 366 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 41 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 407 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 366 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 41 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 407 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True @pytest.mark.asyncio @@ -2020,7 +2044,7 @@ async def test_streaming_create_message_with_input_json_delta_async( ) async def test_stream_message_with_input_json_delta_async( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -2106,7 +2130,7 @@ async def test_stream_message_with_input_json_delta_async( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -2129,44 +2153,42 @@ async def test_stream_message_with_input_json_delta_async( async for event in stream: pass - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - assert len(event["spans"]) == 1 - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + (span,) = spans - assert 
span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "What is the weather like in San Francisco?"}]' ) assert ( - span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == '{"location": "San Francisco, CA"}' ) else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 366 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 41 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 407 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 366 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 41 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 407 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True -def test_exception_message_create(sentry_init, capture_events): +def test_exception_message_create(sentry_init, capture_items): sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - events = capture_events() + items = 
capture_items("event", "transaction") client = Anthropic(api_key="z") client.messages._post = mock.Mock( @@ -2179,14 +2201,16 @@ def test_exception_message_create(sentry_init, capture_events): max_tokens=1024, ) - (event, transaction) = events + (event,) = (item.payload for item in items if item.type == "event") assert event["level"] == "error" + + (transaction,) = (item.payload for item in items if item.type == "transaction") assert transaction["contexts"]["trace"]["status"] == "internal_error" -def test_span_status_error(sentry_init, capture_events): +def test_span_status_error(sentry_init, capture_items): sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - events = capture_events() + items = capture_items("event", "span") with start_transaction(name="anthropic"): client = Anthropic(api_key="z") @@ -2200,18 +2224,19 @@ def test_span_status_error(sentry_init, capture_events): max_tokens=1024, ) - (error, transaction) = events + (error,) = (item.payload for item in items if item.type == "event") assert error["level"] == "error" - assert transaction["spans"][0]["status"] == "internal_error" - assert transaction["spans"][0]["tags"]["status"] == "internal_error" - assert transaction["spans"][0]["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert transaction["spans"][0]["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + + spans = [item.payload for item in items if item.type == "span"] + assert spans[0]["status"] == "error" + assert spans[0]["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert spans[0]["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" @pytest.mark.asyncio -async def test_span_status_error_async(sentry_init, capture_events): +async def test_span_status_error_async(sentry_init, capture_items): sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - events = capture_events() + items = capture_items("event", "span") with start_transaction(name="anthropic"): client = 
AsyncAnthropic(api_key="z") @@ -2225,18 +2250,19 @@ async def test_span_status_error_async(sentry_init, capture_events): max_tokens=1024, ) - (error, transaction) = events + (error,) = (item.payload for item in items if item.type == "event") assert error["level"] == "error" - assert transaction["spans"][0]["status"] == "internal_error" - assert transaction["spans"][0]["tags"]["status"] == "internal_error" - assert transaction["spans"][0]["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert transaction["spans"][0]["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + + spans = [item.payload for item in items if item.type == "span"] + assert spans[0]["status"] == "error" + assert spans[0]["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert spans[0]["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" @pytest.mark.asyncio -async def test_exception_message_create_async(sentry_init, capture_events): +async def test_exception_message_create_async(sentry_init, capture_items): sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - events = capture_events() + items = capture_items("event", "transaction") client = AsyncAnthropic(api_key="z") client.messages._post = AsyncMock( @@ -2249,17 +2275,19 @@ async def test_exception_message_create_async(sentry_init, capture_events): max_tokens=1024, ) - (event, transaction) = events + (event,) = (item.payload for item in items if item.type == "event") assert event["level"] == "error" + + (transaction,) = (item.payload for item in items if item.type == "transaction") assert transaction["contexts"]["trace"]["status"] == "internal_error" -def test_span_origin(sentry_init, capture_events): +def test_span_origin(sentry_init, capture_items): sentry_init( integrations=[AnthropicIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") client = Anthropic(api_key="z") client.messages._post = mock.Mock(return_value=EXAMPLE_MESSAGE) @@ -2274,21 
+2302,22 @@ def test_span_origin(sentry_init, capture_events): with start_transaction(name="anthropic"): client.messages.create(max_tokens=1024, messages=messages, model="model") - (event,) = events - + (event,) = (item.payload for item in items if item.type == "transaction") assert event["contexts"]["trace"]["origin"] == "manual" - assert event["spans"][0]["origin"] == "auto.ai.anthropic" - assert event["spans"][0]["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert event["spans"][0]["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + + spans = [item.payload for item in items if item.type == "span"] + assert spans[0]["attributes"]["sentry.origin"] == "auto.ai.anthropic" + assert spans[0]["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert spans[0]["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" @pytest.mark.asyncio -async def test_span_origin_async(sentry_init, capture_events): +async def test_span_origin_async(sentry_init, capture_items): sentry_init( integrations=[AnthropicIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") client = AsyncAnthropic(api_key="z") client.messages._post = AsyncMock(return_value=EXAMPLE_MESSAGE) @@ -2303,12 +2332,13 @@ async def test_span_origin_async(sentry_init, capture_events): with start_transaction(name="anthropic"): await client.messages.create(max_tokens=1024, messages=messages, model="model") - (event,) = events - + (event,) = (item.payload for item in items if item.type == "transaction") assert event["contexts"]["trace"]["origin"] == "manual" - assert event["spans"][0]["origin"] == "auto.ai.anthropic" - assert event["spans"][0]["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert event["spans"][0]["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + + spans = [item.payload for item in items if item.type == "span"] + assert spans[0]["attributes"]["sentry.origin"] == "auto.ai.anthropic" + assert 
spans[0]["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert spans[0]["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" @pytest.mark.skipif( @@ -2392,7 +2422,7 @@ def test_set_output_data_with_input_json_delta(sentry_init): ], ) def test_anthropic_message_role_mapping( - sentry_init, capture_events, test_message, expected_role + sentry_init, capture_items, test_message, expected_role ): """Test that Anthropic integration properly maps message roles like 'ai' to 'assistant'""" sentry_init( @@ -2400,7 +2430,7 @@ def test_anthropic_message_role_mapping( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -2425,29 +2455,28 @@ def mock_messages_create(*args, **kwargs): model="claude-3-opus", max_tokens=10, messages=test_messages ) - (event,) = events - span = event["spans"][0] + span = next(item.payload for item in items if item.type == "span") # Verify that the span was created correctly - assert span["op"] == "gen_ai.chat" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"] + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["attributes"] # Parse the stored messages - stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + stored_messages = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) assert stored_messages[0]["role"] == expected_role -def test_anthropic_message_truncation(sentry_init, capture_events): +def test_anthropic_message_truncation(sentry_init, capture_items): """Test that large messages are truncated properly in Anthropic integration.""" sentry_init( 
integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") client = Anthropic(api_key="z") client.messages._post = mock.Mock(return_value=EXAMPLE_MESSAGE) @@ -2466,21 +2495,18 @@ def test_anthropic_message_truncation(sentry_init, capture_events): with start_transaction(): client.messages.create(max_tokens=1024, messages=messages, model="model") - assert len(events) > 0 - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] chat_spans = [ - span for span in tx.get("spans", []) if span.get("op") == OP.GEN_AI_CHAT + span for span in spans if span["attributes"].get("sentry.op") == OP.GEN_AI_CHAT ] assert len(chat_spans) > 0 chat_span = chat_spans[0] - assert chat_span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert chat_span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in chat_span["data"] + assert chat_span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert chat_span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in chat_span["attributes"] - messages_data = chat_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + messages_data = chat_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] assert isinstance(messages_data, str) parsed_messages = json.loads(messages_data) @@ -2488,18 +2514,19 @@ def test_anthropic_message_truncation(sentry_init, capture_events): assert len(parsed_messages) == 1 assert "small message 5" in str(parsed_messages[0]) + tx = next(item.payload for item in items if item.type == "transaction") assert tx["_meta"]["spans"]["0"]["data"]["gen_ai.request.messages"][""]["len"] == 5 @pytest.mark.asyncio -async def test_anthropic_message_truncation_async(sentry_init, capture_events): +async def test_anthropic_message_truncation_async(sentry_init, 
capture_items): """Test that large messages are truncated properly in Anthropic integration.""" sentry_init( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") client = AsyncAnthropic(api_key="z") client.messages._post = mock.AsyncMock(return_value=EXAMPLE_MESSAGE) @@ -2518,21 +2545,18 @@ async def test_anthropic_message_truncation_async(sentry_init, capture_events): with start_transaction(): await client.messages.create(max_tokens=1024, messages=messages, model="model") - assert len(events) > 0 - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] chat_spans = [ - span for span in tx.get("spans", []) if span.get("op") == OP.GEN_AI_CHAT + span for span in spans if span["attributes"].get("sentry.op") == OP.GEN_AI_CHAT ] assert len(chat_spans) > 0 chat_span = chat_spans[0] - assert chat_span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert chat_span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in chat_span["data"] + assert chat_span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert chat_span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in chat_span["attributes"] - messages_data = chat_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + messages_data = chat_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] assert isinstance(messages_data, str) parsed_messages = json.loads(messages_data) @@ -2540,6 +2564,7 @@ async def test_anthropic_message_truncation_async(sentry_init, capture_events): assert len(parsed_messages) == 1 assert "small message 5" in str(parsed_messages[0]) + tx = next(item.payload for item in items if item.type == "transaction") assert tx["_meta"]["spans"]["0"]["data"]["gen_ai.request.messages"][""]["len"] == 5 @@ -2553,7 +2578,7 @@ async 
def test_anthropic_message_truncation_async(sentry_init, capture_events): ], ) def test_nonstreaming_create_message_with_system_prompt( - sentry_init, capture_events, send_default_pii, include_prompts + sentry_init, capture_items, send_default_pii, include_prompts ): """Test that system prompts are properly captured in GEN_AI_REQUEST_MESSAGES.""" sentry_init( @@ -2561,7 +2586,7 @@ def test_nonstreaming_create_message_with_system_prompt( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") client = Anthropic(api_key="z") client.messages._post = mock.Mock(return_value=EXAMPLE_MESSAGE) @@ -2586,46 +2611,46 @@ def test_nonstreaming_create_message_with_system_prompt( assert usage.input_tokens == 10 assert usage.output_tokens == 20 - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - assert len(event["spans"]) == 1 - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + (span,) = spans - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS in span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS in span["attributes"] system_instructions = json.loads( - 
span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] + span["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] ) assert system_instructions == [ {"type": "text", "content": "You are a helpful assistant."} ] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"] - stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["attributes"] + stored_messages = json.loads( + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + ) assert len(stored_messages) == 1 assert stored_messages[0]["role"] == "user" assert stored_messages[0]["content"] == "Hello, Claude" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi, I'm Claude." + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi, I'm Claude." else: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["data"] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["attributes"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False - assert span["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == ["end_turn"] + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == ["end_turn"] @pytest.mark.asyncio @@ -2639,7 +2664,7 @@ def test_nonstreaming_create_message_with_system_prompt( ], ) async 
def test_nonstreaming_create_message_with_system_prompt_async( - sentry_init, capture_events, send_default_pii, include_prompts + sentry_init, capture_items, send_default_pii, include_prompts ): """Test that system prompts are properly captured in GEN_AI_REQUEST_MESSAGES (async).""" sentry_init( @@ -2647,7 +2672,7 @@ async def test_nonstreaming_create_message_with_system_prompt_async( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") client = AsyncAnthropic(api_key="z") client.messages._post = AsyncMock(return_value=EXAMPLE_MESSAGE) @@ -2672,46 +2697,46 @@ async def test_nonstreaming_create_message_with_system_prompt_async( assert usage.input_tokens == 10 assert usage.output_tokens == 20 - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - assert len(event["spans"]) == 1 - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + (span,) = spans - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS in span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS in span["attributes"] system_instructions = json.loads( - span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] + 
span["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] ) assert system_instructions == [ {"type": "text", "content": "You are a helpful assistant."} ] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"] - stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["attributes"] + stored_messages = json.loads( + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + ) assert len(stored_messages) == 1 assert stored_messages[0]["role"] == "user" assert stored_messages[0]["content"] == "Hello, Claude" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi, I'm Claude." + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi, I'm Claude." else: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["data"] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["attributes"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False - assert span["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == ["end_turn"] + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == ["end_turn"] @pytest.mark.parametrize( @@ -2725,7 +2750,7 @@ async def test_nonstreaming_create_message_with_system_prompt_async( ) def 
test_streaming_create_message_with_system_prompt( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -2776,7 +2801,7 @@ def test_streaming_create_message_with_system_prompt( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -2802,46 +2827,46 @@ def test_streaming_create_message_with_system_prompt( for _ in message: pass - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - assert len(event["spans"]) == 1 - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + (span,) = spans - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS in span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS in span["attributes"] system_instructions = json.loads( - span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] + span["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] ) assert system_instructions == [ {"type": "text", "content": "You are a helpful assistant."} ] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"] - stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + assert 
SPANDATA.GEN_AI_REQUEST_MESSAGES in span["attributes"] + stored_messages = json.loads( + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + ) assert len(stored_messages) == 1 assert stored_messages[0]["role"] == "user" assert stored_messages[0]["content"] == "Hello, Claude" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" else: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["data"] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["attributes"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True @pytest.mark.parametrize( @@ -2855,7 +2880,7 @@ def test_streaming_create_message_with_system_prompt( ) def test_stream_messages_with_system_prompt( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -2906,7 +2931,7 @@ def test_stream_messages_with_system_prompt( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -2930,46 +2955,46 @@ def test_stream_messages_with_system_prompt( for event in stream: pass - assert len(events) == 1 - (event,) = events - - assert 
event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - assert len(event["spans"]) == 1 - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + (span,) = spans - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS in span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS in span["attributes"] system_instructions = json.loads( - span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] + span["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] ) assert system_instructions == [ {"type": "text", "content": "You are a helpful assistant."} ] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"] - stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["attributes"] + stored_messages = json.loads( + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + ) assert len(stored_messages) == 1 assert stored_messages[0]["role"] == "user" assert stored_messages[0]["content"] == "Hello, Claude" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" 
else: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["data"] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["attributes"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True @pytest.mark.asyncio @@ -2984,7 +3009,7 @@ def test_stream_messages_with_system_prompt( ) async def test_stream_message_with_system_prompt_async( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -3038,7 +3063,7 @@ async def test_stream_message_with_system_prompt_async( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -3062,46 +3087,46 @@ async def test_stream_message_with_system_prompt_async( async for event in stream: pass - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - assert len(event["spans"]) == 1 - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + (span,) = spans - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert 
span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS in span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS in span["attributes"] system_instructions = json.loads( - span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] + span["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] ) assert system_instructions == [ {"type": "text", "content": "You are a helpful assistant."} ] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"] - stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["attributes"] + stored_messages = json.loads( + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + ) assert len(stored_messages) == 1 assert stored_messages[0]["role"] == "user" assert stored_messages[0]["content"] == "Hello, Claude" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" 
else: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["data"] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["attributes"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True @pytest.mark.asyncio @@ -3116,7 +3141,7 @@ async def test_stream_message_with_system_prompt_async( ) async def test_streaming_create_message_with_system_prompt_async( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -3170,7 +3195,7 @@ async def test_streaming_create_message_with_system_prompt_async( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -3196,56 +3221,56 @@ async def test_streaming_create_message_with_system_prompt_async( async for _ in message: pass - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - assert len(event["spans"]) == 1 - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + (span,) = spans - assert span["op"] == OP.GEN_AI_CHAT - assert 
span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS in span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS in span["attributes"] system_instructions = json.loads( - span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] + span["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] ) assert system_instructions == [ {"type": "text", "content": "You are a helpful assistant."} ] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"] - stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["attributes"] + stored_messages = json.loads( + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + ) assert len(stored_messages) == 1 assert stored_messages[0]["role"] == "user" assert stored_messages[0]["content"] == "Hello, Claude" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" 
else: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["data"] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["attributes"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True -def test_system_prompt_with_complex_structure(sentry_init, capture_events): +def test_system_prompt_with_complex_structure(sentry_init, capture_items): """Test that complex system prompt structures (list of text blocks) are properly captured.""" sentry_init( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") client = Anthropic(api_key="z") client.messages._post = mock.Mock(return_value=EXAMPLE_MESSAGE) @@ -3268,17 +3293,18 @@ def test_system_prompt_with_complex_structure(sentry_init, capture_events): ) assert response == EXAMPLE_MESSAGE - assert len(events) == 1 - (event,) = events - assert len(event["spans"]) == 1 - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + (span,) = spans - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert 
span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS in span["data"] - system_instructions = json.loads(span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS in span["attributes"] + system_instructions = json.loads( + span["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] + ) # System content should be a list of text blocks assert isinstance(system_instructions, list) @@ -3287,8 +3313,8 @@ def test_system_prompt_with_complex_structure(sentry_init, capture_events): {"type": "text", "content": "Be concise and clear."}, ] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"] - stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["attributes"] + stored_messages = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) assert len(stored_messages) == 1 assert stored_messages[0]["role"] == "user" @@ -3490,14 +3516,14 @@ def test_transform_message_content_list_anthropic(): # Integration tests for binary data in messages -def test_message_with_base64_image(sentry_init, capture_events): +def test_message_with_base64_image(sentry_init, capture_items): """Test that messages with base64 images are properly captured.""" sentry_init( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") client = Anthropic(api_key="z") client.messages._post = mock.Mock(return_value=EXAMPLE_MESSAGE) @@ -3521,12 +3547,11 @@ def test_message_with_base64_image(sentry_init, capture_events): with start_transaction(name="anthropic"): client.messages.create(max_tokens=1024, messages=messages, model="model") - assert len(events) == 1 - (event,) = events - (span,) = event["spans"] + spans = [item.payload for item in items if item.type 
== "span"] + (span,) = spans - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"] - stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["attributes"] + stored_messages = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) assert len(stored_messages) == 1 assert stored_messages[0]["role"] == "user" @@ -3541,14 +3566,14 @@ def test_message_with_base64_image(sentry_init, capture_events): } -def test_message_with_url_image(sentry_init, capture_events): +def test_message_with_url_image(sentry_init, capture_items): """Test that messages with URL-referenced images are properly captured.""" sentry_init( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") client = Anthropic(api_key="z") client.messages._post = mock.Mock(return_value=EXAMPLE_MESSAGE) @@ -3571,11 +3596,10 @@ def test_message_with_url_image(sentry_init, capture_events): with start_transaction(name="anthropic"): client.messages.create(max_tokens=1024, messages=messages, model="model") - assert len(events) == 1 - (event,) = events - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + (span,) = spans - stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + stored_messages = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) content = stored_messages[0]["content"] assert content[1] == { "type": "uri", @@ -3585,14 +3609,14 @@ def test_message_with_url_image(sentry_init, capture_events): } -def test_message_with_file_image(sentry_init, capture_events): +def test_message_with_file_image(sentry_init, capture_items): """Test that messages with file_id-referenced images are properly captured.""" sentry_init( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events 
= capture_events()
+    items = capture_items("transaction", "span")
 
     client = Anthropic(api_key="z")
     client.messages._post = mock.Mock(return_value=EXAMPLE_MESSAGE)
@@ -3616,11 +3640,10 @@ def test_message_with_file_image(sentry_init, capture_events):
     with start_transaction(name="anthropic"):
         client.messages.create(max_tokens=1024, messages=messages, model="model")
 
-    assert len(events) == 1
-    (event,) = events
-    (span,) = event["spans"]
+    spans = [item.payload for item in items if item.type == "span"]
+    (span,) = spans
 
-    stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES])
+    stored_messages = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES])
     content = stored_messages[0]["content"]
     assert content[1] == {
         "type": "file",
@@ -3630,14 +3653,14 @@ def test_message_with_file_image(sentry_init, capture_events):
     }
 
 
-def test_message_with_base64_pdf(sentry_init, capture_events):
+def test_message_with_base64_pdf(sentry_init, capture_items):
     """Test that messages with base64-encoded PDF documents are properly captured."""
     sentry_init(
         integrations=[AnthropicIntegration(include_prompts=True)],
         traces_sample_rate=1.0,
         send_default_pii=True,
     )
-    events = capture_events()
+    items = capture_items("transaction", "span")
 
     client = Anthropic(api_key="z")
     client.messages._post = mock.Mock(return_value=EXAMPLE_MESSAGE)
@@ -3651,7 +3674,7 @@ def test_message_with_base64_pdf(sentry_init, capture_events):
                     "source": {
                         "type": "base64",
                         "media_type": "application/pdf",
-                        "data": "JVBERi0xLjQKJeLj...base64pdfdata",
+                        "data": "JVBERi0xLjQKJeLj...base64pdfdata",
                     },
                 },
             ],
@@ -3661,11 +3684,10 @@ def test_message_with_base64_pdf(sentry_init, capture_events):
     with start_transaction(name="anthropic"):
         client.messages.create(max_tokens=1024, messages=messages, model="model")
 
-    assert len(events) == 1
-    (event,) = events
-    (span,) = event["spans"]
+    spans = [item.payload for item in items if item.type == "span"]
+    (span,) = spans
 
-    stored_messages = 
json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + stored_messages = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) content = stored_messages[0]["content"] assert content[1] == { "type": "blob", @@ -3675,14 +3697,14 @@ def test_message_with_base64_pdf(sentry_init, capture_events): } -def test_message_with_url_pdf(sentry_init, capture_events): +def test_message_with_url_pdf(sentry_init, capture_items): """Test that messages with URL-referenced PDF documents are properly captured.""" sentry_init( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") client = Anthropic(api_key="z") client.messages._post = mock.Mock(return_value=EXAMPLE_MESSAGE) @@ -3705,11 +3727,10 @@ def test_message_with_url_pdf(sentry_init, capture_events): with start_transaction(name="anthropic"): client.messages.create(max_tokens=1024, messages=messages, model="model") - assert len(events) == 1 - (event,) = events - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + (span,) = spans - stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + stored_messages = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) content = stored_messages[0]["content"] assert content[1] == { "type": "uri", @@ -3719,14 +3740,14 @@ def test_message_with_url_pdf(sentry_init, capture_events): } -def test_message_with_file_document(sentry_init, capture_events): +def test_message_with_file_document(sentry_init, capture_items): """Test that messages with file_id-referenced documents are properly captured.""" sentry_init( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") client = Anthropic(api_key="z") client.messages._post = mock.Mock(return_value=EXAMPLE_MESSAGE) 
@@ -3750,11 +3771,10 @@ def test_message_with_file_document(sentry_init, capture_events):
     with start_transaction(name="anthropic"):
         client.messages.create(max_tokens=1024, messages=messages, model="model")
 
-    assert len(events) == 1
-    (event,) = events
-    (span,) = event["spans"]
+    spans = [item.payload for item in items if item.type == "span"]
+    (span,) = spans
 
-    stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES])
+    stored_messages = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES])
     content = stored_messages[0]["content"]
     assert content[1] == {
         "type": "file",
@@ -3764,14 +3784,14 @@ def test_message_with_file_document(sentry_init, capture_events):
     }
 
 
-def test_message_with_mixed_content(sentry_init, capture_events):
+def test_message_with_mixed_content(sentry_init, capture_items):
     """Test that messages with mixed content (text, images, documents) are properly captured."""
     sentry_init(
         integrations=[AnthropicIntegration(include_prompts=True)],
         traces_sample_rate=1.0,
         send_default_pii=True,
     )
-    events = capture_events()
+    items = capture_items("transaction", "span")
 
     client = Anthropic(api_key="z")
     client.messages._post = mock.Mock(return_value=EXAMPLE_MESSAGE)
@@ -3785,7 +3805,7 @@ def test_message_with_mixed_content(sentry_init, capture_events):
                     "source": {
                         "type": "base64",
                         "media_type": "image/png",
-                        "data": "iVBORw0KGgo...base64imagedata",
+                        "data": "iVBORw0KGgo...base64imagedata",
                     },
                 },
                 {
@@ -3800,7 +3820,7 @@ def test_message_with_mixed_content(sentry_init, capture_events):
                     "source": {
                         "type": "base64",
                         "media_type": "application/pdf",
-                        "data": "JVBERi0xLjQK...base64pdfdata",
+                        "data": "JVBERi0xLjQK...base64pdfdata",
                     },
                 },
                 {"type": "text", "text": "Please provide a detailed analysis."},
             ],
@@ -3811,11 +3831,10 @@ def test_message_with_mixed_content(sentry_init, capture_events):
     with start_transaction(name="anthropic"):
         client.messages.create(max_tokens=1024, messages=messages, model="model")
 
-    assert len(events) 
== 1
-    (event,) = events
-    (span,) = event["spans"]
+    spans = [item.payload for item in items if item.type == "span"]
+    (span,) = spans
 
-    stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES])
+    stored_messages = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES])
     content = stored_messages[0]["content"]
 
     assert len(content) == 5
@@ -3847,14 +3866,14 @@ def test_message_with_mixed_content(sentry_init, capture_events):
     }
 
 
-def test_message_with_multiple_images_different_formats(sentry_init, capture_events):
+def test_message_with_multiple_images_different_formats(sentry_init, capture_items):
     """Test that messages with multiple images of different source types are handled."""
     sentry_init(
         integrations=[AnthropicIntegration(include_prompts=True)],
         traces_sample_rate=1.0,
         send_default_pii=True,
     )
-    events = capture_events()
+    items = capture_items("transaction", "span")
 
     client = Anthropic(api_key="z")
     client.messages._post = mock.Mock(return_value=EXAMPLE_MESSAGE)
@@ -3867,7 +3886,7 @@ def test_message_with_multiple_images_different_formats(sentry_init, capture_eve
                     "source": {
                         "type": "base64",
                         "media_type": "image/jpeg",
-                        "data": "base64data1...",
+                        "data": "base64data1...",
                     },
                 },
                 {
@@ -3893,11 +3912,10 @@ def test_message_with_multiple_images_different_formats(sentry_init, capture_eve
     with start_transaction(name="anthropic"):
         client.messages.create(max_tokens=1024, messages=messages, model="model")
 
-    assert len(events) == 1
-    (event,) = events
-    (span,) = event["spans"]
+    spans = [item.payload for item in items if item.type == "span"]
+    (span,) = spans
 
-    stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES])
+    stored_messages = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES])
     content = stored_messages[0]["content"]
 
     assert len(content) == 4
@@ -3922,14 +3940,14 @@ def test_message_with_multiple_images_different_formats(sentry_init, capture_eve
     assert content[3] == {"type": "text", "text": 
"Compare these three images."}
 
 
-def test_binary_content_not_stored_when_pii_disabled(sentry_init, capture_events):
+def test_binary_content_not_stored_when_pii_disabled(sentry_init, capture_items):
     """Test that binary content is not stored when send_default_pii is False."""
     sentry_init(
         integrations=[AnthropicIntegration(include_prompts=True)],
         traces_sample_rate=1.0,
         send_default_pii=False,
     )
-    events = capture_events()
+    items = capture_items("transaction", "span")
 
     client = Anthropic(api_key="z")
     client.messages._post = mock.Mock(return_value=EXAMPLE_MESSAGE)
@@ -3943,7 +3961,7 @@ def test_binary_content_not_stored_when_pii_disabled(sentry_init, capture_events
                     "source": {
                         "type": "base64",
                         "media_type": "image/jpeg",
-                        "data": "base64encodeddatahere...",
+                        "data": "base64encodeddatahere...",
                     },
                 },
             ],
@@ -3953,22 +3971,21 @@ def test_binary_content_not_stored_when_pii_disabled(sentry_init, capture_events
     with start_transaction(name="anthropic"):
         client.messages.create(max_tokens=1024, messages=messages, model="model")
 
-    assert len(events) == 1
-    (event,) = events
-    (span,) = event["spans"]
+    spans = [item.payload for item in items if item.type == "span"]
+    (span,) = spans
 
     # Messages should not be stored
-    assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"]
+    assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"]
 
 
-def test_binary_content_not_stored_when_prompts_disabled(sentry_init, capture_events):
+def test_binary_content_not_stored_when_prompts_disabled(sentry_init, capture_items):
     """Test that binary content is not stored when include_prompts is False."""
     sentry_init(
         integrations=[AnthropicIntegration(include_prompts=False)],
         traces_sample_rate=1.0,
         send_default_pii=True,
     )
-    events = capture_events()
+    items = capture_items("transaction", "span")
 
     client = Anthropic(api_key="z")
     client.messages._post = mock.Mock(return_value=EXAMPLE_MESSAGE)
@@ -3982,7 +3999,7 @@ def test_binary_content_not_stored_when_prompts_disabled(sentry_init, 
capture_ev "source": { "type": "base64", "media_type": "image/jpeg", - "data": "base64encodeddatahere...", + "attributes": "base64encodeddatahere...", }, }, ], @@ -3992,18 +4009,17 @@ def test_binary_content_not_stored_when_prompts_disabled(sentry_init, capture_ev with start_transaction(name="anthropic"): client.messages.create(max_tokens=1024, messages=messages, model="model") - assert len(events) == 1 - (event,) = events - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + (span,) = spans # Messages should not be stored - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] -def test_cache_tokens_nonstreaming(sentry_init, capture_events): +def test_cache_tokens_nonstreaming(sentry_init, capture_items): """Test cache read/write tokens are tracked for non-streaming responses.""" sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - events = capture_events() + items = capture_items("transaction", "span") client = Anthropic(api_key="z") client.messages._post = mock.Mock( @@ -4029,16 +4045,16 @@ def test_cache_tokens_nonstreaming(sentry_init, capture_events): model="claude-3-5-sonnet-20241022", ) - (span,) = events[0]["spans"] + (span,) = [item.payload for item in items if item.type == "span"] # input_tokens normalized: 100 + 80 (cache_read) + 20 (cache_write) = 200 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 200 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 50 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 250 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 80 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 200 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 50 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 250 + assert 
span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 80 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 20 -def test_input_tokens_include_cache_write_nonstreaming(sentry_init, capture_events): +def test_input_tokens_include_cache_write_nonstreaming(sentry_init, capture_items): """ Test that gen_ai.usage.input_tokens includes cache_write tokens (non-streaming). @@ -4051,7 +4067,7 @@ def test_input_tokens_include_cache_write_nonstreaming(sentry_init, capture_even cache_creation_input_tokens=2846, cache_read_input_tokens=0) """ sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - events = capture_events() + items = capture_items("transaction", "span") client = Anthropic(api_key="z") client.messages._post = mock.Mock( @@ -4077,16 +4093,16 @@ def test_input_tokens_include_cache_write_nonstreaming(sentry_init, capture_even model="claude-sonnet-4-20250514", ) - (span,) = events[0]["spans"] + (span,) = [item.payload for item in items if item.type == "span"] # input_tokens should be total: 19 (non-cached) + 2846 (cache_write) = 2865 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 2865 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 2879 # 2865 + 14 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 0 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 2846 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 2865 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 2879 # 2865 + 14 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 0 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 2846 -def test_input_tokens_include_cache_read_nonstreaming(sentry_init, capture_events): +def test_input_tokens_include_cache_read_nonstreaming(sentry_init, capture_items): """ Test that gen_ai.usage.input_tokens includes cache_read tokens (non-streaming). 
@@ -4099,7 +4115,7 @@ def test_input_tokens_include_cache_read_nonstreaming(sentry_init, capture_event cache_creation_input_tokens=0, cache_read_input_tokens=2846) """ sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - events = capture_events() + items = capture_items("transaction", "span") client = Anthropic(api_key="z") client.messages._post = mock.Mock( @@ -4125,18 +4141,18 @@ def test_input_tokens_include_cache_read_nonstreaming(sentry_init, capture_event model="claude-sonnet-4-20250514", ) - (span,) = events[0]["spans"] + (span,) = [item.payload for item in items if item.type == "span"] # input_tokens should be total: 19 (non-cached) + 2846 (cache_read) = 2865 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 2865 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 2879 # 2865 + 14 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 2846 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 0 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 2865 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 2879 # 2865 + 14 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 2846 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 0 def test_input_tokens_include_cache_read_streaming( sentry_init, - capture_events, + capture_items, get_model_response, server_side_event_chunks, ): @@ -4176,7 +4192,7 @@ def test_input_tokens_include_cache_read_streaming( ) sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - events = capture_events() + items = capture_items("transaction", "span") with mock.patch.object( client._client, @@ -4192,18 +4208,18 @@ def test_input_tokens_include_cache_read_streaming( ): pass - (span,) = events[0]["spans"] + (span,) = [item.payload for item in items if item.type == "span"] # input_tokens should be total: 19 + 2846 = 
test_stream_messages_input_tokens_include_cache_read_streaming - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 2865 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 2879 # 2865 + 14 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 2846 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 0 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 2865 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 2879 # 2865 + 14 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 2846 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 0 def test_stream_messages_input_tokens_include_cache_read_streaming( sentry_init, - capture_events, + capture_items, get_model_response, server_side_event_chunks, ): @@ -4242,7 +4258,7 @@ def test_stream_messages_input_tokens_include_cache_read_streaming( ) sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - events = capture_events() + items = capture_items("transaction", "span") with mock.patch.object( client._client, @@ -4258,16 +4274,16 @@ def test_stream_messages_input_tokens_include_cache_read_streaming( for event in stream: pass - (span,) = events[0]["spans"] + (span,) = [item.payload for item in items if item.type == "span"] # input_tokens should be total: 19 + 2846 = 2865 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 2865 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 2879 # 2865 + 14 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 2846 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 0 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 2865 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 2879 # 2865 + 14 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 2846 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 0 -def 
test_input_tokens_unchanged_without_caching(sentry_init, capture_events): +def test_input_tokens_unchanged_without_caching(sentry_init, capture_items): """ Test that input_tokens is unchanged when there are no cached tokens. @@ -4275,7 +4291,7 @@ def test_input_tokens_unchanged_without_caching(sentry_init, capture_events): Usage(input_tokens=20, output_tokens=12) """ sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - events = capture_events() + items = capture_items("transaction", "span") client = Anthropic(api_key="z") client.messages._post = mock.Mock( @@ -4299,15 +4315,15 @@ def test_input_tokens_unchanged_without_caching(sentry_init, capture_events): model="claude-sonnet-4-20250514", ) - (span,) = events[0]["spans"] + (span,) = [item.payload for item in items if item.type == "span"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 32 # 20 + 12 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 32 # 20 + 12 def test_cache_tokens_streaming( sentry_init, - capture_events, + capture_items, get_model_response, server_side_event_chunks, ): @@ -4343,7 +4359,7 @@ def test_cache_tokens_streaming( ) sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - events = capture_events() + items = capture_items("transaction", "span") with mock.patch.object( client._client, @@ -4359,17 +4375,17 @@ def test_cache_tokens_streaming( ): pass - (span,) = events[0]["spans"] + (span,) = [item.payload for item in items if item.type == "span"] # input_tokens normalized: 100 + 80 (cache_read) + 20 (cache_write) = 200 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 200 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 210 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 80 - 
assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 200 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 210 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 80 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 20 def test_stream_messages_cache_tokens( - sentry_init, capture_events, get_model_response, server_side_event_chunks + sentry_init, capture_items, get_model_response, server_side_event_chunks ): """Test cache tokens are tracked for streaming responses.""" client = Anthropic(api_key="z") @@ -4403,7 +4419,7 @@ def test_stream_messages_cache_tokens( ) sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - events = capture_events() + items = capture_items("transaction", "span") with mock.patch.object( client._client, @@ -4419,10 +4435,10 @@ def test_stream_messages_cache_tokens( for event in stream: pass - (span,) = events[0]["spans"] + (span,) = [item.payload for item in items if item.type == "span"] # input_tokens normalized: 100 + 80 (cache_read) + 20 (cache_write) = 200 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 200 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 210 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 80 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 200 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 210 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 80 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 20 From 
f44316dfa45f83d02e7f65908340aeeadcfbe70f Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Thu, 16 Apr 2026 15:24:52 +0200 Subject: [PATCH 12/36] google-genai tests --- .../google_genai/test_google_genai.py | 507 +++++++++--------- 1 file changed, 248 insertions(+), 259 deletions(-) diff --git a/tests/integrations/google_genai/test_google_genai.py b/tests/integrations/google_genai/test_google_genai.py index 6e91ba6634..e074b79c8c 100644 --- a/tests/integrations/google_genai/test_google_genai.py +++ b/tests/integrations/google_genai/test_google_genai.py @@ -124,14 +124,14 @@ def create_test_config( ], ) def test_nonstreaming_generate_content( - sentry_init, capture_events, send_default_pii, include_prompts, mock_genai_client + sentry_init, capture_items, send_default_pii, include_prompts, mock_genai_client ): sentry_init( integrations=[GoogleGenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") # Mock the HTTP response at the _api_client.request() level mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) @@ -146,38 +146,37 @@ def test_nonstreaming_generate_content( mock_genai_client.models.generate_content( model="gemini-1.5-flash", contents="Tell me a joke", config=config ) - assert len(events) == 1 - (event,) = events - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "google_genai" - assert len(event["spans"]) == 1 - chat_span = event["spans"][0] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + chat_span = next(item.payload for item in items if item.type == "span") # Check chat span - assert chat_span["op"] == OP.GEN_AI_CHAT - assert chat_span["description"] == "chat gemini-1.5-flash" - assert chat_span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert 
chat_span["data"][SPANDATA.GEN_AI_SYSTEM] == "gcp.gemini" - assert chat_span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "gemini-1.5-flash" + assert chat_span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert chat_span["name"] == "chat gemini-1.5-flash" + assert chat_span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert chat_span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "gcp.gemini" + assert chat_span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "gemini-1.5-flash" if send_default_pii and include_prompts: # Response text is stored as a JSON array - response_text = chat_span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + response_text = chat_span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] # Parse the JSON array response_texts = json.loads(response_text) assert response_texts == ["Hello! How can I help you today?"] else: - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_span["data"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_span["attributes"] # Check token usage - assert chat_span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert chat_span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 # Output tokens now include reasoning tokens: candidates_token_count (20) + thoughts_token_count (3) = 23 - assert chat_span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 23 - assert chat_span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 - assert chat_span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 5 - assert chat_span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING] == 3 + assert chat_span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 23 + assert chat_span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert chat_span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 5 + assert chat_span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING] == 3 @pytest.mark.parametrize("generate_content_config", (False, True)) @@ -210,7 +209,7 @@ def test_nonstreaming_generate_content( ) def 
test_generate_content_with_system_instruction( sentry_init, - capture_events, + capture_items, mock_genai_client, generate_content_config, system_instructions, @@ -221,7 +220,7 @@ def test_generate_content_with_system_instruction( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) @@ -243,16 +242,15 @@ def test_generate_content_with_system_instruction( config=config, ) - (event,) = events - invoke_span = event["spans"][0] + invoke_span = next(item.payload for item in items if item.type == "span") if expected_texts is None: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in invoke_span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in invoke_span["attributes"] return # (PII is enabled and include_prompts is True in this test) system_instructions = json.loads( - invoke_span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] + invoke_span["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] ) assert system_instructions == [ @@ -260,12 +258,12 @@ def test_generate_content_with_system_instruction( ] -def test_generate_content_with_tools(sentry_init, capture_events, mock_genai_client): +def test_generate_content_with_tools(sentry_init, capture_items, mock_genai_client): sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span") # Create a mock tool function def get_weather(location: str) -> str: @@ -319,18 +317,17 @@ def get_weather(location: str) -> str: model="gemini-1.5-flash", contents="What's the weather?", config=config ) - (event,) = events - invoke_span = event["spans"][0] + invoke_span = next(item.payload for item in items if item.type == "span") # Check that tools are recorded (data is serialized as a string) - tools_data_str = invoke_span["data"][SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS] + tools_data_str = 
invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS] # Parse the JSON string to verify content tools_data = json.loads(tools_data_str) assert len(tools_data) == 2 # The order of tools may not be guaranteed, so sort by name and description for comparison sorted_tools = sorted( - tools_data, key=lambda t: (t.get("name", ""), t.get("description", "")) + tools_data, key=lambda t: (t.get("name", ""), t.get("name", "")) ) # The function tool @@ -342,13 +339,13 @@ def get_weather(location: str) -> str: assert sorted_tools[1]["description"] == "Get weather information (tool object)" -def test_tool_execution(sentry_init, capture_events): +def test_tool_execution(sentry_init, capture_items): sentry_init( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") # Create a mock tool function def get_weather(location: str) -> str: @@ -366,25 +363,25 @@ def get_weather(location: str) -> str: assert result == "The weather in San Francisco is sunny" - (event,) = events - assert len(event["spans"]) == 1 - tool_span = event["spans"][0] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + tool_span = next(item.payload for item in items if item.type == "span") - assert tool_span["op"] == OP.GEN_AI_EXECUTE_TOOL - assert tool_span["description"] == "execute_tool get_weather" - assert tool_span["data"][SPANDATA.GEN_AI_TOOL_NAME] == "get_weather" + assert tool_span["attributes"]["sentry.op"] == OP.GEN_AI_EXECUTE_TOOL + assert tool_span["name"] == "execute_tool get_weather" + assert tool_span["attributes"][SPANDATA.GEN_AI_TOOL_NAME] == "get_weather" assert ( - tool_span["data"][SPANDATA.GEN_AI_TOOL_DESCRIPTION] + tool_span["attributes"][SPANDATA.GEN_AI_TOOL_DESCRIPTION] == "Get the weather for a location" ) -def test_error_handling(sentry_init, capture_events, mock_genai_client): +def test_error_handling(sentry_init, 
capture_items, mock_genai_client): sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("event", "transaction") # Mock an error at the HTTP level with mock.patch.object( @@ -399,8 +396,8 @@ def test_error_handling(sentry_init, capture_events, mock_genai_client): ) # Should have both transaction and error events - assert len(events) == 2 - error_event, transaction_event = events + assert len([item for item in items if item.type == "transaction"]) == 1 + (error_event,) = (item.payload for item in items if item.type == "event") assert error_event["level"] == "error" assert error_event["exception"]["values"][0]["type"] == "Exception" @@ -408,14 +405,14 @@ def test_error_handling(sentry_init, capture_events, mock_genai_client): assert error_event["exception"]["values"][0]["mechanism"]["type"] == "google_genai" -def test_streaming_generate_content(sentry_init, capture_events, mock_genai_client): +def test_streaming_generate_content(sentry_init, capture_items, mock_genai_client): """Test streaming with generate_content_stream, verifying chunk accumulation.""" sentry_init( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") # Create streaming chunks - simulating a multi-chunk response # Chunk 1: First part of text with partial usage metadata @@ -497,40 +494,41 @@ def test_streaming_generate_content(sentry_init, capture_events, mock_genai_clie assert collected_chunks[1].candidates[0].content.parts[0].text == "How can I " assert collected_chunks[2].candidates[0].content.parts[0].text == "help you today?" 
- (event,) = events - - assert len(event["spans"]) == 1 - chat_span = event["spans"][0] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + chat_span = next(item.payload for item in items if item.type == "span") # Check that streaming flag is set on both spans - assert chat_span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert chat_span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True # Verify accumulated response text (all chunks combined) expected_full_text = "Hello! How can I help you today?" # Response text is stored as a JSON string - chat_response_text = json.loads(chat_span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT]) + chat_response_text = json.loads( + chat_span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] + ) assert chat_response_text == [expected_full_text] # Verify finish reasons (only the final chunk has a finish reason) # When there's a single finish reason, it's stored as a plain string (not JSON) - assert SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS in chat_span["data"] - assert chat_span["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == "STOP" - assert chat_span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert chat_span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 - assert chat_span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 25 - assert chat_span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 5 - assert chat_span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING] == 3 + assert SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS in chat_span["attributes"] + assert chat_span["attributes"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == "STOP" + assert chat_span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert chat_span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 + assert chat_span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 25 + assert chat_span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 5 + assert 
chat_span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING] == 3 # Verify model name - assert chat_span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "gemini-1.5-flash" + assert chat_span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "gemini-1.5-flash" -def test_span_origin(sentry_init, capture_events, mock_genai_client): +def test_span_origin(sentry_init, capture_items, mock_genai_client): sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span", "transaction") mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) @@ -543,22 +541,21 @@ def test_span_origin(sentry_init, capture_events, mock_genai_client): model="gemini-1.5-flash", contents="Test origin", config=config ) - (event,) = events - + (event,) = (item.payload for item in items if item.type == "transaction") assert event["contexts"]["trace"]["origin"] == "manual" - for span in event["spans"]: - assert span["origin"] == "auto.ai.google_genai" + spans = [item.payload for item in items if item.type == "span"] + for span in spans: + assert span["attributes"]["sentry.origin"] == "auto.ai.google_genai" -def test_response_without_usage_metadata( - sentry_init, capture_events, mock_genai_client -): + +def test_response_without_usage_metadata(sentry_init, capture_items, mock_genai_client): """Test handling of responses without usage metadata""" sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span") # Response without usage metadata response_json = { @@ -584,23 +581,22 @@ def test_response_without_usage_metadata( model="gemini-1.5-flash", contents="Test", config=config ) - (event,) = events - chat_span = event["spans"][0] + chat_span = next(item.payload for item in items if item.type == "span") # Usage data should not be present - assert SPANDATA.GEN_AI_USAGE_INPUT_TOKENS not in chat_span["data"] - assert 
SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS not in chat_span["data"] - assert SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS not in chat_span["data"] + assert SPANDATA.GEN_AI_USAGE_INPUT_TOKENS not in chat_span["attributes"] + assert SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS not in chat_span["attributes"] + assert SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS not in chat_span["attributes"] -def test_multiple_candidates(sentry_init, capture_events, mock_genai_client): +def test_multiple_candidates(sentry_init, capture_items, mock_genai_client): """Test handling of multiple response candidates""" sentry_init( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") # Response with multiple candidates multi_candidate_json = { @@ -638,12 +634,11 @@ def test_multiple_candidates(sentry_init, capture_events, mock_genai_client): model="gemini-1.5-flash", contents="Generate multiple", config=config ) - (event,) = events - chat_span = event["spans"][0] + chat_span = next(item.payload for item in items if item.type == "span") # Should capture all responses # Response text is stored as a JSON string when there are multiple responses - response_text = chat_span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + response_text = chat_span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] if isinstance(response_text, str) and response_text.startswith("["): # It's a JSON array response_list = json.loads(response_text) @@ -654,18 +649,18 @@ def test_multiple_candidates(sentry_init, capture_events, mock_genai_client): # Finish reasons are serialized as JSON finish_reasons = json.loads( - chat_span["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] + chat_span["attributes"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] ) assert finish_reasons == ["STOP", "MAX_TOKENS"] -def test_all_configuration_parameters(sentry_init, capture_events, mock_genai_client): +def test_all_configuration_parameters(sentry_init, capture_items, 
mock_genai_client): """Test that all configuration parameters are properly recorded""" sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span") mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) @@ -686,26 +681,25 @@ def test_all_configuration_parameters(sentry_init, capture_events, mock_genai_cl model="gemini-1.5-flash", contents="Test all params", config=config ) - (event,) = events - invoke_span = event["spans"][0] + invoke_span = next(item.payload for item in items if item.type == "span") # Check all parameters are recorded - assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.8 - assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.95 - assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_TOP_K] == 40 - assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 2048 - assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 - assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 - assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_SEED] == 12345 + assert invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.8 + assert invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.95 + assert invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_TOP_K] == 40 + assert invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 2048 + assert invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 + assert invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 + assert invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_SEED] == 12345 -def test_empty_response(sentry_init, capture_events, mock_genai_client): +def test_empty_response(sentry_init, capture_items, mock_genai_client): """Test handling of minimal response with no content""" sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, ) - events = 
capture_events() + items = capture_items("span") # Minimal response with empty candidates array minimal_response_json = {"candidates": []} @@ -723,20 +717,20 @@ def test_empty_response(sentry_init, capture_events, mock_genai_client): assert response is not None assert len(response.candidates) == 0 - (event,) = events # Should still create spans even with empty candidates - assert len(event["spans"]) == 1 + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 def test_response_with_different_id_fields( - sentry_init, capture_events, mock_genai_client + sentry_init, capture_items, mock_genai_client ): """Test handling of different response ID field names""" sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span") # Response with response_id and model_version response_json = { @@ -763,20 +757,21 @@ def test_response_with_different_id_fields( model="gemini-1.5-flash", contents="Test", config=create_test_config() ) - (event,) = events - chat_span = event["spans"][0] + chat_span = next(item.payload for item in items if item.type == "span") - assert chat_span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "resp-456" - assert chat_span["data"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "gemini-1.5-flash-001" + assert chat_span["attributes"][SPANDATA.GEN_AI_RESPONSE_ID] == "resp-456" + assert ( + chat_span["attributes"][SPANDATA.GEN_AI_RESPONSE_MODEL] + == "gemini-1.5-flash-001" + ) -def test_tool_with_async_function(sentry_init, capture_events): +def test_tool_with_async_function(sentry_init): """Test that async tool functions are properly wrapped""" sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, ) - capture_events() # Create an async tool function async def async_tool(param: str) -> str: @@ -792,14 +787,14 @@ async def async_tool(param: str) -> str: assert hasattr(wrapped_async_tool, "__wrapped__") # Should preserve original -def 
test_contents_as_none(sentry_init, capture_events, mock_genai_client): +def test_contents_as_none(sentry_init, capture_items, mock_genai_client): """Test handling when contents parameter is None""" sentry_init( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) @@ -811,22 +806,21 @@ def test_contents_as_none(sentry_init, capture_events, mock_genai_client): model="gemini-1.5-flash", contents=None, config=create_test_config() ) - (event,) = events - invoke_span = event["spans"][0] + invoke_span = next(item.payload for item in items if item.type == "span") # Should handle None contents gracefully - messages = invoke_span["data"].get(SPANDATA.GEN_AI_REQUEST_MESSAGES, []) + messages = invoke_span["attributes"].get(SPANDATA.GEN_AI_REQUEST_MESSAGES, []) # Should only have system message if any, not user message assert all(msg["role"] != "user" or msg["content"] is not None for msg in messages) -def test_tool_calls_extraction(sentry_init, capture_events, mock_genai_client): +def test_tool_calls_extraction(sentry_init, capture_items, mock_genai_client): """Test extraction of tool/function calls from response""" sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span") # Response with function calls function_call_response_json = { @@ -875,14 +869,17 @@ def test_tool_calls_extraction(sentry_init, capture_events, mock_genai_client): config=create_test_config(), ) - (event,) = events - chat_span = event["spans"][0] # The chat span + chat_span = next( + item.payload for item in items if item.type == "span" + ) # The chat span # Check that tool calls are extracted and stored - assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS in chat_span["data"] + assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS in chat_span["attributes"] 
# Parse the JSON string to verify content - tool_calls = json.loads(chat_span["data"][SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS]) + tool_calls = json.loads( + chat_span["attributes"][SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS] + ) assert len(tool_calls) == 2 @@ -902,16 +899,14 @@ def test_tool_calls_extraction(sentry_init, capture_events, mock_genai_client): assert json.loads(tool_calls[1]["arguments"]) == {"timezone": "PST"} -def test_google_genai_message_truncation( - sentry_init, capture_events, mock_genai_client -): +def test_google_genai_message_truncation(sentry_init, capture_items, mock_genai_client): """Test that large messages are truncated properly in Google GenAI integration.""" sentry_init( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") large_content = ( "This is a very long message that will exceed our size limits. " * 1000 @@ -930,11 +925,10 @@ def test_google_genai_message_truncation( config=create_test_config(), ) - (event,) = events - invoke_span = event["spans"][0] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in invoke_span["data"] + invoke_span = next(item.payload for item in items if item.type == "span") + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in invoke_span["attributes"] - messages_data = invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + messages_data = invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] assert isinstance(messages_data, str) parsed_messages = json.loads(messages_data) @@ -980,14 +974,14 @@ def test_google_genai_message_truncation( ], ) def test_embed_content( - sentry_init, capture_events, send_default_pii, include_prompts, mock_genai_client + sentry_init, capture_items, send_default_pii, include_prompts, mock_genai_client ): sentry_init( integrations=[GoogleGenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = 
capture_items("transaction", "span") # Mock the HTTP response at the _api_client.request() level mock_http_response = create_mock_http_response(EXAMPLE_EMBED_RESPONSE_JSON) @@ -1006,47 +1000,49 @@ def test_embed_content( ], ) - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "google_genai_embeddings" # Should have 1 span for embeddings - assert len(event["spans"]) == 1 - (embed_span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + (embed_span,) = spans # Check embeddings span - assert embed_span["op"] == OP.GEN_AI_EMBEDDINGS - assert embed_span["description"] == "embeddings text-embedding-004" - assert embed_span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "embeddings" - assert embed_span["data"][SPANDATA.GEN_AI_SYSTEM] == "gcp.gemini" - assert embed_span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "text-embedding-004" + assert embed_span["attributes"]["sentry.op"] == OP.GEN_AI_EMBEDDINGS + assert embed_span["name"] == "embeddings text-embedding-004" + assert embed_span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "embeddings" + assert embed_span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "gcp.gemini" + assert ( + embed_span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "text-embedding-004" + ) # Check input texts if PII is allowed if send_default_pii and include_prompts: - input_texts = json.loads(embed_span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) + input_texts = json.loads( + embed_span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] + ) assert input_texts == [ "What is your name?", "What is your favorite color?", ] else: - assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in embed_span["data"] + assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in embed_span["attributes"] # Check usage data (sum of token counts from statistics: 10 + 15 = 25) # Note: Only available 
in newer versions with ContentEmbeddingStatistics - if SPANDATA.GEN_AI_USAGE_INPUT_TOKENS in embed_span["data"]: - assert embed_span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 25 + if SPANDATA.GEN_AI_USAGE_INPUT_TOKENS in embed_span["attributes"]: + assert embed_span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 25 -def test_embed_content_string_input(sentry_init, capture_events, mock_genai_client): +def test_embed_content_string_input(sentry_init, capture_items, mock_genai_client): """Test embed_content with a single string instead of list.""" sentry_init( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") # Mock response with single embedding single_embed_response = { @@ -1074,25 +1070,25 @@ def test_embed_content_string_input(sentry_init, capture_events, mock_genai_clie contents="Single text input", ) - (event,) = events - (embed_span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + (embed_span,) = spans # Check that single string is handled correctly - input_texts = json.loads(embed_span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) + input_texts = json.loads(embed_span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) assert input_texts == ["Single text input"] # Should use token_count from statistics (5), not billable_character_count (10) # Note: Only available in newer versions with ContentEmbeddingStatistics - if SPANDATA.GEN_AI_USAGE_INPUT_TOKENS in embed_span["data"]: - assert embed_span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 5 + if SPANDATA.GEN_AI_USAGE_INPUT_TOKENS in embed_span["attributes"]: + assert embed_span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 5 -def test_embed_content_error_handling(sentry_init, capture_events, mock_genai_client): +def test_embed_content_error_handling(sentry_init, capture_items, mock_genai_client): """Test error handling in embed_content.""" 
sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "event") # Mock an error at the HTTP level with mock.patch.object( @@ -1108,8 +1104,8 @@ def test_embed_content_error_handling(sentry_init, capture_events, mock_genai_cl ) # Should have both transaction and error events - assert len(events) == 2 - error_event, _ = events + assert len([item for item in items if item.type == "transaction"]) == 1 + (error_event,) = (item.payload for item in items if item.type == "event") assert error_event["level"] == "error" assert error_event["exception"]["values"][0]["type"] == "Exception" @@ -1118,14 +1114,14 @@ def test_embed_content_error_handling(sentry_init, capture_events, mock_genai_cl def test_embed_content_without_statistics( - sentry_init, capture_events, mock_genai_client + sentry_init, capture_items, mock_genai_client ): """Test embed_content response without statistics (older package versions).""" sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span") # Response without statistics (typical for older google-genai versions) # Embeddings exist but don't have the statistics field @@ -1150,21 +1146,21 @@ def test_embed_content_without_statistics( contents=["Test without statistics", "Another test"], ) - (event,) = events - (embed_span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + (embed_span,) = spans # No usage tokens since there are no statistics in older versions # This is expected and the integration should handle it gracefully - assert SPANDATA.GEN_AI_USAGE_INPUT_TOKENS not in embed_span["data"] + assert SPANDATA.GEN_AI_USAGE_INPUT_TOKENS not in embed_span["attributes"] -def test_embed_content_span_origin(sentry_init, capture_events, mock_genai_client): +def test_embed_content_span_origin(sentry_init, capture_items, mock_genai_client): """Test that 
embed_content spans have correct origin.""" sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") mock_http_response = create_mock_http_response(EXAMPLE_EMBED_RESPONSE_JSON) @@ -1177,11 +1173,12 @@ def test_embed_content_span_origin(sentry_init, capture_events, mock_genai_clien contents=["Test origin"], ) - (event,) = events - + (event,) = (item.payload for item in items if item.type == "transaction") assert event["contexts"]["trace"]["origin"] == "manual" - for span in event["spans"]: - assert span["origin"] == "auto.ai.google_genai" + + spans = [item.payload for item in items if item.type == "span"] + for span in spans: + assert span["attributes"]["sentry.origin"] == "auto.ai.google_genai" @pytest.mark.asyncio @@ -1195,7 +1192,7 @@ def test_embed_content_span_origin(sentry_init, capture_events, mock_genai_clien ], ) async def test_async_embed_content( - sentry_init, capture_events, send_default_pii, include_prompts, mock_genai_client + sentry_init, capture_items, send_default_pii, include_prompts, mock_genai_client ): """Test async embed_content method.""" sentry_init( @@ -1203,7 +1200,7 @@ async def test_async_embed_content( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") # Mock the async HTTP response mock_http_response = create_mock_http_response(EXAMPLE_EMBED_RESPONSE_JSON) @@ -1222,42 +1219,44 @@ async def test_async_embed_content( ], ) - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "google_genai_embeddings_async" # Should have 1 span for embeddings - assert len(event["spans"]) == 1 - (embed_span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + (embed_span,) = spans 
# Check embeddings span - assert embed_span["op"] == OP.GEN_AI_EMBEDDINGS - assert embed_span["description"] == "embeddings text-embedding-004" - assert embed_span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "embeddings" - assert embed_span["data"][SPANDATA.GEN_AI_SYSTEM] == "gcp.gemini" - assert embed_span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "text-embedding-004" + assert embed_span["attributes"]["sentry.op"] == OP.GEN_AI_EMBEDDINGS + assert embed_span["name"] == "embeddings text-embedding-004" + assert embed_span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "embeddings" + assert embed_span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "gcp.gemini" + assert ( + embed_span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "text-embedding-004" + ) # Check input texts if PII is allowed if send_default_pii and include_prompts: - input_texts = json.loads(embed_span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) + input_texts = json.loads( + embed_span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] + ) assert input_texts == [ "What is your name?", "What is your favorite color?", ] else: - assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in embed_span["data"] + assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in embed_span["attributes"] # Check usage data (sum of token counts from statistics: 10 + 15 = 25) # Note: Only available in newer versions with ContentEmbeddingStatistics - if SPANDATA.GEN_AI_USAGE_INPUT_TOKENS in embed_span["data"]: - assert embed_span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 25 + if SPANDATA.GEN_AI_USAGE_INPUT_TOKENS in embed_span["attributes"]: + assert embed_span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 25 @pytest.mark.asyncio async def test_async_embed_content_string_input( - sentry_init, capture_events, mock_genai_client + sentry_init, capture_items, mock_genai_client ): """Test async embed_content with a single string instead of list.""" sentry_init( @@ -1265,7 +1264,7 @@ async def test_async_embed_content_string_input( 
traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") # Mock response with single embedding single_embed_response = { @@ -1293,28 +1292,28 @@ async def test_async_embed_content_string_input( contents="Single text input", ) - (event,) = events - (embed_span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + (embed_span,) = spans # Check that single string is handled correctly - input_texts = json.loads(embed_span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) + input_texts = json.loads(embed_span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) assert input_texts == ["Single text input"] # Should use token_count from statistics (5), not billable_character_count (10) # Note: Only available in newer versions with ContentEmbeddingStatistics - if SPANDATA.GEN_AI_USAGE_INPUT_TOKENS in embed_span["data"]: - assert embed_span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 5 + if SPANDATA.GEN_AI_USAGE_INPUT_TOKENS in embed_span["attributes"]: + assert embed_span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 5 @pytest.mark.asyncio async def test_async_embed_content_error_handling( - sentry_init, capture_events, mock_genai_client + sentry_init, capture_items, mock_genai_client ): """Test error handling in async embed_content.""" sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "event") # Mock an error at the HTTP level with mock.patch.object( @@ -1330,8 +1329,8 @@ async def test_async_embed_content_error_handling( ) # Should have both transaction and error events - assert len(events) == 2 - error_event, _ = events + assert len([item for item in items if item.type == "transaction"]) == 1 + (error_event,) = (item.payload for item in items if item.type == "event") assert error_event["level"] == "error" assert error_event["exception"]["values"][0]["type"] == "Exception" @@ -1341,14 +1340,14 
@@ async def test_async_embed_content_error_handling( @pytest.mark.asyncio async def test_async_embed_content_without_statistics( - sentry_init, capture_events, mock_genai_client + sentry_init, capture_items, mock_genai_client ): """Test async embed_content response without statistics (older package versions).""" sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span") # Response without statistics (typical for older google-genai versions) # Embeddings exist but don't have the statistics field @@ -1373,24 +1372,24 @@ async def test_async_embed_content_without_statistics( contents=["Test without statistics", "Another test"], ) - (event,) = events - (embed_span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + (embed_span,) = spans # No usage tokens since there are no statistics in older versions # This is expected and the integration should handle it gracefully - assert SPANDATA.GEN_AI_USAGE_INPUT_TOKENS not in embed_span["data"] + assert SPANDATA.GEN_AI_USAGE_INPUT_TOKENS not in embed_span["attributes"] @pytest.mark.asyncio async def test_async_embed_content_span_origin( - sentry_init, capture_events, mock_genai_client + sentry_init, capture_items, mock_genai_client ): """Test that async embed_content spans have correct origin.""" sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") mock_http_response = create_mock_http_response(EXAMPLE_EMBED_RESPONSE_JSON) @@ -1403,16 +1402,17 @@ async def test_async_embed_content_span_origin( contents=["Test origin"], ) - (event,) = events - + (event,) = [item.payload for item in items if item.type == "transaction"] assert event["contexts"]["trace"]["origin"] == "manual" - for span in event["spans"]: - assert span["origin"] == "auto.ai.google_genai" + + spans = [item.payload for item in items if item.type == 
"span"] + for span in spans: + assert span["attributes"]["sentry.origin"] == "auto.ai.google_genai" # Integration tests for generate_content with different input message formats def test_generate_content_with_content_object( - sentry_init, capture_events, mock_genai_client + sentry_init, capture_items, mock_genai_client ): """Test generate_content with Content object input.""" sentry_init( @@ -1420,7 +1420,7 @@ def test_generate_content_with_content_object( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) @@ -1437,10 +1437,9 @@ def test_generate_content_with_content_object( model="gemini-1.5-flash", contents=content, config=create_test_config() ) - (event,) = events - invoke_span = event["spans"][0] + invoke_span = next(item.payload for item in items if item.type == "span") - messages = json.loads(invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages = json.loads(invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) assert len(messages) == 1 assert messages[0]["role"] == "user" assert messages[0]["content"] == [ @@ -1449,7 +1448,7 @@ def test_generate_content_with_content_object( def test_generate_content_with_dict_format( - sentry_init, capture_events, mock_genai_client + sentry_init, capture_items, mock_genai_client ): """Test generate_content with dict format input (ContentDict).""" sentry_init( @@ -1457,7 +1456,7 @@ def test_generate_content_with_dict_format( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) @@ -1472,10 +1471,9 @@ def test_generate_content_with_dict_format( model="gemini-1.5-flash", contents=contents, config=create_test_config() ) - (event,) = events - invoke_span = event["spans"][0] + invoke_span = next(item.payload for item in items if item.type == "span") - 
messages = json.loads(invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages = json.loads(invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) assert len(messages) == 1 assert messages[0]["role"] == "user" assert messages[0]["content"] == [ @@ -1483,16 +1481,14 @@ def test_generate_content_with_dict_format( ] -def test_generate_content_with_file_data( - sentry_init, capture_events, mock_genai_client -): +def test_generate_content_with_file_data(sentry_init, capture_items, mock_genai_client): """Test generate_content with file_data (external file reference).""" sentry_init( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) @@ -1516,10 +1512,9 @@ def test_generate_content_with_file_data( model="gemini-1.5-flash", contents=content, config=create_test_config() ) - (event,) = events - invoke_span = event["spans"][0] + invoke_span = next(item.payload for item in items if item.type == "span") - messages = json.loads(invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages = json.loads(invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) assert len(messages) == 1 assert messages[0]["role"] == "user" assert len(messages[0]["content"]) == 2 @@ -1534,7 +1529,7 @@ def test_generate_content_with_file_data( def test_generate_content_with_inline_data( - sentry_init, capture_events, mock_genai_client + sentry_init, capture_items, mock_genai_client ): """Test generate_content with inline_data (binary data).""" sentry_init( @@ -1542,7 +1537,7 @@ def test_generate_content_with_inline_data( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) @@ -1565,10 +1560,9 @@ def test_generate_content_with_inline_data( 
model="gemini-1.5-flash", contents=content, config=create_test_config() ) - (event,) = events - invoke_span = event["spans"][0] + invoke_span = next(item.payload for item in items if item.type == "span") - messages = json.loads(invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages = json.loads(invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) assert len(messages) == 1 assert messages[0]["role"] == "user" assert len(messages[0]["content"]) == 2 @@ -1580,7 +1574,7 @@ def test_generate_content_with_inline_data( def test_generate_content_with_function_response( - sentry_init, capture_events, mock_genai_client + sentry_init, capture_items, mock_genai_client ): """Test generate_content with function_response (tool result).""" sentry_init( @@ -1588,7 +1582,7 @@ def test_generate_content_with_function_response( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) @@ -1622,10 +1616,9 @@ def test_generate_content_with_function_response( model="gemini-1.5-flash", contents=contents, config=create_test_config() ) - (event,) = events - invoke_span = event["spans"][0] + invoke_span = next(item.payload for item in items if item.type == "span") - messages = json.loads(invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages = json.loads(invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) assert len(messages) == 1 # First message is user message assert messages[0]["role"] == "tool" @@ -1635,7 +1628,7 @@ def test_generate_content_with_function_response( def test_generate_content_with_mixed_string_and_content( - sentry_init, capture_events, mock_genai_client + sentry_init, capture_items, mock_genai_client ): """Test generate_content with mixed string and Content objects in list.""" sentry_init( @@ -1643,7 +1636,7 @@ def test_generate_content_with_mixed_string_and_content( traces_sample_rate=1.0, 
send_default_pii=True, ) - events = capture_events() + items = capture_items("span") mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) @@ -1668,10 +1661,9 @@ def test_generate_content_with_mixed_string_and_content( model="gemini-1.5-flash", contents=contents, config=create_test_config() ) - (event,) = events - invoke_span = event["spans"][0] + invoke_span = next(item.payload for item in items if item.type == "span") - messages = json.loads(invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages = json.loads(invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) assert len(messages) == 1 # User message assert messages[0]["role"] == "user" @@ -1679,7 +1671,7 @@ def test_generate_content_with_mixed_string_and_content( def test_generate_content_with_part_object_directly( - sentry_init, capture_events, mock_genai_client + sentry_init, capture_items, mock_genai_client ): """Test generate_content with Part object directly (not wrapped in Content).""" sentry_init( @@ -1687,7 +1679,7 @@ def test_generate_content_with_part_object_directly( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) @@ -1702,17 +1694,16 @@ def test_generate_content_with_part_object_directly( model="gemini-1.5-flash", contents=part, config=create_test_config() ) - (event,) = events - invoke_span = event["spans"][0] + invoke_span = next(item.payload for item in items if item.type == "span") - messages = json.loads(invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages = json.loads(invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) assert len(messages) == 1 assert messages[0]["role"] == "user" assert messages[0]["content"] == [{"text": "Direct Part object", "type": "text"}] def test_generate_content_with_list_of_dicts( - sentry_init, capture_events, mock_genai_client + sentry_init, capture_items, mock_genai_client 
): """ Test generate_content with list of dict format inputs. @@ -1726,7 +1717,7 @@ def test_generate_content_with_list_of_dicts( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) @@ -1745,17 +1736,16 @@ def test_generate_content_with_list_of_dicts( model="gemini-1.5-flash", contents=contents, config=create_test_config() ) - (event,) = events - invoke_span = event["spans"][0] + invoke_span = next(item.payload for item in items if item.type == "span") - messages = json.loads(invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages = json.loads(invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) assert len(messages) == 1 assert messages[0]["role"] == "user" assert messages[0]["content"] == [{"text": "Second user message", "type": "text"}] def test_generate_content_with_dict_inline_data( - sentry_init, capture_events, mock_genai_client + sentry_init, capture_items, mock_genai_client ): """Test generate_content with dict format containing inline_data.""" sentry_init( @@ -1763,7 +1753,7 @@ def test_generate_content_with_dict_inline_data( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) @@ -1784,10 +1774,9 @@ def test_generate_content_with_dict_inline_data( model="gemini-1.5-flash", contents=contents, config=create_test_config() ) - (event,) = events - invoke_span = event["spans"][0] + invoke_span = next(item.payload for item in items if item.type == "span") - messages = json.loads(invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages = json.loads(invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) assert len(messages) == 1 assert messages[0]["role"] == "user" assert len(messages[0]["content"]) == 2 @@ -1801,14 +1790,14 @@ def test_generate_content_with_dict_inline_data( 
def test_generate_content_without_parts_property_inline_data( - sentry_init, capture_events, mock_genai_client + sentry_init, capture_items, mock_genai_client ): sentry_init( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) @@ -1825,10 +1814,9 @@ def test_generate_content_without_parts_property_inline_data( model="gemini-1.5-flash", contents=contents, config=create_test_config() ) - (event,) = events - invoke_span = event["spans"][0] + invoke_span = next(item.payload for item in items if item.type == "span") - messages = json.loads(invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages = json.loads(invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) assert len(messages) == 1 @@ -1845,14 +1833,14 @@ def test_generate_content_without_parts_property_inline_data( def test_generate_content_without_parts_property_inline_data_and_binary_data_within_string( - sentry_init, capture_events, mock_genai_client + sentry_init, capture_items, mock_genai_client ): sentry_init( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) @@ -1874,10 +1862,9 @@ def test_generate_content_without_parts_property_inline_data_and_binary_data_wit model="gemini-1.5-flash", contents=contents, config=create_test_config() ) - (event,) = events - invoke_span = event["spans"][0] + invoke_span = next(item.payload for item in items if item.type == "span") - messages = json.loads(invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages = json.loads(invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) assert len(messages) == 1 assert messages[0]["role"] == "user" @@ -2162,7 +2149,9 @@ def 
test_extract_contents_messages_dict_inline_data(): """Test extract_contents_messages with dict containing inline_data""" content_dict = { "role": "user", - "parts": [{"inline_data": {"data": b"binary_data", "mime_type": "image/gif"}}], + "parts": [ + {"inline_data": {"attributes": b"binary_data", "mime_type": "image/gif"}} + ], } result = extract_contents_messages(content_dict) From ff9c5ec2f2eac0a7fa94b49b40cdd31e172c053f Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Fri, 17 Apr 2026 09:52:20 +0200 Subject: [PATCH 13/36] test litellm --- tests/integrations/litellm/test_litellm.py | 477 +++++++++++---------- 1 file changed, 241 insertions(+), 236 deletions(-) diff --git a/tests/integrations/litellm/test_litellm.py b/tests/integrations/litellm/test_litellm.py index a8df5891ce..90807744e7 100644 --- a/tests/integrations/litellm/test_litellm.py +++ b/tests/integrations/litellm/test_litellm.py @@ -142,7 +142,7 @@ def __init__( def test_nonstreaming_chat_completion( reset_litellm_executor, sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -153,7 +153,7 @@ def test_nonstreaming_chat_completion( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [{"role": "user", "content": "Hello!"}] @@ -179,37 +179,36 @@ def test_nonstreaming_chat_completion( litellm_utils.executor.shutdown(wait=True) - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "litellm test" + spans = [item.payload for item in items if item.type == "span"] chat_spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_CHAT and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert 
len(chat_spans) == 1 span = chat_spans[0] - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat gpt-3.5-turbo" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "gpt-3.5-turbo" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "gpt-3.5-turbo" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat gpt-3.5-turbo" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "gpt-3.5-turbo" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "gpt-3.5-turbo" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "openai" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" if send_default_pii and include_prompts: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT in span["attributes"] else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 @pytest.mark.asyncio(loop_scope="session") @@ -224,7 +223,7 @@ def test_nonstreaming_chat_completion( ) async def test_async_nonstreaming_chat_completion( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ 
-235,7 +234,7 @@ async def test_async_nonstreaming_chat_completion( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [{"role": "user", "content": "Hello!"}] @@ -262,37 +261,36 @@ async def test_async_nonstreaming_chat_completion( await GLOBAL_LOGGING_WORKER.flush() await asyncio.sleep(0.5) - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "litellm test" + spans = [item.payload for item in items if item.type == "span"] chat_spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_CHAT and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(chat_spans) == 1 span = chat_spans[0] - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat gpt-3.5-turbo" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "gpt-3.5-turbo" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "gpt-3.5-turbo" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat gpt-3.5-turbo" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "gpt-3.5-turbo" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "gpt-3.5-turbo" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "openai" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" if send_default_pii and include_prompts: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT in span["attributes"] else: 
- assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 @pytest.mark.parametrize( @@ -307,7 +305,7 @@ async def test_async_nonstreaming_chat_completion( def test_streaming_chat_completion( reset_litellm_executor, sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -319,7 +317,7 @@ def test_streaming_chat_completion( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("span") messages = [{"role": "user", "content": "Hello!"}] @@ -350,20 +348,18 @@ def test_streaming_chat_completion( streaming_handler.executor.shutdown(wait=True) - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + spans = [item.payload for item in items if item.type == "span"] chat_spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_CHAT and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(chat_spans) == 1 span = chat_spans[0] - assert span["op"] == OP.GEN_AI_CHAT - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True @pytest.mark.asyncio(loop_scope="session") @@ -378,7 +374,7 @@ 
def test_streaming_chat_completion( ) async def test_async_streaming_chat_completion( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -391,7 +387,7 @@ async def test_async_streaming_chat_completion( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [{"role": "user", "content": "Hello!"}] @@ -425,25 +421,23 @@ async def test_async_streaming_chat_completion( await GLOBAL_LOGGING_WORKER.flush() await asyncio.sleep(0.5) - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + spans = [item.payload for item in items if item.type == "span"] chat_spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_CHAT and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(chat_spans) == 1 span = chat_spans[0] - assert span["op"] == OP.GEN_AI_CHAT - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True def test_embeddings_create( sentry_init, - capture_events, + capture_items, get_model_response, openai_embedding_model_response, clear_litellm_cache, @@ -459,7 +453,7 @@ def test_embeddings_create( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") client = OpenAI(api_key="test-key") @@ -485,32 +479,34 @@ def test_embeddings_create( # Response is processed by litellm, so just check it exists assert response is not None - assert len(events) == 1 - (event,) = events - assert event["type"] == "transaction" + spans = [item.payload for item in items if item.type == "span"] spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_EMBEDDINGS and 
x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_EMBEDDINGS + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(spans) == 1 span = spans[0] - assert span["op"] == OP.GEN_AI_EMBEDDINGS - assert span["description"] == "embeddings text-embedding-ada-002" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "embeddings" - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 5 - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "text-embedding-ada-002" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_EMBEDDINGS + assert span["name"] == "embeddings text-embedding-ada-002" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "embeddings" + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 5 + assert ( + span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] + == "text-embedding-ada-002" + ) # Check that embeddings input is captured (it's JSON serialized) - embeddings_input = span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] + embeddings_input = span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] assert json.loads(embeddings_input) == ["Hello, world!"] @pytest.mark.asyncio(loop_scope="session") async def test_async_embeddings_create( sentry_init, - capture_events, + capture_items, get_model_response, openai_embedding_model_response, clear_litellm_cache, @@ -526,7 +522,7 @@ async def test_async_embeddings_create( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") client = AsyncOpenAI(api_key="test-key") @@ -553,31 +549,33 @@ async def test_async_embeddings_create( # Response is processed by litellm, so just check it exists assert response is not None - assert len(events) == 1 - (event,) = events - assert event["type"] == "transaction" + spans = [item.payload for item in items if item.type == "span"] spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_EMBEDDINGS and x["origin"] == 
"auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_EMBEDDINGS + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(spans) == 1 span = spans[0] - assert span["op"] == OP.GEN_AI_EMBEDDINGS - assert span["description"] == "embeddings text-embedding-ada-002" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "embeddings" - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 5 - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "text-embedding-ada-002" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_EMBEDDINGS + assert span["name"] == "embeddings text-embedding-ada-002" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "embeddings" + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 5 + assert ( + span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] + == "text-embedding-ada-002" + ) # Check that embeddings input is captured (it's JSON serialized) - embeddings_input = span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] + embeddings_input = span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] assert json.loads(embeddings_input) == ["Hello, world!"] def test_embeddings_create_with_list_input( sentry_init, - capture_events, + capture_items, get_model_response, openai_embedding_model_response, clear_litellm_cache, @@ -588,7 +586,7 @@ def test_embeddings_create_with_list_input( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") client = OpenAI(api_key="test-key") @@ -614,22 +612,21 @@ def test_embeddings_create_with_list_input( # Response is processed by litellm, so just check it exists assert response is not None - assert len(events) == 1 - (event,) = events - assert event["type"] == "transaction" + spans = [item.payload for item in items if item.type == "span"] spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_EMBEDDINGS and x["origin"] == "auto.ai.litellm" + for x in spans + if 
x["attributes"]["sentry.op"] == OP.GEN_AI_EMBEDDINGS + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(spans) == 1 span = spans[0] - assert span["op"] == OP.GEN_AI_EMBEDDINGS - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "embeddings" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_EMBEDDINGS + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "embeddings" # Check that list of embeddings input is captured (it's JSON serialized) - embeddings_input = span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] + embeddings_input = span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] assert json.loads(embeddings_input) == [ "First text", "Second text", @@ -640,7 +637,7 @@ def test_embeddings_create_with_list_input( @pytest.mark.asyncio(loop_scope="session") async def test_async_embeddings_create_with_list_input( sentry_init, - capture_events, + capture_items, get_model_response, openai_embedding_model_response, clear_litellm_cache, @@ -651,7 +648,7 @@ async def test_async_embeddings_create_with_list_input( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") client = AsyncOpenAI(api_key="test-key") @@ -678,22 +675,21 @@ async def test_async_embeddings_create_with_list_input( # Response is processed by litellm, so just check it exists assert response is not None - assert len(events) == 1 - (event,) = events - assert event["type"] == "transaction" + spans = [item.payload for item in items if item.type == "span"] spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_EMBEDDINGS and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_EMBEDDINGS + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(spans) == 1 span = spans[0] - assert span["op"] == OP.GEN_AI_EMBEDDINGS - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "embeddings" + assert span["attributes"]["sentry.op"] == 
OP.GEN_AI_EMBEDDINGS + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "embeddings" # Check that list of embeddings input is captured (it's JSON serialized) - embeddings_input = span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] + embeddings_input = span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] assert json.loads(embeddings_input) == [ "First text", "Second text", @@ -703,7 +699,7 @@ async def test_async_embeddings_create_with_list_input( def test_embeddings_no_pii( sentry_init, - capture_events, + capture_items, get_model_response, openai_embedding_model_response, clear_litellm_cache, @@ -714,7 +710,7 @@ def test_embeddings_no_pii( traces_sample_rate=1.0, send_default_pii=False, # PII disabled ) - events = capture_events() + items = capture_items("transaction", "span") client = OpenAI(api_key="test-key") @@ -740,27 +736,26 @@ def test_embeddings_no_pii( # Response is processed by litellm, so just check it exists assert response is not None - assert len(events) == 1 - (event,) = events - assert event["type"] == "transaction" + spans = [item.payload for item in items if item.type == "span"] spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_EMBEDDINGS and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_EMBEDDINGS + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(spans) == 1 span = spans[0] - assert span["op"] == OP.GEN_AI_EMBEDDINGS + assert span["attributes"]["sentry.op"] == OP.GEN_AI_EMBEDDINGS # Check that embeddings input is NOT captured when PII is disabled - assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in span["data"] + assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in span["attributes"] @pytest.mark.asyncio(loop_scope="session") async def test_async_embeddings_no_pii( sentry_init, - capture_events, + capture_items, get_model_response, openai_embedding_model_response, clear_litellm_cache, @@ -771,7 +766,7 @@ async def test_async_embeddings_no_pii( 
traces_sample_rate=1.0, send_default_pii=False, # PII disabled ) - events = capture_events() + items = capture_items("transaction", "span") client = AsyncOpenAI(api_key="test-key") @@ -798,31 +793,30 @@ async def test_async_embeddings_no_pii( # Response is processed by litellm, so just check it exists assert response is not None - assert len(events) == 1 - (event,) = events - assert event["type"] == "transaction" + spans = [item.payload for item in items if item.type == "span"] spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_EMBEDDINGS and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_EMBEDDINGS + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(spans) == 1 span = spans[0] - assert span["op"] == OP.GEN_AI_EMBEDDINGS + assert span["attributes"]["sentry.op"] == OP.GEN_AI_EMBEDDINGS # Check that embeddings input is NOT captured when PII is disabled - assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in span["data"] + assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in span["attributes"] def test_exception_handling( - reset_litellm_executor, sentry_init, capture_events, get_rate_limit_model_response + reset_litellm_executor, sentry_init, capture_items, get_rate_limit_model_response ): sentry_init( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("event") messages = [{"role": "user", "content": "Hello!"}] @@ -843,22 +837,24 @@ def test_exception_handling( client=client, ) - # Should have error event and transaction - assert len(events) >= 1 # Find the error event - error_events = [e for e in events if e.get("level") == "error"] + error_events = [ + item.payload + for item in items + if item.type == "event" and item.payload.get("level") == "error" + ] assert len(error_events) == 1 @pytest.mark.asyncio(loop_scope="session") async def test_async_exception_handling( - sentry_init, capture_events, get_rate_limit_model_response + 
sentry_init, capture_items, get_rate_limit_model_response ): sentry_init( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("event") messages = [{"role": "user", "content": "Hello!"}] @@ -879,17 +875,19 @@ async def test_async_exception_handling( client=client, ) - # Should have error event and transaction - assert len(events) >= 1 # Find the error event - error_events = [e for e in events if e.get("level") == "error"] + error_events = [ + item.payload + for item in items + if item.type == "event" and item.payload.get("level") == "error" + ] assert len(error_events) == 1 def test_span_origin( reset_litellm_executor, sentry_init, - capture_events, + capture_items, get_model_response, nonstreaming_chat_completions_model_response, ): @@ -897,7 +895,7 @@ def test_span_origin( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [{"role": "user", "content": "Hello!"}] @@ -923,16 +921,17 @@ def test_span_origin( litellm_utils.executor.shutdown(wait=True) - (event,) = events - + (event,) = (item.payload for item in items if item.type == "transaction") assert event["contexts"]["trace"]["origin"] == "manual" - assert event["spans"][0]["origin"] == "auto.ai.litellm" + + spans = [item.payload for item in items if item.type == "span"] + assert spans[0]["attributes"]["sentry.origin"] == "auto.ai.litellm" def test_multiple_providers( reset_litellm_executor, sentry_init, - capture_events, + capture_items, get_model_response, nonstreaming_chat_completions_model_response, nonstreaming_anthropic_model_response, @@ -943,7 +942,7 @@ def test_multiple_providers( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction") messages = [{"role": "user", "content": "Hello!"}] @@ -1015,18 +1014,19 @@ def test_multiple_providers( 
litellm_utils.executor.shutdown(wait=True) + events = [item.payload for item in items if item.type == "transaction"] assert len(events) == 3 - for i in range(3): - span = events[i]["spans"][0] + spans = [item.payload for item in items if item.type == "span"] + for span in spans: # The provider should be detected by litellm.get_llm_provider - assert SPANDATA.GEN_AI_SYSTEM in span["data"] + assert SPANDATA.GEN_AI_SYSTEM in span["attributes"] @pytest.mark.asyncio(loop_scope="session") async def test_async_multiple_providers( sentry_init, - capture_events, + capture_items, get_model_response, nonstreaming_chat_completions_model_response, nonstreaming_anthropic_model_response, @@ -1037,7 +1037,7 @@ async def test_async_multiple_providers( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [{"role": "user", "content": "Hello!"}] @@ -1112,18 +1112,19 @@ async def test_async_multiple_providers( await GLOBAL_LOGGING_WORKER.flush() await asyncio.sleep(0.5) + events = [item.payload for item in items if item.type == "transaction"] assert len(events) == 3 - for i in range(3): - span = events[i]["spans"][0] + spans = [item.payload for item in items if item.type == "span"] + for span in spans: # The provider should be detected by litellm.get_llm_provider - assert SPANDATA.GEN_AI_SYSTEM in span["data"] + assert SPANDATA.GEN_AI_SYSTEM in span["attributes"] def test_additional_parameters( reset_litellm_executor, sentry_init, - capture_events, + capture_items, get_model_response, nonstreaming_chat_completions_model_response, ): @@ -1132,7 +1133,7 @@ def test_additional_parameters( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [{"role": "user", "content": "Hello!"}] client = OpenAI(api_key="test-key") @@ -1162,26 +1163,27 @@ def test_additional_parameters( 
litellm_utils.executor.shutdown(wait=True) - (event,) = events + spans = [item.payload for item in items if item.type == "span"] chat_spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_CHAT and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(chat_spans) == 1 span = chat_spans[0] - assert span["data"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 - assert span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 - assert span["data"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.5 - assert span["data"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.5 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.5 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.5 @pytest.mark.asyncio(loop_scope="session") async def test_async_additional_parameters( sentry_init, - capture_events, + capture_items, get_model_response, nonstreaming_chat_completions_model_response, ): @@ -1190,7 +1192,7 @@ async def test_async_additional_parameters( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [{"role": "user", "content": "Hello!"}] client = AsyncOpenAI(api_key="test-key") @@ -1221,26 +1223,27 @@ async def test_async_additional_parameters( await GLOBAL_LOGGING_WORKER.flush() await asyncio.sleep(0.5) - (event,) = events + spans = [item.payload for item in items if item.type == "span"] chat_spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_CHAT and x["origin"] == "auto.ai.litellm" + for x in spans + if 
x["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(chat_spans) == 1 span = chat_spans[0] - assert span["data"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 - assert span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 - assert span["data"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.5 - assert span["data"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.5 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.5 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.5 def test_no_integration( reset_litellm_executor, sentry_init, - capture_events, + capture_items, get_model_response, nonstreaming_chat_completions_model_response, ): @@ -1248,7 +1251,7 @@ def test_no_integration( sentry_init( traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [{"role": "user", "content": "Hello!"}] client = OpenAI(api_key="test-key") @@ -1273,13 +1276,12 @@ def test_no_integration( litellm_utils.executor.shutdown(wait=True) - (event,) = events - # Should still have the transaction, but no child spans since integration is off - assert event["type"] == "transaction" + spans = [item.payload for item in items if item.type == "span"] chat_spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_CHAT and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(chat_spans) == 0 @@ -1287,7 +1289,7 @@ def test_no_integration( @pytest.mark.asyncio(loop_scope="session") async def test_async_no_integration( sentry_init, - 
capture_events, + capture_items, get_model_response, nonstreaming_chat_completions_model_response, ): @@ -1295,7 +1297,7 @@ async def test_async_no_integration( sentry_init( traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [{"role": "user", "content": "Hello!"}] client = AsyncOpenAI(api_key="test-key") @@ -1321,24 +1323,23 @@ async def test_async_no_integration( await GLOBAL_LOGGING_WORKER.flush() await asyncio.sleep(0.5) - (event,) = events - # Should still have the transaction, but no child spans since integration is off - assert event["type"] == "transaction" + spans = [item.payload for item in items if item.type == "span"] chat_spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_CHAT and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(chat_spans) == 0 -def test_response_without_usage(sentry_init, capture_events): +def test_response_without_usage(sentry_init, capture_items): """Test handling of responses without usage information.""" sentry_init( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [{"role": "user", "content": "Hello!"}] @@ -1366,12 +1367,11 @@ def test_response_without_usage(sentry_init, capture_events): datetime.now(), ) - (event,) = events - (span,) = event["spans"] + (span,) = (item.payload for item in items if item.type == "span") # Span should still be created even without usage info - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat gpt-3.5-turbo" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat gpt-3.5-turbo" def test_integration_setup(sentry_init): @@ -1387,14 +1387,14 @@ def test_integration_setup(sentry_init): assert _failure_callback in (litellm.failure_callback or []) 
-def test_litellm_message_truncation(sentry_init, capture_events): +def test_litellm_message_truncation(sentry_init, capture_items): """Test that large messages are truncated properly in LiteLLM integration.""" sentry_init( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") large_content = ( "This is a very long message that will exceed our size limits. " * 1000 @@ -1422,25 +1422,24 @@ def test_litellm_message_truncation(sentry_init, capture_events): datetime.now(), ) - assert len(events) > 0 - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] chat_spans = [ - span for span in tx.get("spans", []) if span.get("op") == OP.GEN_AI_CHAT + span for span in spans if span["attributes"].get("sentry.op") == OP.GEN_AI_CHAT ] assert len(chat_spans) > 0 chat_span = chat_spans[0] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in chat_span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in chat_span["attributes"] - messages_data = chat_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + messages_data = chat_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] assert isinstance(messages_data, str) parsed_messages = json.loads(messages_data) assert isinstance(parsed_messages, list) assert len(parsed_messages) == 1 assert "small message 5" in str(parsed_messages[0]) + + tx = next(item.payload for item in items if item.type == "transaction") assert tx["_meta"]["spans"]["0"]["data"]["gen_ai.request.messages"][""]["len"] == 5 @@ -1452,7 +1451,7 @@ def test_litellm_message_truncation(sentry_init, capture_events): def test_binary_content_encoding_image_url( reset_litellm_executor, sentry_init, - capture_events, + capture_items, get_model_response, nonstreaming_chat_completions_model_response, ): @@ -1461,7 +1460,7 @@ def test_binary_content_encoding_image_url( traces_sample_rate=1.0, 
send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -1498,15 +1497,16 @@ def test_binary_content_encoding_image_url( litellm_utils.executor.shutdown(wait=True) - (event,) = events + spans = [item.payload for item in items if item.type == "span"] chat_spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_CHAT and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(chat_spans) == 1 span = chat_spans[0] - messages_data = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages_data = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) blob_item = next( ( @@ -1530,7 +1530,7 @@ def test_binary_content_encoding_image_url( @pytest.mark.asyncio(loop_scope="session") async def test_async_binary_content_encoding_image_url( sentry_init, - capture_events, + capture_items, get_model_response, nonstreaming_chat_completions_model_response, ): @@ -1539,7 +1539,7 @@ async def test_async_binary_content_encoding_image_url( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -1577,15 +1577,16 @@ async def test_async_binary_content_encoding_image_url( await GLOBAL_LOGGING_WORKER.flush() await asyncio.sleep(0.5) - (event,) = events + spans = [item.payload for item in items if item.type == "span"] chat_spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_CHAT and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(chat_spans) == 1 span = chat_spans[0] - messages_data = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages_data = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) blob_item = next( ( @@ -1609,7 
+1610,7 @@ async def test_async_binary_content_encoding_image_url( def test_binary_content_encoding_mixed_content( reset_litellm_executor, sentry_init, - capture_events, + capture_items, get_model_response, nonstreaming_chat_completions_model_response, ): @@ -1618,7 +1619,7 @@ def test_binary_content_encoding_mixed_content( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -1656,15 +1657,16 @@ def test_binary_content_encoding_mixed_content( litellm_utils.executor.shutdown(wait=True) - (event,) = events + spans = [item.payload for item in items if item.type == "span"] chat_spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_CHAT and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(chat_spans) == 1 span = chat_spans[0] - messages_data = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages_data = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) content_items = [ item for msg in messages_data if "content" in msg for item in msg["content"] @@ -1676,7 +1678,7 @@ def test_binary_content_encoding_mixed_content( @pytest.mark.asyncio(loop_scope="session") async def test_async_binary_content_encoding_mixed_content( sentry_init, - capture_events, + capture_items, get_model_response, nonstreaming_chat_completions_model_response, ): @@ -1685,7 +1687,7 @@ async def test_async_binary_content_encoding_mixed_content( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -1724,15 +1726,16 @@ async def test_async_binary_content_encoding_mixed_content( await GLOBAL_LOGGING_WORKER.flush() await asyncio.sleep(0.5) - (event,) = events + spans = [item.payload for item in items if item.type == "span"] chat_spans = list( x - for x in 
event["spans"] - if x["op"] == OP.GEN_AI_CHAT and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(chat_spans) == 1 span = chat_spans[0] - messages_data = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages_data = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) content_items = [ item for msg in messages_data if "content" in msg for item in msg["content"] @@ -1744,7 +1747,7 @@ async def test_async_binary_content_encoding_mixed_content( def test_binary_content_encoding_uri_type( reset_litellm_executor, sentry_init, - capture_events, + capture_items, get_model_response, nonstreaming_chat_completions_model_response, ): @@ -1753,7 +1756,7 @@ def test_binary_content_encoding_uri_type( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -1789,15 +1792,16 @@ def test_binary_content_encoding_uri_type( litellm_utils.executor.shutdown(wait=True) - (event,) = events + spans = [item.payload for item in items if item.type == "span"] chat_spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_CHAT and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(chat_spans) == 1 span = chat_spans[0] - messages_data = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages_data = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) uri_item = next( ( @@ -1816,7 +1820,7 @@ def test_binary_content_encoding_uri_type( @pytest.mark.asyncio(loop_scope="session") async def test_async_binary_content_encoding_uri_type( sentry_init, - capture_events, + capture_items, get_model_response, nonstreaming_chat_completions_model_response, ): @@ -1825,7 +1829,7 @@ async def 
test_async_binary_content_encoding_uri_type( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -1862,15 +1866,16 @@ async def test_async_binary_content_encoding_uri_type( await GLOBAL_LOGGING_WORKER.flush() await asyncio.sleep(0.5) - (event,) = events + spans = [item.payload for item in items if item.type == "span"] chat_spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_CHAT and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(chat_spans) == 1 span = chat_spans[0] - messages_data = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages_data = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) uri_item = next( ( From b92ae36dcfa27debc12b7c5bcaa7793434fec187 Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Fri, 17 Apr 2026 10:12:04 +0200 Subject: [PATCH 14/36] test huggingface_hub --- .../huggingface_hub/test_huggingface_hub.py | 231 +++++++++++------- 1 file changed, 139 insertions(+), 92 deletions(-) diff --git a/tests/integrations/huggingface_hub/test_huggingface_hub.py b/tests/integrations/huggingface_hub/test_huggingface_hub.py index 9dd15ca4b5..6b4402bc52 100644 --- a/tests/integrations/huggingface_hub/test_huggingface_hub.py +++ b/tests/integrations/huggingface_hub/test_huggingface_hub.py @@ -471,7 +471,7 @@ def mock_hf_chat_completion_api_streaming_tools(httpx_mock): @pytest.mark.parametrize("include_prompts", [True, False]) def test_text_generation( sentry_init: "Any", - capture_events: "Any", + capture_items: "Any", send_default_pii: "Any", include_prompts: "Any", mock_hf_text_generation_api: "Any", @@ -481,7 +481,7 @@ def test_text_generation( send_default_pii=send_default_pii, integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], ) - events = capture_events() + 
items = capture_items("transaction", "span") client = InferenceClient(model="test-model") @@ -492,23 +492,22 @@ def test_text_generation( details=True, ) - (transaction,) = events - + spans = [item.payload for item in items if item.type == "span"] span = None - for sp in transaction["spans"]: - if sp["op"].startswith("gen_ai"): + for sp in spans: + if sp["attributes"]["sentry.op"].startswith("gen_ai"): assert span is None, "there is exactly one gen_ai span" span = sp else: # there should be no other spans, just the gen_ai span # and optionally some http.client spans from talking to the hf api - assert sp["op"] == "http.client" + assert sp["attributes"]["sentry.op"] == "http.client" assert span is not None - assert span["op"] == "gen_ai.text_completion" - assert span["description"] == "text_completion test-model" - assert span["origin"] == "auto.ai.huggingface_hub" + assert span["attributes"]["sentry.op"] == "gen_ai.text_completion" + assert span["name"] == "text_completion test-model" + assert span["attributes"]["sentry.origin"] == "auto.ai.huggingface_hub" expected_data = { "gen_ai.operation.name": "text_completion", @@ -516,6 +515,14 @@ def test_text_generation( "gen_ai.response.finish_reasons": "length", "gen_ai.response.streaming": False, "gen_ai.usage.total_tokens": 10, + "sentry.environment": "production", + "sentry.op": "gen_ai.text_completion", + "sentry.origin": "auto.ai.huggingface_hub", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": "2.58.0", + "sentry.segment.id": mock.ANY, + "sentry.segment.name": "test", "thread.id": mock.ANY, "thread.name": mock.ANY, } @@ -528,10 +535,10 @@ def test_text_generation( assert "gen_ai.request.messages" not in expected_data assert "gen_ai.response.text" not in expected_data - assert span["data"] == expected_data + assert span["attributes"] == expected_data # text generation does not set the response model - assert "gen_ai.response.model" not in span["data"] + assert 
"gen_ai.response.model" not in span["attributes"] @pytest.mark.httpx_mock(assert_all_requests_were_expected=False) @@ -539,7 +546,7 @@ def test_text_generation( @pytest.mark.parametrize("include_prompts", [True, False]) def test_text_generation_streaming( sentry_init: "Any", - capture_events: "Any", + capture_items: "Any", send_default_pii: "Any", include_prompts: "Any", mock_hf_text_generation_api_streaming: "Any", @@ -549,7 +556,7 @@ def test_text_generation_streaming( send_default_pii=send_default_pii, integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], ) - events = capture_events() + items = capture_items("transaction", "span") client = InferenceClient(model="test-model") @@ -561,23 +568,22 @@ def test_text_generation_streaming( ): pass - (transaction,) = events - + spans = [item.payload for item in items if item.type == "span"] span = None - for sp in transaction["spans"]: - if sp["op"].startswith("gen_ai"): + for sp in spans: + if sp["attributes"]["sentry.op"].startswith("gen_ai"): assert span is None, "there is exactly one gen_ai span" span = sp else: # there should be no other spans, just the gen_ai span # and optionally some http.client spans from talking to the hf api - assert sp["op"] == "http.client" + assert sp["attributes"]["sentry.op"] == "http.client" assert span is not None - assert span["op"] == "gen_ai.text_completion" - assert span["description"] == "text_completion test-model" - assert span["origin"] == "auto.ai.huggingface_hub" + assert span["attributes"]["sentry.op"] == "gen_ai.text_completion" + assert span["name"] == "text_completion test-model" + assert span["attributes"]["sentry.origin"] == "auto.ai.huggingface_hub" expected_data = { "gen_ai.operation.name": "text_completion", @@ -585,6 +591,14 @@ def test_text_generation_streaming( "gen_ai.response.finish_reasons": "length", "gen_ai.response.streaming": True, "gen_ai.usage.total_tokens": 10, + "sentry.environment": "production", + "sentry.op": 
"gen_ai.text_completion", + "sentry.origin": "auto.ai.huggingface_hub", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": "2.58.0", + "sentry.segment.id": mock.ANY, + "sentry.segment.name": "test", "thread.id": mock.ANY, "thread.name": mock.ANY, } @@ -597,10 +611,10 @@ def test_text_generation_streaming( assert "gen_ai.request.messages" not in expected_data assert "gen_ai.response.text" not in expected_data - assert span["data"] == expected_data + assert span["attributes"] == expected_data # text generation does not set the response model - assert "gen_ai.response.model" not in span["data"] + assert "gen_ai.response.model" not in span["attributes"] @pytest.mark.httpx_mock(assert_all_requests_were_expected=False) @@ -608,7 +622,7 @@ def test_text_generation_streaming( @pytest.mark.parametrize("include_prompts", [True, False]) def test_chat_completion( sentry_init: "Any", - capture_events: "Any", + capture_items: "Any", send_default_pii: "Any", include_prompts: "Any", mock_hf_chat_completion_api: "Any", @@ -618,7 +632,7 @@ def test_chat_completion( send_default_pii=send_default_pii, integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], ) - events = capture_events() + items = capture_items("transaction", "span") client = get_hf_provider_inference_client() @@ -628,23 +642,22 @@ def test_chat_completion( stream=False, ) - (transaction,) = events - + spans = [item.payload for item in items if item.type == "span"] span = None - for sp in transaction["spans"]: - if sp["op"].startswith("gen_ai"): + for sp in spans: + if sp["attributes"]["sentry.op"].startswith("gen_ai"): assert span is None, "there is exactly one gen_ai span" span = sp else: # there should be no other spans, just the gen_ai span # and optionally some http.client spans from talking to the hf api - assert sp["op"] == "http.client" + assert sp["attributes"]["sentry.op"] == "http.client" assert span is not None - assert span["op"] == "gen_ai.chat" - 
assert span["description"] == "chat test-model" - assert span["origin"] == "auto.ai.huggingface_hub" + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert span["name"] == "chat test-model" + assert span["attributes"]["sentry.origin"] == "auto.ai.huggingface_hub" expected_data = { "gen_ai.operation.name": "chat", @@ -655,6 +668,14 @@ def test_chat_completion( "gen_ai.usage.input_tokens": 10, "gen_ai.usage.output_tokens": 8, "gen_ai.usage.total_tokens": 18, + "sentry.environment": "production", + "sentry.op": "gen_ai.chat", + "sentry.origin": "auto.ai.huggingface_hub", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": "2.58.0", + "sentry.segment.id": mock.ANY, + "sentry.segment.name": "test", "thread.id": mock.ANY, "thread.name": mock.ANY, } @@ -671,7 +692,7 @@ def test_chat_completion( assert "gen_ai.request.messages" not in expected_data assert "gen_ai.response.text" not in expected_data - assert span["data"] == expected_data + assert span["attributes"] == expected_data @pytest.mark.httpx_mock(assert_all_requests_were_expected=False) @@ -679,7 +700,7 @@ def test_chat_completion( @pytest.mark.parametrize("include_prompts", [True, False]) def test_chat_completion_streaming( sentry_init: "Any", - capture_events: "Any", + capture_items: "Any", send_default_pii: "Any", include_prompts: "Any", mock_hf_chat_completion_api_streaming: "Any", @@ -689,7 +710,7 @@ def test_chat_completion_streaming( send_default_pii=send_default_pii, integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], ) - events = capture_events() + items = capture_items("transaction", "span") client = get_hf_provider_inference_client() @@ -701,23 +722,22 @@ def test_chat_completion_streaming( ) ) - (transaction,) = events - + spans = [item.payload for item in items if item.type == "span"] span = None - for sp in transaction["spans"]: - if sp["op"].startswith("gen_ai"): + for sp in spans: + if 
sp["attributes"]["sentry.op"].startswith("gen_ai"): assert span is None, "there is exactly one gen_ai span" span = sp else: # there should be no other spans, just the gen_ai span # and optionally some http.client spans from talking to the hf api - assert sp["op"] == "http.client" + assert sp["attributes"]["sentry.op"] == "http.client" assert span is not None - assert span["op"] == "gen_ai.chat" - assert span["description"] == "chat test-model" - assert span["origin"] == "auto.ai.huggingface_hub" + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert span["name"] == "chat test-model" + assert span["attributes"]["sentry.origin"] == "auto.ai.huggingface_hub" expected_data = { "gen_ai.operation.name": "chat", @@ -725,6 +745,14 @@ def test_chat_completion_streaming( "gen_ai.response.finish_reasons": "stop", "gen_ai.response.model": "test-model-123", "gen_ai.response.streaming": True, + "sentry.environment": "production", + "sentry.op": "gen_ai.chat", + "sentry.origin": "auto.ai.huggingface_hub", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": "2.58.0", + "sentry.segment.id": mock.ANY, + "sentry.segment.name": "test", "thread.id": mock.ANY, "thread.name": mock.ANY, } @@ -744,15 +772,15 @@ def test_chat_completion_streaming( assert "gen_ai.request.messages" not in expected_data assert "gen_ai.response.text" not in expected_data - assert span["data"] == expected_data + assert span["attributes"] == expected_data @pytest.mark.httpx_mock(assert_all_requests_were_expected=False) def test_chat_completion_api_error( - sentry_init: "Any", capture_events: "Any", mock_hf_api_with_errors: "Any" + sentry_init: "Any", capture_items: "Any", mock_hf_api_with_errors: "Any" ) -> None: sentry_init(traces_sample_rate=1.0) - events = capture_events() + items = capture_items("event", "transaction", "span") client = get_hf_provider_inference_client() @@ -762,32 +790,29 @@ def test_chat_completion_api_error( messages=[{"role": "user", 
"content": "Hello!"}], ) - ( - error, - transaction, - ) = events - + (error,) = (item.payload for item in items if item.type == "event") assert error["exception"]["values"][0]["mechanism"]["type"] == "huggingface_hub" assert not error["exception"]["values"][0]["mechanism"]["handled"] + spans = [item.payload for item in items if item.type == "span"] span = None - for sp in transaction["spans"]: - if sp["op"].startswith("gen_ai"): + for sp in spans: + if sp["attributes"]["sentry.op"].startswith("gen_ai"): assert span is None, "there is exactly one gen_ai span" span = sp else: # there should be no other spans, just the gen_ai span # and optionally some http.client spans from talking to the hf api - assert sp["op"] == "http.client" + assert sp["attributes"]["sentry.op"] == "http.client" assert span is not None - assert span["op"] == "gen_ai.chat" - assert span["description"] == "chat test-model" - assert span["origin"] == "auto.ai.huggingface_hub" - assert span["status"] == "internal_error" - assert span.get("tags", {}).get("status") == "internal_error" + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert span["name"] == "chat test-model" + assert span["attributes"]["sentry.origin"] == "auto.ai.huggingface_hub" + assert span["status"] == "error" + (transaction,) = (item.payload for item in items if item.type == "transaction") assert ( error["contexts"]["trace"]["trace_id"] == transaction["contexts"]["trace"]["trace_id"] @@ -795,18 +820,26 @@ def test_chat_completion_api_error( expected_data = { "gen_ai.operation.name": "chat", "gen_ai.request.model": "test-model", + "sentry.environment": "production", + "sentry.op": "gen_ai.chat", + "sentry.origin": "auto.ai.huggingface_hub", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": "2.58.0", + "sentry.segment.id": mock.ANY, + "sentry.segment.name": "test", "thread.id": mock.ANY, "thread.name": mock.ANY, } - assert span["data"] == expected_data + assert 
span["attributes"] == expected_data @pytest.mark.httpx_mock(assert_all_requests_were_expected=False) def test_span_status_error( - sentry_init: "Any", capture_events: "Any", mock_hf_api_with_errors: "Any" + sentry_init: "Any", capture_items: "Any", mock_hf_api_with_errors: "Any" ) -> None: sentry_init(traces_sample_rate=1.0) - events = capture_events() + items = capture_items("event", "transaction", "span") client = get_hf_provider_inference_client() @@ -816,22 +849,22 @@ def test_span_status_error( messages=[{"role": "user", "content": "Hello!"}], ) - (error, transaction) = events + (error,) = [item.payload for item in items if item.type == "event"] assert error["level"] == "error" + spans = [item.payload for item in items if item.type == "span"] span = None - for sp in transaction["spans"]: - if sp["op"].startswith("gen_ai"): + for sp in spans: + if sp["attributes"]["sentry.op"].startswith("gen_ai"): assert span is None, "there is exactly one gen_ai span" span = sp else: # there should be no other spans, just the gen_ai span # and optionally some http.client spans from talking to the hf api - assert sp["op"] == "http.client" + assert sp["attributes"]["sentry.op"] == "http.client" assert span is not None - assert span["status"] == "internal_error" - assert span["tags"]["status"] == "internal_error" + assert span["status"] == "error" @pytest.mark.httpx_mock(assert_all_requests_were_expected=False) @@ -839,7 +872,7 @@ def test_span_status_error( @pytest.mark.parametrize("include_prompts", [True, False]) def test_chat_completion_with_tools( sentry_init: "Any", - capture_events: "Any", + capture_items: "Any", send_default_pii: "Any", include_prompts: "Any", mock_hf_chat_completion_api_tools: "Any", @@ -849,7 +882,7 @@ def test_chat_completion_with_tools( send_default_pii=send_default_pii, integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], ) - events = capture_events() + items = capture_items("transaction", "span") client = 
get_hf_provider_inference_client() @@ -875,23 +908,22 @@ def test_chat_completion_with_tools( tool_choice="auto", ) - (transaction,) = events - + spans = [item.payload for item in items if item.type == "span"] span = None - for sp in transaction["spans"]: - if sp["op"].startswith("gen_ai"): + for sp in spans: + if sp["attributes"]["sentry.op"].startswith("gen_ai"): assert span is None, "there is exactly one gen_ai span" span = sp else: # there should be no other spans, just the gen_ai span # and optionally some http.client spans from talking to the hf api - assert sp["op"] == "http.client" + assert sp["attributes"]["sentry.op"] == "http.client" assert span is not None - assert span["op"] == "gen_ai.chat" - assert span["description"] == "chat test-model" - assert span["origin"] == "auto.ai.huggingface_hub" + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert span["name"] == "chat test-model" + assert span["attributes"]["sentry.origin"] == "auto.ai.huggingface_hub" expected_data = { "gen_ai.operation.name": "chat", @@ -902,6 +934,14 @@ def test_chat_completion_with_tools( "gen_ai.usage.input_tokens": 10, "gen_ai.usage.output_tokens": 8, "gen_ai.usage.total_tokens": 18, + "sentry.environment": "production", + "sentry.op": "gen_ai.chat", + "sentry.origin": "auto.ai.huggingface_hub", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": "2.58.0", + "sentry.segment.id": mock.ANY, + "sentry.segment.name": "test", "thread.id": mock.ANY, "thread.name": mock.ANY, } @@ -919,7 +959,7 @@ def test_chat_completion_with_tools( assert "gen_ai.response.text" not in expected_data assert "gen_ai.response.tool_calls" not in expected_data - assert span["data"] == expected_data + assert span["attributes"] == expected_data @pytest.mark.httpx_mock(assert_all_requests_were_expected=False) @@ -927,7 +967,7 @@ def test_chat_completion_with_tools( @pytest.mark.parametrize("include_prompts", [True, False]) def 
test_chat_completion_streaming_with_tools( sentry_init: "Any", - capture_events: "Any", + capture_items: "Any", send_default_pii: "Any", include_prompts: "Any", mock_hf_chat_completion_api_streaming_tools: "Any", @@ -937,7 +977,7 @@ def test_chat_completion_streaming_with_tools( send_default_pii=send_default_pii, integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], ) - events = capture_events() + items = capture_items("transaction", "span") client = get_hf_provider_inference_client() @@ -966,23 +1006,22 @@ def test_chat_completion_streaming_with_tools( ) ) - (transaction,) = events - + spans = [item.payload for item in items if item.type == "span"] span = None - for sp in transaction["spans"]: - if sp["op"].startswith("gen_ai"): + for sp in spans: + if sp["attributes"]["sentry.op"].startswith("gen_ai"): assert span is None, "there is exactly one gen_ai span" span = sp else: # there should be no other spans, just the gen_ai span # and optionally some http.client spans from talking to the hf api - assert sp["op"] == "http.client" + assert sp["attributes"]["sentry.op"] == "http.client" assert span is not None - assert span["op"] == "gen_ai.chat" - assert span["description"] == "chat test-model" - assert span["origin"] == "auto.ai.huggingface_hub" + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert span["name"] == "chat test-model" + assert span["attributes"]["sentry.origin"] == "auto.ai.huggingface_hub" expected_data = { "gen_ai.operation.name": "chat", @@ -991,6 +1030,14 @@ def test_chat_completion_streaming_with_tools( "gen_ai.response.finish_reasons": "tool_calls", "gen_ai.response.model": "test-model-123", "gen_ai.response.streaming": True, + "sentry.environment": "production", + "sentry.op": "gen_ai.chat", + "sentry.origin": "auto.ai.huggingface_hub", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": "2.58.0", + "sentry.segment.id": mock.ANY, + "sentry.segment.name": "test", 
"thread.id": mock.ANY, "thread.name": mock.ANY, } @@ -1014,4 +1061,4 @@ def test_chat_completion_streaming_with_tools( assert "gen_ai.response.text" not in expected_data assert "gen_ai.response.tool_calls" not in expected_data - assert span["data"] == expected_data + assert span["attributes"] == expected_data From 907ca1d981ac652ce8e31015f5addd4af04316c1 Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Fri, 17 Apr 2026 10:31:41 +0200 Subject: [PATCH 15/36] test langchain --- .../integrations/langchain/test_langchain.py | 590 ++++++++++-------- 1 file changed, 319 insertions(+), 271 deletions(-) diff --git a/tests/integrations/langchain/test_langchain.py b/tests/integrations/langchain/test_langchain.py index 498a5d6f4a..f709d12129 100644 --- a/tests/integrations/langchain/test_langchain.py +++ b/tests/integrations/langchain/test_langchain.py @@ -97,7 +97,7 @@ def _llm_type(self) -> str: def test_langchain_text_completion( sentry_init, - capture_events, + capture_items, get_model_response, ): sentry_init( @@ -109,7 +109,7 @@ def test_langchain_text_completion( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") model_response = get_model_response( Completion( @@ -149,25 +149,29 @@ def test_langchain_text_completion( input_text = "What is the capital of France?" 
model.invoke(input_text, config={"run_name": "my-snazzy-pipeline"}) - tx = events[0] + tx = next(item.payload for item in items if item.type == "transaction") assert tx["type"] == "transaction" + spans = [item.payload for item in items if item.type == "span"] llm_spans = [ span - for span in tx.get("spans", []) - if span.get("op") == "gen_ai.text_completion" + for span in spans + if span["attributes"].get("sentry.op") == "gen_ai.text_completion" ] assert len(llm_spans) > 0 llm_span = llm_spans[0] - assert llm_span["description"] == "text_completion gpt-3.5-turbo" - assert llm_span["data"]["gen_ai.system"] == "openai" - assert llm_span["data"]["gen_ai.pipeline.name"] == "my-snazzy-pipeline" - assert llm_span["data"]["gen_ai.request.model"] == "gpt-3.5-turbo" - assert llm_span["data"]["gen_ai.response.text"] == "The capital of France is Paris." - assert llm_span["data"]["gen_ai.usage.total_tokens"] == 25 - assert llm_span["data"]["gen_ai.usage.input_tokens"] == 10 - assert llm_span["data"]["gen_ai.usage.output_tokens"] == 15 + assert llm_span["name"] == "text_completion gpt-3.5-turbo" + assert llm_span["attributes"]["gen_ai.system"] == "openai" + assert llm_span["attributes"]["gen_ai.pipeline.name"] == "my-snazzy-pipeline" + assert llm_span["attributes"]["gen_ai.request.model"] == "gpt-3.5-turbo" + assert ( + llm_span["attributes"]["gen_ai.response.text"] + == "The capital of France is Paris." 
+ ) + assert llm_span["attributes"]["gen_ai.usage.total_tokens"] == 25 + assert llm_span["attributes"]["gen_ai.usage.input_tokens"] == 10 + assert llm_span["attributes"]["gen_ai.usage.output_tokens"] == 15 @pytest.mark.skipif( @@ -196,7 +200,7 @@ def test_langchain_text_completion( ) def test_langchain_create_agent( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, system_instructions_content, @@ -213,7 +217,7 @@ def test_langchain_create_agent( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") model_response = get_model_response( nonstreaming_responses_model_response, @@ -250,22 +254,23 @@ def test_langchain_create_agent( }, ) - tx = events[0] + tx = next(item.payload for item in items if item.type == "transaction") assert tx["type"] == "transaction" assert tx["contexts"]["trace"]["origin"] == "manual" - chat_spans = list(x for x in tx["spans"] if x["op"] == "gen_ai.chat") + spans = [item.payload for item in items if item.type == "span"] + chat_spans = list(x for x in spans if x["attributes"]["sentry.op"] == "gen_ai.chat") assert len(chat_spans) == 1 - assert chat_spans[0]["origin"] == "auto.ai.langchain" + assert chat_spans[0]["attributes"]["sentry.origin"] == "auto.ai.langchain" - assert chat_spans[0]["data"]["gen_ai.system"] == "openai-chat" - assert chat_spans[0]["data"]["gen_ai.usage.input_tokens"] == 10 - assert chat_spans[0]["data"]["gen_ai.usage.output_tokens"] == 20 - assert chat_spans[0]["data"]["gen_ai.usage.total_tokens"] == 30 + assert chat_spans[0]["attributes"]["gen_ai.system"] == "openai-chat" + assert chat_spans[0]["attributes"]["gen_ai.usage.input_tokens"] == 10 + assert chat_spans[0]["attributes"]["gen_ai.usage.output_tokens"] == 20 + assert chat_spans[0]["attributes"]["gen_ai.usage.total_tokens"] == 30 if send_default_pii and include_prompts: assert ( - chat_spans[0]["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + 
chat_spans[0]["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hello, how can I help you?" ) @@ -276,7 +281,9 @@ def test_langchain_create_agent( "type": "text", "content": "You are very powerful assistant, but don't know current events", } - ] == json.loads(chat_spans[0]["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) + ] == json.loads( + chat_spans[0]["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] + ) else: assert [ { @@ -287,11 +294,17 @@ def test_langchain_create_agent( "type": "text", "content": "Be concise and clear.", }, - ] == json.loads(chat_spans[0]["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) + ] == json.loads( + chat_spans[0]["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] + ) else: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in chat_spans[0].get("data", {}) - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in chat_spans[0].get("data", {}) - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_spans[0].get("data", {}) + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in chat_spans[0].get( + "attributes", {} + ) + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in chat_spans[0].get( + "attributes", {} + ) + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_spans[0].get("attributes", {}) @pytest.mark.skipif( @@ -309,7 +322,7 @@ def test_langchain_create_agent( ) def test_tool_execution_span( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -324,7 +337,7 @@ def test_tool_execution_span( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") responses = responses_tool_call_model_responses( tool_name="get_word_length", @@ -400,60 +413,71 @@ def test_tool_execution_span( }, ) - tx = events[0] + tx = next(item.payload for item in items if item.type == "transaction") assert tx["type"] == "transaction" assert tx["contexts"]["trace"]["origin"] == "manual" - chat_spans = list(x for x in tx["spans"] if x["op"] == "gen_ai.chat") + 
spans = [item.payload for item in items if item.type == "span"] + chat_spans = list(x for x in spans if x["attributes"]["sentry.op"] == "gen_ai.chat") assert len(chat_spans) == 2 - tool_exec_spans = list(x for x in tx["spans"] if x["op"] == "gen_ai.execute_tool") + tool_exec_spans = list( + x for x in spans if x["attributes"]["sentry.op"] == "gen_ai.execute_tool" + ) assert len(tool_exec_spans) == 1 tool_exec_span = tool_exec_spans[0] - assert chat_spans[0]["origin"] == "auto.ai.langchain" - assert chat_spans[1]["origin"] == "auto.ai.langchain" - assert tool_exec_span["origin"] == "auto.ai.langchain" + assert chat_spans[0]["attributes"]["sentry.origin"] == "auto.ai.langchain" + assert chat_spans[1]["attributes"]["sentry.origin"] == "auto.ai.langchain" + assert tool_exec_span["attributes"]["sentry.origin"] == "auto.ai.langchain" - assert chat_spans[0]["data"]["gen_ai.usage.input_tokens"] == 142 - assert chat_spans[0]["data"]["gen_ai.usage.output_tokens"] == 50 - assert chat_spans[0]["data"]["gen_ai.usage.total_tokens"] == 192 - assert chat_spans[0]["data"]["gen_ai.system"] == "openai-chat" + assert chat_spans[0]["attributes"]["gen_ai.usage.input_tokens"] == 142 + assert chat_spans[0]["attributes"]["gen_ai.usage.output_tokens"] == 50 + assert chat_spans[0]["attributes"]["gen_ai.usage.total_tokens"] == 192 + assert chat_spans[0]["attributes"]["gen_ai.system"] == "openai-chat" - assert chat_spans[1]["data"]["gen_ai.usage.input_tokens"] == 89 - assert chat_spans[1]["data"]["gen_ai.usage.output_tokens"] == 28 - assert chat_spans[1]["data"]["gen_ai.usage.total_tokens"] == 117 - assert chat_spans[1]["data"]["gen_ai.system"] == "openai-chat" + assert chat_spans[1]["attributes"]["gen_ai.usage.input_tokens"] == 89 + assert chat_spans[1]["attributes"]["gen_ai.usage.output_tokens"] == 28 + assert chat_spans[1]["attributes"]["gen_ai.usage.total_tokens"] == 117 + assert chat_spans[1]["attributes"]["gen_ai.system"] == "openai-chat" if send_default_pii and include_prompts: - assert 
"word" in tool_exec_span["data"][SPANDATA.GEN_AI_TOOL_INPUT] + assert "word" in tool_exec_span["attributes"][SPANDATA.GEN_AI_TOOL_INPUT] - assert "5" in chat_spans[1]["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + assert "5" in chat_spans[1]["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] # Verify tool calls are recorded when PII is enabled - assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS in chat_spans[0].get("data", {}), ( + assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS in chat_spans[0].get( + "attributes", {} + ), ( "Tool calls should be recorded when send_default_pii=True and include_prompts=True" ) - tool_calls_data = chat_spans[0]["data"][SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS] + tool_calls_data = chat_spans[0]["attributes"][ + SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS + ] assert isinstance(tool_calls_data, str) assert "get_word_length" in tool_calls_data else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in chat_spans[0].get("data", {}) - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_spans[0].get("data", {}) - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in chat_spans[1].get("data", {}) - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_spans[1].get("data", {}) - assert SPANDATA.GEN_AI_TOOL_INPUT not in tool_exec_span.get("data", {}) - assert SPANDATA.GEN_AI_TOOL_OUTPUT not in tool_exec_span.get("data", {}) + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in chat_spans[0].get( + "attributes", {} + ) + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_spans[0].get("attributes", {}) + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in chat_spans[1].get( + "attributes", {} + ) + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_spans[1].get("attributes", {}) + assert SPANDATA.GEN_AI_TOOL_INPUT not in tool_exec_span.get("attributes", {}) + assert SPANDATA.GEN_AI_TOOL_OUTPUT not in tool_exec_span.get("attributes", {}) # Verify tool calls are NOT recorded when PII is disabled assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS not in chat_spans[0].get( - "data", {} + "attributes", {} ), ( f"Tool calls 
should NOT be recorded when send_default_pii={send_default_pii} " f"and include_prompts={include_prompts}" ) assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS not in chat_spans[1].get( - "data", {} + "attributes", {} ), ( f"Tool calls should NOT be recorded when send_default_pii={send_default_pii} " f"and include_prompts={include_prompts}" @@ -461,7 +485,7 @@ def test_tool_execution_span( # Verify that available tools are always recorded regardless of PII settings for chat_span in chat_spans: - tools_data = chat_span["data"][SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS] + tools_data = chat_span["attributes"][SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS] assert "get_word_length" in tools_data @@ -488,7 +512,7 @@ def test_tool_execution_span( ) def test_langchain_openai_tools_agent( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, system_instructions_content, @@ -505,7 +529,7 @@ def test_langchain_openai_tools_agent( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") prompt = ChatPromptTemplate.from_messages( [ @@ -700,40 +724,47 @@ def test_langchain_openai_tools_agent( with start_transaction(): list(agent_executor.stream({"input": "How many letters in the word eudca"})) - tx = events[0] + tx = next(item.payload for item in items if item.type == "transaction") assert tx["type"] == "transaction" assert tx["contexts"]["trace"]["origin"] == "manual" - invoke_agent_span = next(x for x in tx["spans"] if x["op"] == "gen_ai.invoke_agent") - chat_spans = list(x for x in tx["spans"] if x["op"] == "gen_ai.chat") - tool_exec_span = next(x for x in tx["spans"] if x["op"] == "gen_ai.execute_tool") + spans = [item.payload for item in items if item.type == "span"] + invoke_agent_span = next( + x for x in spans if x["attributes"]["sentry.op"] == "gen_ai.invoke_agent" + ) + chat_spans = list(x for x in spans if x["attributes"]["sentry.op"] == "gen_ai.chat") + tool_exec_span = next( 
+ x for x in spans if x["attributes"]["sentry.op"] == "gen_ai.execute_tool" + ) assert len(chat_spans) == 2 - assert invoke_agent_span["origin"] == "auto.ai.langchain" - assert chat_spans[0]["origin"] == "auto.ai.langchain" - assert chat_spans[1]["origin"] == "auto.ai.langchain" - assert tool_exec_span["origin"] == "auto.ai.langchain" + assert invoke_agent_span["attributes"]["sentry.origin"] == "auto.ai.langchain" + assert chat_spans[0]["attributes"]["sentry.origin"] == "auto.ai.langchain" + assert chat_spans[1]["attributes"]["sentry.origin"] == "auto.ai.langchain" + assert tool_exec_span["attributes"]["sentry.origin"] == "auto.ai.langchain" # We can't guarantee anything about the "shape" of the langchain execution graph - assert len(list(x for x in tx["spans"] if x["op"] == "gen_ai.chat")) > 0 + assert ( + len(list(x for x in spans if x["attributes"]["sentry.op"] == "gen_ai.chat")) > 0 + ) # Token usage is only available in newer versions of langchain (v0.2+) # where usage_metadata is supported on AIMessageChunk - if "gen_ai.usage.input_tokens" in chat_spans[0]["data"]: - assert chat_spans[0]["data"]["gen_ai.usage.input_tokens"] == 142 - assert chat_spans[0]["data"]["gen_ai.usage.output_tokens"] == 50 - assert chat_spans[0]["data"]["gen_ai.usage.total_tokens"] == 192 + if "gen_ai.usage.input_tokens" in chat_spans[0]["attributes"]: + assert chat_spans[0]["attributes"]["gen_ai.usage.input_tokens"] == 142 + assert chat_spans[0]["attributes"]["gen_ai.usage.output_tokens"] == 50 + assert chat_spans[0]["attributes"]["gen_ai.usage.total_tokens"] == 192 - if "gen_ai.usage.input_tokens" in chat_spans[1]["data"]: - assert chat_spans[1]["data"]["gen_ai.usage.input_tokens"] == 89 - assert chat_spans[1]["data"]["gen_ai.usage.output_tokens"] == 28 - assert chat_spans[1]["data"]["gen_ai.usage.total_tokens"] == 117 + if "gen_ai.usage.input_tokens" in chat_spans[1]["attributes"]: + assert chat_spans[1]["attributes"]["gen_ai.usage.input_tokens"] == 89 + assert 
chat_spans[1]["attributes"]["gen_ai.usage.output_tokens"] == 28 + assert chat_spans[1]["attributes"]["gen_ai.usage.total_tokens"] == 117 if send_default_pii and include_prompts: - assert "5" in chat_spans[0]["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] - assert "word" in tool_exec_span["data"][SPANDATA.GEN_AI_TOOL_INPUT] - assert 5 == int(tool_exec_span["data"][SPANDATA.GEN_AI_TOOL_OUTPUT]) + assert "5" in chat_spans[0]["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] + assert "word" in tool_exec_span["attributes"][SPANDATA.GEN_AI_TOOL_INPUT] + assert 5 == int(tool_exec_span["attributes"][SPANDATA.GEN_AI_TOOL_OUTPUT]) param_id = request.node.callspec.id if "string" in param_id: @@ -742,7 +773,9 @@ def test_langchain_openai_tools_agent( "type": "text", "content": "You are very powerful assistant, but don't know current events", } - ] == json.loads(chat_spans[0]["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) + ] == json.loads( + chat_spans[0]["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] + ) else: assert [ { @@ -753,15 +786,21 @@ def test_langchain_openai_tools_agent( "type": "text", "content": "Be concise and clear.", }, - ] == json.loads(chat_spans[0]["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) + ] == json.loads( + chat_spans[0]["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] + ) - assert "5" in chat_spans[1]["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + assert "5" in chat_spans[1]["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] # Verify tool calls are recorded when PII is enabled - assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS in chat_spans[0].get("data", {}), ( + assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS in chat_spans[0].get( + "attributes", {} + ), ( "Tool calls should be recorded when send_default_pii=True and include_prompts=True" ) - tool_calls_data = chat_spans[0]["data"][SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS] + tool_calls_data = chat_spans[0]["attributes"][ + SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS + ] assert isinstance(tool_calls_data, (list, str)) # Could be serialized if 
isinstance(tool_calls_data, str): assert "get_word_length" in tool_calls_data @@ -770,45 +809,55 @@ def test_langchain_openai_tools_agent( tool_call_str = str(tool_calls_data) assert "get_word_length" in tool_call_str else: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in chat_spans[0].get("data", {}) - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in chat_spans[0].get("data", {}) - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_spans[0].get("data", {}) - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in chat_spans[1].get("data", {}) - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in chat_spans[1].get("data", {}) - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_spans[1].get("data", {}) - assert SPANDATA.GEN_AI_TOOL_INPUT not in tool_exec_span.get("data", {}) - assert SPANDATA.GEN_AI_TOOL_OUTPUT not in tool_exec_span.get("data", {}) + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in chat_spans[0].get( + "attributes", {} + ) + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in chat_spans[0].get( + "attributes", {} + ) + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_spans[0].get("attributes", {}) + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in chat_spans[1].get( + "attributes", {} + ) + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in chat_spans[1].get( + "attributes", {} + ) + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_spans[1].get("attributes", {}) + assert SPANDATA.GEN_AI_TOOL_INPUT not in tool_exec_span.get("attributes", {}) + assert SPANDATA.GEN_AI_TOOL_OUTPUT not in tool_exec_span.get("attributes", {}) # Verify tool calls are NOT recorded when PII is disabled assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS not in chat_spans[0].get( - "data", {} + "attributes", {} ), ( f"Tool calls should NOT be recorded when send_default_pii={send_default_pii} " f"and include_prompts={include_prompts}" ) assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS not in chat_spans[1].get( - "data", {} + "attributes", {} ), ( f"Tool calls should NOT be recorded when 
send_default_pii={send_default_pii} " f"and include_prompts={include_prompts}" ) # Verify finish_reasons is always an array of strings - assert chat_spans[0]["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == [ + assert chat_spans[0]["attributes"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == [ "function_call" ] - assert chat_spans[1]["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == ["stop"] + assert chat_spans[1]["attributes"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == [ + "stop" + ] # Verify that available tools are always recorded regardless of PII settings for chat_span in chat_spans: - tools_data = chat_span["data"][SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS] + tools_data = chat_span["attributes"][SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS] assert tools_data is not None, ( "Available tools should always be recorded regardless of PII settings" ) assert "get_word_length" in tools_data -def test_langchain_error(sentry_init, capture_events): +def test_langchain_error(sentry_init, capture_items): global llm_type llm_type = "acme-llm" @@ -817,7 +866,7 @@ def test_langchain_error(sentry_init, capture_events): traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("event", "transaction", "span") prompt = ChatPromptTemplate.from_messages( [ @@ -843,11 +892,11 @@ def test_langchain_error(sentry_init, capture_events): with start_transaction(), pytest.raises(ValueError): list(agent_executor.stream({"input": "How many letters in the word eudca"})) - error = events[0] + error = next(item.payload for item in items if item.type == "event") assert error["level"] == "error" -def test_span_status_error(sentry_init, capture_events): +def test_span_status_error(sentry_init, capture_items): global llm_type llm_type = "acme-llm" @@ -855,7 +904,7 @@ def test_span_status_error(sentry_init, capture_events): integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, ) - events = capture_events() + items = 
capture_items("event", "transaction", "span") with start_transaction(name="test"): prompt = ChatPromptTemplate.from_messages( @@ -884,10 +933,13 @@ def test_span_status_error(sentry_init, capture_events): with pytest.raises(ValueError): list(agent_executor.stream({"input": "How many letters in the word eudca"})) - (error, transaction) = events + error = next(item.payload for item in items if item.type == "event") assert error["level"] == "error" - assert transaction["spans"][0]["status"] == "internal_error" - assert transaction["spans"][0]["tags"]["status"] == "internal_error" + + spans = [item.payload for item in items if item.type == "span"] + assert spans[0]["status"] == "error" + + (transaction,) = (item.payload for item in items if item.type == "transaction") assert transaction["contexts"]["trace"]["status"] == "internal_error" @@ -1100,7 +1152,7 @@ def test_langchain_callback_list_existing_callback(sentry_init): assert handler is sentry_callback -def test_langchain_message_role_mapping(sentry_init, capture_events): +def test_langchain_message_role_mapping(sentry_init, capture_items): """Test that message roles are properly normalized in langchain integration.""" global llm_type llm_type = "openai-chat" @@ -1110,7 +1162,7 @@ def test_langchain_message_role_mapping(sentry_init, capture_events): traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") prompt = ChatPromptTemplate.from_messages( [ @@ -1146,19 +1198,18 @@ def test_langchain_message_role_mapping(sentry_init, capture_events): with start_transaction(): list(agent_executor.stream({"input": test_input})) - assert len(events) > 0 - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] # Find spans with gen_ai operation that should have message data gen_ai_spans = [ - span for span in tx.get("spans", []) if span.get("op", "").startswith("gen_ai") + span + for span in spans + 
if span["attributes"].get("sentry.op", "").startswith("gen_ai") ] # Check if any span has message data with normalized roles message_data_found = False for span in gen_ai_spans: - span_data = span.get("data", {}) + span_data = span.get("attributes", {}) if SPANDATA.GEN_AI_REQUEST_MESSAGES in span_data: message_data_found = True messages_data = span_data[SPANDATA.GEN_AI_REQUEST_MESSAGES] @@ -1239,7 +1290,7 @@ def test_langchain_message_role_normalization_units(): assert normalized[5] == "string message" # String message unchanged -def test_langchain_message_truncation(sentry_init, capture_events): +def test_langchain_message_truncation(sentry_init, capture_items): """Test that large messages are truncated properly in Langchain integration.""" from langchain_core.outputs import LLMResult, Generation @@ -1248,7 +1299,7 @@ def test_langchain_message_truncation(sentry_init, capture_events): traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") callback = SentryLangchainCallback(max_span_map_size=100, include_prompts=True) @@ -1291,23 +1342,23 @@ def test_langchain_message_truncation(sentry_init, capture_events): ) callback.on_llm_end(response=response, run_id=run_id) - assert len(events) > 0 - tx = events[0] + tx = next(item.payload for item in items if item.type == "transaction") assert tx["type"] == "transaction" + spans = [item.payload for item in items if item.type == "span"] llm_spans = [ span - for span in tx.get("spans", []) - if span.get("op") == "gen_ai.text_completion" + for span in spans + if span["attributes"].get("sentry.op") == "gen_ai.text_completion" ] assert len(llm_spans) > 0 llm_span = llm_spans[0] - assert llm_span["data"]["gen_ai.operation.name"] == "text_completion" - assert llm_span["data"][SPANDATA.GEN_AI_PIPELINE_NAME] == "my_pipeline" + assert llm_span["attributes"]["gen_ai.operation.name"] == "text_completion" + assert llm_span["attributes"][SPANDATA.GEN_AI_PIPELINE_NAME] == 
"my_pipeline" - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in llm_span["data"] - messages_data = llm_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in llm_span["attributes"] + messages_data = llm_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] assert isinstance(messages_data, str) parsed_messages = json.loads(messages_data) @@ -1327,7 +1378,7 @@ def test_langchain_message_truncation(sentry_init, capture_events): ], ) def test_langchain_embeddings_sync( - sentry_init, capture_events, send_default_pii, include_prompts + sentry_init, capture_items, send_default_pii, include_prompts ): """Test that sync embedding methods (embed_documents, embed_query) are properly traced.""" try: @@ -1340,7 +1391,7 @@ def test_langchain_embeddings_sync( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") # Mock the actual API call with mock.patch.object( @@ -1362,27 +1413,28 @@ def test_langchain_embeddings_sync( assert len(result) == 2 mock_embed_documents.assert_called_once() - # Check captured events - assert len(events) >= 1 - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] # Find embeddings span embeddings_spans = [ - span for span in tx.get("spans", []) if span.get("op") == "gen_ai.embeddings" + span + for span in spans + if span["attributes"].get("sentry.op") == "gen_ai.embeddings" ] assert len(embeddings_spans) == 1 embeddings_span = embeddings_spans[0] - assert embeddings_span["description"] == "embeddings text-embedding-ada-002" - assert embeddings_span["origin"] == "auto.ai.langchain" - assert embeddings_span["data"]["gen_ai.operation.name"] == "embeddings" - assert embeddings_span["data"]["gen_ai.request.model"] == "text-embedding-ada-002" + assert embeddings_span["name"] == "embeddings text-embedding-ada-002" + assert embeddings_span["attributes"]["sentry.origin"] == 
"auto.ai.langchain" + assert embeddings_span["attributes"]["gen_ai.operation.name"] == "embeddings" + assert ( + embeddings_span["attributes"]["gen_ai.request.model"] + == "text-embedding-ada-002" + ) # Check if input is captured based on PII settings if send_default_pii and include_prompts: - assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT in embeddings_span["data"] - input_data = embeddings_span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] + assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT in embeddings_span["attributes"] + input_data = embeddings_span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] # Could be serialized as string if isinstance(input_data, str): assert "Hello world" in input_data @@ -1391,7 +1443,9 @@ def test_langchain_embeddings_sync( assert "Hello world" in input_data assert "Test document" in input_data else: - assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in embeddings_span.get("data", {}) + assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in embeddings_span.get( + "attributes", {} + ) @pytest.mark.parametrize( @@ -1402,7 +1456,7 @@ def test_langchain_embeddings_sync( ], ) def test_langchain_embeddings_embed_query( - sentry_init, capture_events, send_default_pii, include_prompts + sentry_init, capture_items, send_default_pii, include_prompts ): """Test that embed_query method is properly traced.""" try: @@ -1415,7 +1469,7 @@ def test_langchain_embeddings_embed_query( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") # Mock the actual API call with mock.patch.object( @@ -1436,32 +1490,35 @@ def test_langchain_embeddings_embed_query( assert len(result) == 3 mock_embed_query.assert_called_once() - # Check captured events - assert len(events) >= 1 - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] # Find embeddings span embeddings_spans = [ - span for span in tx.get("spans", []) if span.get("op") == 
"gen_ai.embeddings" + span + for span in spans + if span["attributes"].get("sentry.op") == "gen_ai.embeddings" ] assert len(embeddings_spans) == 1 embeddings_span = embeddings_spans[0] - assert embeddings_span["data"]["gen_ai.operation.name"] == "embeddings" - assert embeddings_span["data"]["gen_ai.request.model"] == "text-embedding-ada-002" + assert embeddings_span["attributes"]["gen_ai.operation.name"] == "embeddings" + assert ( + embeddings_span["attributes"]["gen_ai.request.model"] + == "text-embedding-ada-002" + ) # Check if input is captured based on PII settings if send_default_pii and include_prompts: - assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT in embeddings_span["data"] - input_data = embeddings_span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] + assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT in embeddings_span["attributes"] + input_data = embeddings_span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] # Could be serialized as string if isinstance(input_data, str): assert "What is the capital of France?" in input_data else: assert "What is the capital of France?" 
in input_data else: - assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in embeddings_span.get("data", {}) + assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in embeddings_span.get( + "attributes", {} + ) @pytest.mark.parametrize( @@ -1473,7 +1530,7 @@ def test_langchain_embeddings_embed_query( ) @pytest.mark.asyncio async def test_langchain_embeddings_async( - sentry_init, capture_events, send_default_pii, include_prompts + sentry_init, capture_items, send_default_pii, include_prompts ): """Test that async embedding methods (aembed_documents, aembed_query) are properly traced.""" try: @@ -1486,7 +1543,7 @@ async def test_langchain_embeddings_async( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") async def mock_aembed_documents(self, texts): return [[0.1, 0.2, 0.3] for _ in texts] @@ -1512,38 +1569,41 @@ async def mock_aembed_documents(self, texts): assert len(result) == 2 mock_aembed.assert_called_once() - # Check captured events - assert len(events) >= 1 - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] # Find embeddings span embeddings_spans = [ - span for span in tx.get("spans", []) if span.get("op") == "gen_ai.embeddings" + span + for span in spans + if span["attributes"].get("sentry.op") == "gen_ai.embeddings" ] assert len(embeddings_spans) == 1 embeddings_span = embeddings_spans[0] - assert embeddings_span["description"] == "embeddings text-embedding-ada-002" - assert embeddings_span["origin"] == "auto.ai.langchain" - assert embeddings_span["data"]["gen_ai.operation.name"] == "embeddings" - assert embeddings_span["data"]["gen_ai.request.model"] == "text-embedding-ada-002" + assert embeddings_span["name"] == "embeddings text-embedding-ada-002" + assert embeddings_span["attributes"]["sentry.origin"] == "auto.ai.langchain" + assert embeddings_span["attributes"]["gen_ai.operation.name"] == "embeddings" + assert ( + 
embeddings_span["attributes"]["gen_ai.request.model"] + == "text-embedding-ada-002" + ) # Check if input is captured based on PII settings if send_default_pii and include_prompts: - assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT in embeddings_span["data"] - input_data = embeddings_span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] + assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT in embeddings_span["attributes"] + input_data = embeddings_span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] # Could be serialized as string if isinstance(input_data, str): assert "Async hello" in input_data or "Async test document" in input_data else: assert "Async hello" in input_data or "Async test document" in input_data else: - assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in embeddings_span.get("data", {}) + assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in embeddings_span.get( + "attributes", {} + ) @pytest.mark.asyncio -async def test_langchain_embeddings_aembed_query(sentry_init, capture_events): +async def test_langchain_embeddings_aembed_query(sentry_init, capture_items): """Test that aembed_query method is properly traced.""" try: from langchain_openai import OpenAIEmbeddings @@ -1555,7 +1615,7 @@ async def test_langchain_embeddings_aembed_query(sentry_init, capture_events): traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") async def mock_aembed_query(self, text): return [0.1, 0.2, 0.3] @@ -1579,24 +1639,25 @@ async def mock_aembed_query(self, text): assert len(result) == 3 mock_aembed.assert_called_once() - # Check captured events - assert len(events) >= 1 - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] # Find embeddings span embeddings_spans = [ - span for span in tx.get("spans", []) if span.get("op") == "gen_ai.embeddings" + span + for span in spans + if span["attributes"].get("sentry.op") == "gen_ai.embeddings" ] assert len(embeddings_spans) == 1 
embeddings_span = embeddings_spans[0] - assert embeddings_span["data"]["gen_ai.operation.name"] == "embeddings" - assert embeddings_span["data"]["gen_ai.request.model"] == "text-embedding-ada-002" + assert embeddings_span["attributes"]["gen_ai.operation.name"] == "embeddings" + assert ( + embeddings_span["attributes"]["gen_ai.request.model"] + == "text-embedding-ada-002" + ) # Check if input is captured - assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT in embeddings_span["data"] - input_data = embeddings_span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] + assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT in embeddings_span["attributes"] + input_data = embeddings_span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] # Could be serialized as string if isinstance(input_data, str): assert "Async query test" in input_data @@ -1604,7 +1665,7 @@ async def mock_aembed_query(self, text): assert "Async query test" in input_data -def test_langchain_embeddings_no_model_name(sentry_init, capture_events): +def test_langchain_embeddings_no_model_name(sentry_init, capture_items): """Test embeddings when model name is not available.""" try: from langchain_openai import OpenAIEmbeddings @@ -1615,7 +1676,7 @@ def test_langchain_embeddings_no_model_name(sentry_init, capture_events): integrations=[LangchainIntegration(include_prompts=False)], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") # Mock the actual API call and remove model attribute with mock.patch.object( @@ -1635,28 +1696,26 @@ def test_langchain_embeddings_no_model_name(sentry_init, capture_events): with start_transaction(name="test_embeddings_no_model"): embeddings.embed_documents(["Test"]) - # Check captured events - assert len(events) >= 1 - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] # Find embeddings span embeddings_spans = [ - span for span in tx.get("spans", []) if span.get("op") == "gen_ai.embeddings" + span + 
for span in spans + if span["attributes"].get("sentry.op") == "gen_ai.embeddings" ] assert len(embeddings_spans) == 1 embeddings_span = embeddings_spans[0] - assert embeddings_span["description"] == "embeddings" - assert embeddings_span["data"]["gen_ai.operation.name"] == "embeddings" + assert embeddings_span["name"] == "embeddings" + assert embeddings_span["attributes"]["gen_ai.operation.name"] == "embeddings" # Model name should not be set if not available assert ( - "gen_ai.request.model" not in embeddings_span["data"] - or embeddings_span["data"]["gen_ai.request.model"] is None + "gen_ai.request.model" not in embeddings_span["attributes"] + or embeddings_span["attributes"]["gen_ai.request.model"] is None ) -def test_langchain_embeddings_integration_disabled(sentry_init, capture_events): +def test_langchain_embeddings_integration_disabled(sentry_init, capture_items): """Test that embeddings are not traced when integration is disabled.""" try: from langchain_openai import OpenAIEmbeddings @@ -1665,7 +1724,7 @@ def test_langchain_embeddings_integration_disabled(sentry_init, capture_events): # Initialize without LangchainIntegration sentry_init(traces_sample_rate=1.0) - events = capture_events() + items = capture_items("transaction", "span") with mock.patch.object( OpenAIEmbeddings, @@ -1680,18 +1739,17 @@ def test_langchain_embeddings_integration_disabled(sentry_init, capture_events): embeddings.embed_documents(["Test"]) # Check that no embeddings spans were created - if events: - tx = events[0] - embeddings_spans = [ - span - for span in tx.get("spans", []) - if span.get("op") == "gen_ai.embeddings" - ] - # Should be empty since integration is disabled - assert len(embeddings_spans) == 0 + spans = [item.payload for item in items if item.type == "span"] + embeddings_spans = [ + span + for span in spans + if span["attributes"].get("sentry.op") == "gen_ai.embeddings" + ] + # Should be empty since integration is disabled + assert len(embeddings_spans) == 0 -def 
test_langchain_embeddings_multiple_providers(sentry_init, capture_events): +def test_langchain_embeddings_multiple_providers(sentry_init, capture_items): """Test that embeddings work with different providers.""" try: from langchain_openai import OpenAIEmbeddings, AzureOpenAIEmbeddings @@ -1703,7 +1761,7 @@ def test_langchain_embeddings_multiple_providers(sentry_init, capture_events): traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") # Mock both providers with mock.patch.object( @@ -1731,26 +1789,24 @@ def test_langchain_embeddings_multiple_providers(sentry_init, capture_events): openai_embeddings.embed_documents(["OpenAI test"]) azure_embeddings.embed_documents(["Azure test"]) - # Check captured events - assert len(events) >= 1 - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] # Find embeddings spans embeddings_spans = [ - span for span in tx.get("spans", []) if span.get("op") == "gen_ai.embeddings" + span + for span in spans + if span["attributes"].get("sentry.op") == "gen_ai.embeddings" ] # Should have 2 spans, one for each provider assert len(embeddings_spans) == 2 # Verify both spans have proper data for span in embeddings_spans: - assert span["data"]["gen_ai.operation.name"] == "embeddings" - assert span["data"]["gen_ai.request.model"] == "text-embedding-ada-002" - assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT in span["data"] + assert span["attributes"]["gen_ai.operation.name"] == "embeddings" + assert span["attributes"]["gen_ai.request.model"] == "text-embedding-ada-002" + assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT in span["attributes"] -def test_langchain_embeddings_error_handling(sentry_init, capture_events): +def test_langchain_embeddings_error_handling(sentry_init, capture_items): """Test that errors in embeddings are properly captured.""" try: from langchain_openai import OpenAIEmbeddings @@ -1762,7 +1818,7 @@ def 
test_langchain_embeddings_error_handling(sentry_init, capture_events): traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") # Mock the API call to raise an error with mock.patch.object( @@ -1781,15 +1837,16 @@ def test_langchain_embeddings_error_handling(sentry_init, capture_events): with pytest.raises(ValueError): embeddings.embed_documents(["Test"]) - # The error should be captured - assert len(events) >= 1 - # We should have both the transaction and potentially an error event - [e for e in events if e.get("level") == "error"] + [ + item.payload + for item in items + if item.type == "event" and item.payload.get("level") == "error" + ] # Note: errors might not be auto-captured depending on SDK settings, # but the span should still be created -def test_langchain_embeddings_multiple_calls(sentry_init, capture_events): +def test_langchain_embeddings_multiple_calls(sentry_init, capture_items): """Test that multiple embeddings calls within a transaction are all traced.""" try: from langchain_openai import OpenAIEmbeddings @@ -1801,7 +1858,7 @@ def test_langchain_embeddings_multiple_calls(sentry_init, capture_events): traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") # Mock the actual API calls with mock.patch.object( @@ -1828,32 +1885,31 @@ def test_langchain_embeddings_multiple_calls(sentry_init, capture_events): # Call embed_documents again embeddings.embed_documents(["Third batch"]) - # Check captured events - assert len(events) >= 1 - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] # Find embeddings spans - should have 3 (2 embed_documents + 1 embed_query) embeddings_spans = [ - span for span in tx.get("spans", []) if span.get("op") == "gen_ai.embeddings" + span + for span in spans + if span["attributes"].get("sentry.op") == "gen_ai.embeddings" ] 
assert len(embeddings_spans) == 3 # Verify all spans have proper data for span in embeddings_spans: - assert span["data"]["gen_ai.operation.name"] == "embeddings" - assert span["data"]["gen_ai.request.model"] == "text-embedding-ada-002" - assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT in span["data"] + assert span["attributes"]["gen_ai.operation.name"] == "embeddings" + assert span["attributes"]["gen_ai.request.model"] == "text-embedding-ada-002" + assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT in span["attributes"] # Verify the input data is different for each span input_data_list = [ - span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] for span in embeddings_spans + span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] + for span in embeddings_spans ] # They should all be different (different inputs) assert len(set(str(data) for data in input_data_list)) == 3 -def test_langchain_embeddings_span_hierarchy(sentry_init, capture_events): +def test_langchain_embeddings_span_hierarchy(sentry_init, capture_items): """Test that embeddings spans are properly nested within parent spans.""" try: from langchain_openai import OpenAIEmbeddings @@ -1865,7 +1921,7 @@ def test_langchain_embeddings_span_hierarchy(sentry_init, capture_events): traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") # Mock the actual API call with mock.patch.object( @@ -1884,15 +1940,15 @@ def test_langchain_embeddings_span_hierarchy(sentry_init, capture_events): with sentry_sdk.start_span(op="custom", name="custom operation"): embeddings.embed_documents(["Test within custom span"]) - # Check captured events - assert len(events) >= 1 - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] # Find all spans embeddings_spans = [ - span for span in tx.get("spans", []) if span.get("op") == "gen_ai.embeddings" + span + for span in spans + if span["attributes"].get("sentry.op") == 
"gen_ai.embeddings" ] + + tx = next(item.payload for item in items if item.type == "transaction") custom_spans = [span for span in tx.get("spans", []) if span.get("op") == "custom"] assert len(embeddings_spans) == 1 @@ -1902,11 +1958,11 @@ def test_langchain_embeddings_span_hierarchy(sentry_init, capture_events): embeddings_span = embeddings_spans[0] custom_span = custom_spans[0] - assert embeddings_span["data"]["gen_ai.operation.name"] == "embeddings" + assert embeddings_span["attributes"]["gen_ai.operation.name"] == "embeddings" assert custom_span["description"] == "custom operation" -def test_langchain_embeddings_with_list_and_string_inputs(sentry_init, capture_events): +def test_langchain_embeddings_with_list_and_string_inputs(sentry_init, capture_items): """Test that embeddings correctly handle both list and string inputs.""" try: from langchain_openai import OpenAIEmbeddings @@ -1918,7 +1974,7 @@ def test_langchain_embeddings_with_list_and_string_inputs(sentry_init, capture_e traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") # Mock the actual API calls with mock.patch.object( @@ -1943,21 +1999,19 @@ def test_langchain_embeddings_with_list_and_string_inputs(sentry_init, capture_e # embed_query takes a string embeddings.embed_query("Single string query") - # Check captured events - assert len(events) >= 1 - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] # Find embeddings spans embeddings_spans = [ - span for span in tx.get("spans", []) if span.get("op") == "gen_ai.embeddings" + span + for span in spans + if span["attributes"].get("sentry.op") == "gen_ai.embeddings" ] assert len(embeddings_spans) == 2 # Both should have input data captured as lists for span in embeddings_spans: - assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT in span["data"] - input_data = span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] + assert 
SPANDATA.GEN_AI_EMBEDDINGS_INPUT in span["attributes"] + input_data = span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] # Input should be normalized to list format if isinstance(input_data, str): # If serialized, should contain the input text @@ -1975,7 +2029,7 @@ def test_langchain_embeddings_with_list_and_string_inputs(sentry_init, capture_e ) def test_langchain_response_model_extraction( sentry_init, - capture_events, + capture_items, response_metadata_model, expected_model, ): @@ -1984,7 +2038,7 @@ def test_langchain_response_model_extraction( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") callback = SentryLangchainCallback(max_span_map_size=100, include_prompts=True) @@ -2009,25 +2063,22 @@ def test_langchain_response_model_extraction( response = Mock(generations=[[generation]]) callback.on_llm_end(response=response, run_id=run_id) - assert len(events) > 0 - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] llm_spans = [ span - for span in tx.get("spans", []) - if span.get("op") == "gen_ai.text_completion" + for span in spans + if span["attributes"].get("sentry.op") == "gen_ai.text_completion" ] assert len(llm_spans) > 0 llm_span = llm_spans[0] - assert llm_span["data"]["gen_ai.operation.name"] == "text_completion" + assert llm_span["attributes"]["gen_ai.operation.name"] == "text_completion" if expected_model is not None: - assert SPANDATA.GEN_AI_RESPONSE_MODEL in llm_span["data"] - assert llm_span["data"][SPANDATA.GEN_AI_RESPONSE_MODEL] == expected_model + assert SPANDATA.GEN_AI_RESPONSE_MODEL in llm_span["attributes"] + assert llm_span["attributes"][SPANDATA.GEN_AI_RESPONSE_MODEL] == expected_model else: - assert SPANDATA.GEN_AI_RESPONSE_MODEL not in llm_span.get("data", {}) + assert SPANDATA.GEN_AI_RESPONSE_MODEL not in llm_span.get("attributes", {}) # Tests for multimodal content transformation functions @@ 
-2286,13 +2337,13 @@ def test_transform_google_file_data(self): ], ) def test_langchain_ai_system_detection( - sentry_init, capture_events, ai_type, expected_system + sentry_init, capture_items, ai_type, expected_system ): sentry_init( integrations=[LangchainIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") callback = SentryLangchainCallback(max_span_map_size=100, include_prompts=True) @@ -2312,23 +2363,20 @@ def test_langchain_ai_system_detection( response = Mock(generations=[[generation]]) callback.on_llm_end(response=response, run_id=run_id) - assert len(events) > 0 - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] llm_spans = [ span - for span in tx.get("spans", []) - if span.get("op") == "gen_ai.text_completion" + for span in spans + if span["attributes"].get("sentry.op") == "gen_ai.text_completion" ] assert len(llm_spans) > 0 llm_span = llm_spans[0] if expected_system is not None: - assert llm_span["data"][SPANDATA.GEN_AI_SYSTEM] == expected_system + assert llm_span["attributes"][SPANDATA.GEN_AI_SYSTEM] == expected_system else: - assert SPANDATA.GEN_AI_SYSTEM not in llm_span.get("data", {}) + assert SPANDATA.GEN_AI_SYSTEM not in llm_span.get("attributes", {}) class TestTransformLangchainMessageContent: From b2542976f0f43bd1160f07f2a6783919d9861588 Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Fri, 17 Apr 2026 10:35:14 +0200 Subject: [PATCH 16/36] test langgraph --- .../integrations/langgraph/test_langgraph.py | 386 ++++++++++-------- 1 file changed, 205 insertions(+), 181 deletions(-) diff --git a/tests/integrations/langgraph/test_langgraph.py b/tests/integrations/langgraph/test_langgraph.py index 2a385d8a78..e1a3baa0a8 100644 --- a/tests/integrations/langgraph/test_langgraph.py +++ b/tests/integrations/langgraph/test_langgraph.py @@ -147,7 +147,7 @@ def test_langgraph_integration_init(): ], ) def 
test_state_graph_compile( - sentry_init, capture_events, send_default_pii, include_prompts + sentry_init, capture_items, send_default_pii, include_prompts ): """Test StateGraph.compile() wrapper creates proper create_agent span.""" sentry_init( @@ -155,7 +155,7 @@ def test_state_graph_compile( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") graph = MockStateGraph() def original_compile(self, *args, **kwargs): @@ -171,21 +171,23 @@ def original_compile(self, *args, **kwargs): assert compiled_graph is not None assert compiled_graph.name == "test_graph" - tx = events[0] - assert tx["type"] == "transaction" - - agent_spans = [span for span in tx["spans"] if span["op"] == OP.GEN_AI_CREATE_AGENT] + spans = [item.payload for item in items if item.type == "span"] + agent_spans = [ + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_CREATE_AGENT + ] assert len(agent_spans) == 1 agent_span = agent_spans[0] - assert agent_span["description"] == "create_agent test_graph" - assert agent_span["origin"] == "auto.ai.langgraph" - assert agent_span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "create_agent" - assert agent_span["data"][SPANDATA.GEN_AI_AGENT_NAME] == "test_graph" - assert agent_span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "test-model" - assert SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS in agent_span["data"] - - tools_data = agent_span["data"][SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS] + assert agent_span["name"] == "create_agent test_graph" + assert agent_span["attributes"]["sentry.origin"] == "auto.ai.langgraph" + assert agent_span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "create_agent" + assert agent_span["attributes"][SPANDATA.GEN_AI_AGENT_NAME] == "test_graph" + assert agent_span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "test-model" + assert SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS in agent_span["attributes"] + + tools_data = 
agent_span["attributes"][SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS] assert tools_data == ["search_tool", "calculator"] assert len(tools_data) == 2 assert "search_tool" in tools_data @@ -201,14 +203,14 @@ def original_compile(self, *args, **kwargs): (False, False), ], ) -def test_pregel_invoke(sentry_init, capture_events, send_default_pii, include_prompts): +def test_pregel_invoke(sentry_init, capture_items, send_default_pii, include_prompts): """Test Pregel.invoke() wrapper creates proper invoke_agent span.""" sentry_init( integrations=[LanggraphIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") test_state = { "messages": [ @@ -245,26 +247,26 @@ def original_invoke(self, *args, **kwargs): assert result is not None - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] invoke_spans = [ - span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_span = invoke_spans[0] - assert invoke_span["description"] == "invoke_agent test_graph" - assert invoke_span["origin"] == "auto.ai.langgraph" - assert invoke_span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "invoke_agent" - assert invoke_span["data"][SPANDATA.GEN_AI_PIPELINE_NAME] == "test_graph" - assert invoke_span["data"][SPANDATA.GEN_AI_AGENT_NAME] == "test_graph" + assert invoke_span["name"] == "invoke_agent test_graph" + assert invoke_span["attributes"]["sentry.origin"] == "auto.ai.langgraph" + assert invoke_span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "invoke_agent" + assert invoke_span["attributes"][SPANDATA.GEN_AI_PIPELINE_NAME] == "test_graph" + assert invoke_span["attributes"][SPANDATA.GEN_AI_AGENT_NAME] == "test_graph" if send_default_pii and include_prompts: - assert 
SPANDATA.GEN_AI_REQUEST_MESSAGES in invoke_span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT in invoke_span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in invoke_span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT in invoke_span["attributes"] - request_messages = invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + request_messages = invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] if isinstance(request_messages, str): import json @@ -273,11 +275,11 @@ def original_invoke(self, *args, **kwargs): assert len(request_messages) == 1 assert request_messages[0]["content"] == "Of course! How can I assist you?" - response_text = invoke_span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + response_text = invoke_span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] assert response_text == expected_assistant_response - assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS in invoke_span["data"] - tool_calls_data = invoke_span["data"][SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS] + assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS in invoke_span["attributes"] + tool_calls_data = invoke_span["attributes"][SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS] if isinstance(tool_calls_data, str): import json @@ -287,9 +289,11 @@ def original_invoke(self, *args, **kwargs): assert tool_calls_data[0]["id"] == "call_test_123" assert tool_calls_data[0]["function"]["name"] == "search_tool" else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in invoke_span.get("data", {}) - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in invoke_span.get("data", {}) - assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS not in invoke_span.get("data", {}) + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in invoke_span.get("attributes", {}) + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in invoke_span.get("attributes", {}) + assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS not in invoke_span.get( + "attributes", {} + ) @pytest.mark.parametrize( @@ -301,14 +305,14 @@ def original_invoke(self, *args, **kwargs): (False, False), ], ) -def 
test_pregel_ainvoke(sentry_init, capture_events, send_default_pii, include_prompts): +def test_pregel_ainvoke(sentry_init, capture_items, send_default_pii, include_prompts): """Test Pregel.ainvoke() async wrapper creates proper invoke_agent span.""" sentry_init( integrations=[LanggraphIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") test_state = {"messages": [MockMessage("What's the weather like?", name="user")]} pregel = MockPregelInstance("async_graph") @@ -341,30 +345,30 @@ async def run_test(): result = asyncio.run(run_test()) assert result is not None - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] invoke_spans = [ - span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_span = invoke_spans[0] - assert invoke_span["description"] == "invoke_agent async_graph" - assert invoke_span["origin"] == "auto.ai.langgraph" - assert invoke_span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "invoke_agent" - assert invoke_span["data"][SPANDATA.GEN_AI_PIPELINE_NAME] == "async_graph" - assert invoke_span["data"][SPANDATA.GEN_AI_AGENT_NAME] == "async_graph" + assert invoke_span["name"] == "invoke_agent async_graph" + assert invoke_span["attributes"]["sentry.origin"] == "auto.ai.langgraph" + assert invoke_span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "invoke_agent" + assert invoke_span["attributes"][SPANDATA.GEN_AI_PIPELINE_NAME] == "async_graph" + assert invoke_span["attributes"][SPANDATA.GEN_AI_AGENT_NAME] == "async_graph" if send_default_pii and include_prompts: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in invoke_span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT in invoke_span["data"] + assert 
SPANDATA.GEN_AI_REQUEST_MESSAGES in invoke_span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT in invoke_span["attributes"] - response_text = invoke_span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + response_text = invoke_span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] assert response_text == expected_assistant_response - assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS in invoke_span["data"] - tool_calls_data = invoke_span["data"][SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS] + assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS in invoke_span["attributes"] + tool_calls_data = invoke_span["attributes"][SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS] if isinstance(tool_calls_data, str): import json @@ -374,19 +378,21 @@ async def run_test(): assert tool_calls_data[0]["id"] == "call_weather_456" assert tool_calls_data[0]["function"]["name"] == "get_weather" else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in invoke_span.get("data", {}) - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in invoke_span.get("data", {}) - assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS not in invoke_span.get("data", {}) + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in invoke_span.get("attributes", {}) + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in invoke_span.get("attributes", {}) + assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS not in invoke_span.get( + "attributes", {} + ) -def test_pregel_invoke_error(sentry_init, capture_events): +def test_pregel_invoke_error(sentry_init, capture_items): """Test error handling during graph execution.""" sentry_init( integrations=[LanggraphIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") test_state = {"messages": [MockMessage("This will fail")]} pregel = MockPregelInstance("error_graph") @@ -397,25 +403,26 @@ def original_invoke(self, *args, **kwargs): wrapped_invoke = _wrap_pregel_invoke(original_invoke) wrapped_invoke(pregel, test_state) - tx = events[0] + spans = [item.payload for 
item in items if item.type == "span"] invoke_spans = [ - span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_span = invoke_spans[0] - assert invoke_span.get("status") == "internal_error" - assert invoke_span.get("tags", {}).get("status") == "internal_error" + assert invoke_span.get("status") == "error" -def test_pregel_ainvoke_error(sentry_init, capture_events): +def test_pregel_ainvoke_error(sentry_init, capture_items): """Test error handling during async graph execution.""" sentry_init( integrations=[LanggraphIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") test_state = {"messages": [MockMessage("This will fail async")]} pregel = MockPregelInstance("async_error_graph") @@ -431,24 +438,25 @@ async def run_error_test(): asyncio.run(run_error_test()) - tx = events[0] + spans = [item.payload for item in items if item.type == "span"] invoke_spans = [ - span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_span = invoke_spans[0] - assert invoke_span.get("status") == "internal_error" - assert invoke_span.get("tags", {}).get("status") == "internal_error" + assert invoke_span.get("status") == "error" -def test_span_origin(sentry_init, capture_events): +def test_span_origin(sentry_init, capture_items): """Test that span origins are correctly set.""" sentry_init( integrations=[LanggraphIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") graph = MockStateGraph() @@ -461,16 +469,17 @@ def original_compile(self, *args, **kwargs): wrapped_compile = _wrap_state_graph_compile(original_compile) wrapped_compile(graph) - tx = 
events[0] + tx = next(item.payload for item in items if item.type == "transaction") assert tx["contexts"]["trace"]["origin"] == "manual" - for span in tx["spans"]: - assert span["origin"] == "auto.ai.langgraph" + spans = [item.payload for item in items if item.type == "span"] + for span in spans: + assert span["attributes"]["sentry.origin"] == "auto.ai.langgraph" @pytest.mark.parametrize("graph_name", ["my_graph", None, ""]) def test_pregel_invoke_with_different_graph_names( - sentry_init, capture_events, graph_name + sentry_init, capture_items, graph_name ): """Test Pregel.invoke() with different graph name scenarios.""" sentry_init( @@ -478,7 +487,7 @@ def test_pregel_invoke_with_different_graph_names( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") pregel = MockPregelInstance(graph_name) if graph_name else MockPregelInstance() if not graph_name: @@ -492,25 +501,27 @@ def original_invoke(self, *args, **kwargs): wrapped_invoke = _wrap_pregel_invoke(original_invoke) wrapped_invoke(pregel, {"messages": []}) - tx = events[0] + spans = [item.payload for item in items if item.type == "span"] invoke_spans = [ - span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_span = invoke_spans[0] if graph_name and graph_name.strip(): - assert invoke_span["description"] == "invoke_agent my_graph" - assert invoke_span["data"][SPANDATA.GEN_AI_PIPELINE_NAME] == graph_name - assert invoke_span["data"][SPANDATA.GEN_AI_AGENT_NAME] == graph_name + assert invoke_span["name"] == "invoke_agent my_graph" + assert invoke_span["attributes"][SPANDATA.GEN_AI_PIPELINE_NAME] == graph_name + assert invoke_span["attributes"][SPANDATA.GEN_AI_AGENT_NAME] == graph_name else: - assert invoke_span["description"] == "invoke_agent" - assert SPANDATA.GEN_AI_PIPELINE_NAME not in 
invoke_span.get("data", {}) - assert SPANDATA.GEN_AI_AGENT_NAME not in invoke_span.get("data", {}) + assert invoke_span["name"] == "invoke_agent" + assert SPANDATA.GEN_AI_PIPELINE_NAME not in invoke_span.get("attributes", {}) + assert SPANDATA.GEN_AI_AGENT_NAME not in invoke_span.get("attributes", {}) -def test_pregel_invoke_span_includes_usage_data(sentry_init, capture_events): +def test_pregel_invoke_span_includes_usage_data(sentry_init, capture_items): """ Test that invoke_agent spans include aggregated usage data from context_wrapper. This verifies the new functionality added to track token usage in invoke_agent spans. @@ -519,7 +530,7 @@ def test_pregel_invoke_span_includes_usage_data(sentry_init, capture_events): integrations=[LanggraphIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") test_state = { "messages": [ @@ -564,29 +575,29 @@ def original_invoke(self, *args, **kwargs): assert result is not None - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] invoke_spans = [ - span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_agent_span = invoke_spans[0] # Verify invoke_agent span has usage data - assert invoke_agent_span["description"] == "invoke_agent test_graph" - assert "gen_ai.usage.input_tokens" in invoke_agent_span["data"] - assert "gen_ai.usage.output_tokens" in invoke_agent_span["data"] - assert "gen_ai.usage.total_tokens" in invoke_agent_span["data"] + assert invoke_agent_span["name"] == "invoke_agent test_graph" + assert "gen_ai.usage.input_tokens" in invoke_agent_span["attributes"] + assert "gen_ai.usage.output_tokens" in invoke_agent_span["attributes"] + assert "gen_ai.usage.total_tokens" in invoke_agent_span["attributes"] # The usage should match the mock_usage values 
(aggregated across all calls) - assert invoke_agent_span["data"]["gen_ai.usage.input_tokens"] == 10 - assert invoke_agent_span["data"]["gen_ai.usage.output_tokens"] == 20 - assert invoke_agent_span["data"]["gen_ai.usage.total_tokens"] == 30 + assert invoke_agent_span["attributes"]["gen_ai.usage.input_tokens"] == 10 + assert invoke_agent_span["attributes"]["gen_ai.usage.output_tokens"] == 20 + assert invoke_agent_span["attributes"]["gen_ai.usage.total_tokens"] == 30 -def test_pregel_ainvoke_span_includes_usage_data(sentry_init, capture_events): +def test_pregel_ainvoke_span_includes_usage_data(sentry_init, capture_items): """ Test that invoke_agent spans include aggregated usage data from context_wrapper. This verifies the new functionality added to track token usage in invoke_agent spans. @@ -595,7 +606,7 @@ def test_pregel_ainvoke_span_includes_usage_data(sentry_init, capture_events): integrations=[LanggraphIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") test_state = { "messages": [ @@ -643,29 +654,29 @@ async def run_test(): result = asyncio.run(run_test()) assert result is not None - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] invoke_spans = [ - span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_agent_span = invoke_spans[0] # Verify invoke_agent span has usage data - assert invoke_agent_span["description"] == "invoke_agent test_graph" - assert "gen_ai.usage.input_tokens" in invoke_agent_span["data"] - assert "gen_ai.usage.output_tokens" in invoke_agent_span["data"] - assert "gen_ai.usage.total_tokens" in invoke_agent_span["data"] + assert invoke_agent_span["name"] == "invoke_agent test_graph" + assert "gen_ai.usage.input_tokens" in invoke_agent_span["attributes"] + assert 
"gen_ai.usage.output_tokens" in invoke_agent_span["attributes"] + assert "gen_ai.usage.total_tokens" in invoke_agent_span["attributes"] # The usage should match the mock_usage values (aggregated across all calls) - assert invoke_agent_span["data"]["gen_ai.usage.input_tokens"] == 10 - assert invoke_agent_span["data"]["gen_ai.usage.output_tokens"] == 20 - assert invoke_agent_span["data"]["gen_ai.usage.total_tokens"] == 30 + assert invoke_agent_span["attributes"]["gen_ai.usage.input_tokens"] == 10 + assert invoke_agent_span["attributes"]["gen_ai.usage.output_tokens"] == 20 + assert invoke_agent_span["attributes"]["gen_ai.usage.total_tokens"] == 30 -def test_pregel_invoke_multiple_llm_calls_aggregate_usage(sentry_init, capture_events): +def test_pregel_invoke_multiple_llm_calls_aggregate_usage(sentry_init, capture_items): """ Test that invoke_agent spans show aggregated usage across multiple LLM calls (e.g., when tools are used and multiple API calls are made). @@ -674,7 +685,7 @@ def test_pregel_invoke_multiple_llm_calls_aggregate_usage(sentry_init, capture_e integrations=[LanggraphIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") test_state = { "messages": [ @@ -730,23 +741,23 @@ def original_invoke(self, *args, **kwargs): assert result is not None - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] invoke_spans = [ - span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_agent_span = invoke_spans[0] # Verify invoke_agent span has aggregated usage from both API calls # Total: 10 + 20 = 30 input tokens, 5 + 15 = 20 output tokens, 15 + 35 = 50 total - assert invoke_agent_span["data"]["gen_ai.usage.input_tokens"] == 30 - assert invoke_agent_span["data"]["gen_ai.usage.output_tokens"] == 20 - 
assert invoke_agent_span["data"]["gen_ai.usage.total_tokens"] == 50 + assert invoke_agent_span["attributes"]["gen_ai.usage.input_tokens"] == 30 + assert invoke_agent_span["attributes"]["gen_ai.usage.output_tokens"] == 20 + assert invoke_agent_span["attributes"]["gen_ai.usage.total_tokens"] == 50 -def test_pregel_ainvoke_multiple_llm_calls_aggregate_usage(sentry_init, capture_events): +def test_pregel_ainvoke_multiple_llm_calls_aggregate_usage(sentry_init, capture_items): """ Test that invoke_agent spans show aggregated usage across multiple LLM calls (e.g., when tools are used and multiple API calls are made). @@ -755,7 +766,7 @@ def test_pregel_ainvoke_multiple_llm_calls_aggregate_usage(sentry_init, capture_ integrations=[LanggraphIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") test_state = { "messages": [ @@ -814,23 +825,23 @@ async def run_test(): result = asyncio.run(run_test()) assert result is not None - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] invoke_spans = [ - span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_agent_span = invoke_spans[0] # Verify invoke_agent span has aggregated usage from both API calls # Total: 10 + 20 = 30 input tokens, 5 + 15 = 20 output tokens, 15 + 35 = 50 total - assert invoke_agent_span["data"]["gen_ai.usage.input_tokens"] == 30 - assert invoke_agent_span["data"]["gen_ai.usage.output_tokens"] == 20 - assert invoke_agent_span["data"]["gen_ai.usage.total_tokens"] == 50 + assert invoke_agent_span["attributes"]["gen_ai.usage.input_tokens"] == 30 + assert invoke_agent_span["attributes"]["gen_ai.usage.output_tokens"] == 20 + assert invoke_agent_span["attributes"]["gen_ai.usage.total_tokens"] == 50 -def 
test_pregel_invoke_span_includes_response_model(sentry_init, capture_events): +def test_pregel_invoke_span_includes_response_model(sentry_init, capture_items): """ Test that invoke_agent spans include the response model. When an agent makes multiple LLM calls, it should report the last model used. @@ -839,7 +850,7 @@ def test_pregel_invoke_span_includes_response_model(sentry_init, capture_events) integrations=[LanggraphIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") test_state = { "messages": [ @@ -884,23 +895,25 @@ def original_invoke(self, *args, **kwargs): assert result is not None - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] invoke_spans = [ - span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_agent_span = invoke_spans[0] # Verify invoke_agent span has response model - assert invoke_agent_span["description"] == "invoke_agent test_graph" - assert "gen_ai.response.model" in invoke_agent_span["data"] - assert invoke_agent_span["data"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" + assert invoke_agent_span["name"] == "invoke_agent test_graph" + assert "gen_ai.response.model" in invoke_agent_span["attributes"] + assert ( + invoke_agent_span["attributes"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" + ) -def test_pregel_ainvoke_span_includes_response_model(sentry_init, capture_events): +def test_pregel_ainvoke_span_includes_response_model(sentry_init, capture_items): """ Test that invoke_agent spans include the response model. When an agent makes multiple LLM calls, it should report the last model used. 
@@ -909,7 +922,7 @@ def test_pregel_ainvoke_span_includes_response_model(sentry_init, capture_events integrations=[LanggraphIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") test_state = { "messages": [ @@ -957,23 +970,25 @@ async def run_test(): result = asyncio.run(run_test()) assert result is not None - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] invoke_spans = [ - span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_agent_span = invoke_spans[0] # Verify invoke_agent span has response model - assert invoke_agent_span["description"] == "invoke_agent test_graph" - assert "gen_ai.response.model" in invoke_agent_span["data"] - assert invoke_agent_span["data"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" + assert invoke_agent_span["name"] == "invoke_agent test_graph" + assert "gen_ai.response.model" in invoke_agent_span["attributes"] + assert ( + invoke_agent_span["attributes"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" + ) -def test_pregel_invoke_span_uses_last_response_model(sentry_init, capture_events): +def test_pregel_invoke_span_uses_last_response_model(sentry_init, capture_items): """ Test that when an agent makes multiple LLM calls (e.g., with tools), the invoke_agent span reports the last response model used. 
@@ -982,7 +997,7 @@ def test_pregel_invoke_span_uses_last_response_model(sentry_init, capture_events integrations=[LanggraphIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") test_state = { "messages": [ @@ -1040,22 +1055,24 @@ def original_invoke(self, *args, **kwargs): assert result is not None - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] invoke_spans = [ - span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_agent_span = invoke_spans[0] # Verify invoke_agent span uses the LAST response model - assert "gen_ai.response.model" in invoke_agent_span["data"] - assert invoke_agent_span["data"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" + assert "gen_ai.response.model" in invoke_agent_span["attributes"] + assert ( + invoke_agent_span["attributes"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" + ) -def test_pregel_ainvoke_span_uses_last_response_model(sentry_init, capture_events): +def test_pregel_ainvoke_span_uses_last_response_model(sentry_init, capture_items): """ Test that when an agent makes multiple LLM calls (e.g., with tools), the invoke_agent span reports the last response model used. 
@@ -1064,7 +1081,7 @@ def test_pregel_ainvoke_span_uses_last_response_model(sentry_init, capture_event integrations=[LanggraphIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") test_state = { "messages": [ @@ -1125,19 +1142,21 @@ async def run_test(): result = asyncio.run(run_test()) assert result is not None - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] invoke_spans = [ - span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_agent_span = invoke_spans[0] # Verify invoke_agent span uses the LAST response model - assert "gen_ai.response.model" in invoke_agent_span["data"] - assert invoke_agent_span["data"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" + assert "gen_ai.response.model" in invoke_agent_span["attributes"] + assert ( + invoke_agent_span["attributes"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" + ) def test_complex_message_parsing(): @@ -1187,14 +1206,14 @@ def test_complex_message_parsing(): assert result[2]["function_call"]["name"] == "search" -def test_extraction_functions_complex_scenario(sentry_init, capture_events): +def test_extraction_functions_complex_scenario(sentry_init, capture_items): """Test extraction functions with complex scenarios including multiple messages and edge cases.""" sentry_init( integrations=[LanggraphIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") pregel = MockPregelInstance("complex_graph") test_state = {"messages": [MockMessage("Complex request", name="user")]} @@ -1235,21 +1254,23 @@ def original_invoke(self, *args, **kwargs): assert result is not None - tx = events[0] + spans = [item.payload for item in items if item.type == 
"span"] invoke_spans = [ - span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_span = invoke_spans[0] - assert SPANDATA.GEN_AI_RESPONSE_TEXT in invoke_span["data"] - response_text = invoke_span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + assert SPANDATA.GEN_AI_RESPONSE_TEXT in invoke_span["attributes"] + response_text = invoke_span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] assert response_text == "Final response" - assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS in invoke_span["data"] + assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS in invoke_span["attributes"] import json - tool_calls_data = invoke_span["data"][SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS] + tool_calls_data = invoke_span["attributes"][SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS] if isinstance(tool_calls_data, str): tool_calls_data = json.loads(tool_calls_data) @@ -1260,14 +1281,14 @@ def original_invoke(self, *args, **kwargs): assert tool_calls_data[1]["function"]["name"] == "calculate" -def test_langgraph_message_role_mapping(sentry_init, capture_events): +def test_langgraph_message_role_mapping(sentry_init, capture_items): """Test that Langgraph integration properly maps message roles like 'ai' to 'assistant'""" sentry_init( integrations=[LanggraphIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") # Mock a langgraph message with mixed roles class MockMessage: @@ -1297,17 +1318,18 @@ def __init__(self, content, message_type="human"): ) wrapped_invoke(pregel, state_data) - (event,) = events - span = event["spans"][0] + span = next(item.payload for item in items if item.type == "span") # Verify that the span was created correctly - assert span["op"] == "gen_ai.invoke_agent" + assert span["attributes"]["sentry.op"] == "gen_ai.invoke_agent" # If messages were captured, verify 
role mapping - if SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"]: + if SPANDATA.GEN_AI_REQUEST_MESSAGES in span["attributes"]: import json - stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + stored_messages = json.loads( + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + ) # Find messages with specific content to verify role mapping ai_message = next( @@ -1331,7 +1353,7 @@ def __init__(self, content, message_type="human"): assert "ai" not in roles -def test_langgraph_message_truncation(sentry_init, capture_events): +def test_langgraph_message_truncation(sentry_init, capture_items): """Test that large messages are truncated properly in Langgraph integration.""" import json @@ -1340,7 +1362,7 @@ def test_langgraph_message_truncation(sentry_init, capture_events): traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") large_content = ( "This is a very long message that will exceed our size limits. 
" * 1000 @@ -1365,23 +1387,25 @@ def original_invoke(self, *args, **kwargs): result = wrapped_invoke(pregel, test_state) assert result is not None - assert len(events) > 0 - tx = events[0] - assert tx["type"] == "transaction" + spans = [item.payload for item in items if item.type == "span"] invoke_spans = [ - span for span in tx.get("spans", []) if span.get("op") == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"].get("sentry.op") == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) > 0 invoke_span = invoke_spans[0] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in invoke_span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in invoke_span["attributes"] - messages_data = invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + messages_data = invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] assert isinstance(messages_data, str) parsed_messages = json.loads(messages_data) assert isinstance(parsed_messages, list) assert len(parsed_messages) == 1 assert "small message 5" in str(parsed_messages[0]) + + (tx,) = (item.payload for item in items if item.type == "transaction") assert tx["_meta"]["spans"]["0"]["data"]["gen_ai.request.messages"][""]["len"] == 5 From 6f7a0547707a4ed22b8e99fce7c3c948d7ca74c1 Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Fri, 17 Apr 2026 10:52:20 +0200 Subject: [PATCH 17/36] accept any as sdk version --- .../huggingface_hub/test_huggingface_hub.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/integrations/huggingface_hub/test_huggingface_hub.py b/tests/integrations/huggingface_hub/test_huggingface_hub.py index 6b4402bc52..98abbb00fa 100644 --- a/tests/integrations/huggingface_hub/test_huggingface_hub.py +++ b/tests/integrations/huggingface_hub/test_huggingface_hub.py @@ -596,7 +596,7 @@ def test_text_generation_streaming( "sentry.origin": "auto.ai.huggingface_hub", "sentry.release": mock.ANY, "sentry.sdk.name": "sentry.python", - "sentry.sdk.version": "2.58.0", 
+ "sentry.sdk.version": mock.ANY, "sentry.segment.id": mock.ANY, "sentry.segment.name": "test", "thread.id": mock.ANY, @@ -673,7 +673,7 @@ def test_chat_completion( "sentry.origin": "auto.ai.huggingface_hub", "sentry.release": mock.ANY, "sentry.sdk.name": "sentry.python", - "sentry.sdk.version": "2.58.0", + "sentry.sdk.version": mock.ANY, "sentry.segment.id": mock.ANY, "sentry.segment.name": "test", "thread.id": mock.ANY, @@ -750,7 +750,7 @@ def test_chat_completion_streaming( "sentry.origin": "auto.ai.huggingface_hub", "sentry.release": mock.ANY, "sentry.sdk.name": "sentry.python", - "sentry.sdk.version": "2.58.0", + "sentry.sdk.version": mock.ANY, "sentry.segment.id": mock.ANY, "sentry.segment.name": "test", "thread.id": mock.ANY, @@ -825,7 +825,7 @@ def test_chat_completion_api_error( "sentry.origin": "auto.ai.huggingface_hub", "sentry.release": mock.ANY, "sentry.sdk.name": "sentry.python", - "sentry.sdk.version": "2.58.0", + "sentry.sdk.version": mock.ANY, "sentry.segment.id": mock.ANY, "sentry.segment.name": "test", "thread.id": mock.ANY, @@ -939,7 +939,7 @@ def test_chat_completion_with_tools( "sentry.origin": "auto.ai.huggingface_hub", "sentry.release": mock.ANY, "sentry.sdk.name": "sentry.python", - "sentry.sdk.version": "2.58.0", + "sentry.sdk.version": mock.ANY, "sentry.segment.id": mock.ANY, "sentry.segment.name": "test", "thread.id": mock.ANY, @@ -1035,7 +1035,7 @@ def test_chat_completion_streaming_with_tools( "sentry.origin": "auto.ai.huggingface_hub", "sentry.release": mock.ANY, "sentry.sdk.name": "sentry.python", - "sentry.sdk.version": "2.58.0", + "sentry.sdk.version": mock.ANY, "sentry.segment.id": mock.ANY, "sentry.segment.name": "test", "thread.id": mock.ANY, From 4f871a422c8e6b69abe5160e3629b84550b46f26 Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Fri, 17 Apr 2026 12:46:10 +0200 Subject: [PATCH 18/36] pydantic-ai tests --- .../pydantic_ai/test_pydantic_ai.py | 695 ++++++++++-------- 1 file changed, 369 insertions(+), 326 
deletions(-) diff --git a/tests/integrations/pydantic_ai/test_pydantic_ai.py b/tests/integrations/pydantic_ai/test_pydantic_ai.py index 50ce155f5b..fe34dd0f5d 100644 --- a/tests/integrations/pydantic_ai/test_pydantic_ai.py +++ b/tests/integrations/pydantic_ai/test_pydantic_ai.py @@ -53,7 +53,7 @@ def inner(): @pytest.mark.asyncio -async def test_agent_run_async(sentry_init, capture_events, get_test_agent): +async def test_agent_run_async(sentry_init, capture_items, get_test_agent): """ Test that the integration creates spans for async agent runs. """ @@ -63,7 +63,7 @@ async def test_agent_run_async(sentry_init, capture_events, get_test_agent): send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") test_agent = get_test_agent() result = await test_agent.run("Test input") @@ -71,8 +71,7 @@ async def test_agent_run_async(sentry_init, capture_events, get_test_agent): assert result is not None assert result.output is not None - (transaction,) = events - spans = transaction["spans"] + (transaction,) = (item.payload for item in items if item.type == "transaction") # Verify transaction (the transaction IS the invoke_agent span) assert transaction["transaction"] == "invoke_agent test_agent" @@ -81,28 +80,31 @@ async def test_agent_run_async(sentry_init, capture_events, get_test_agent): # The transaction itself should have invoke_agent data assert transaction["contexts"]["trace"]["op"] == "gen_ai.invoke_agent" + spans = [item.payload for item in items if item.type == "span"] # Find child span types (invoke_agent is the transaction, not a child span) - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] assert len(chat_spans) >= 1 # Check chat span chat_span = chat_spans[0] - assert "chat" in chat_span["description"] - assert chat_span["data"]["gen_ai.operation.name"] == "chat" - assert 
chat_span["data"]["gen_ai.response.streaming"] is False - assert "gen_ai.request.messages" in chat_span["data"] - assert "gen_ai.usage.input_tokens" in chat_span["data"] - assert "gen_ai.usage.output_tokens" in chat_span["data"] + assert "chat" in chat_span["name"] + assert chat_span["attributes"]["gen_ai.operation.name"] == "chat" + assert chat_span["attributes"]["gen_ai.response.streaming"] is False + assert "gen_ai.request.messages" in chat_span["attributes"] + assert "gen_ai.usage.input_tokens" in chat_span["attributes"] + assert "gen_ai.usage.output_tokens" in chat_span["attributes"] @pytest.mark.asyncio -async def test_agent_run_async_model_error(sentry_init, capture_events): +async def test_agent_run_async_model_error(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("event", "transaction", "span") def failing_model(messages, info): raise RuntimeError("model exploded") @@ -115,17 +117,17 @@ def failing_model(messages, info): with pytest.raises(RuntimeError, match="model exploded"): await agent.run("Test input") - (error, transaction) = events + (error,) = (item.payload for item in items if item.type == "event") assert error["level"] == "error" - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] assert len(spans) == 1 - assert spans[0]["status"] == "internal_error" + assert spans[0]["status"] == "error" @pytest.mark.asyncio -async def test_agent_run_async_usage_data(sentry_init, capture_events, get_test_agent): +async def test_agent_run_async_usage_data(sentry_init, capture_items, get_test_agent): """ Test that the invoke_agent span includes token usage and model data. 
""" @@ -135,7 +137,7 @@ async def test_agent_run_async_usage_data(sentry_init, capture_events, get_test_ send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") test_agent = get_test_agent() result = await test_agent.run("Test input") @@ -143,8 +145,7 @@ async def test_agent_run_async_usage_data(sentry_init, capture_events, get_test_ assert result is not None assert result.output is not None - (transaction,) = events - + (transaction,) = (item.payload for item in items if item.type == "transaction") # Verify transaction (the transaction IS the invoke_agent span) assert transaction["transaction"] == "invoke_agent test_agent" @@ -170,7 +171,7 @@ async def test_agent_run_async_usage_data(sentry_init, capture_events, get_test_ assert trace_data["gen_ai.response.model"] == "test" # Test model name -def test_agent_run_sync(sentry_init, capture_events, get_test_agent): +def test_agent_run_sync(sentry_init, capture_items, get_test_agent): """ Test that the integration creates spans for sync agent runs. 
""" @@ -180,7 +181,7 @@ def test_agent_run_sync(sentry_init, capture_events, get_test_agent): send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") test_agent = get_test_agent() result = test_agent.run_sync("Test input") @@ -188,29 +189,31 @@ def test_agent_run_sync(sentry_init, capture_events, get_test_agent): assert result is not None assert result.output is not None - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # Verify transaction + (transaction,) = (item.payload for item in items if item.type == "transaction") assert transaction["transaction"] == "invoke_agent test_agent" assert transaction["contexts"]["trace"]["origin"] == "auto.ai.pydantic_ai" # Find span types - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] assert len(chat_spans) >= 1 # Verify streaming flag is False for sync for chat_span in chat_spans: - assert chat_span["data"]["gen_ai.response.streaming"] is False + assert chat_span["attributes"]["gen_ai.response.streaming"] is False -def test_agent_run_sync_model_error(sentry_init, capture_events): +def test_agent_run_sync_model_error(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("event", "transaction", "span") def failing_model(messages, info): raise RuntimeError("model exploded") @@ -223,17 +226,17 @@ def failing_model(messages, info): with pytest.raises(RuntimeError, match="model exploded"): agent.run_sync("Test input") - (error, transaction) = events + (error,) = (item.payload for item in items if item.type == "event") assert error["level"] == "error" - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] assert len(spans) == 1 - assert spans[0]["status"] == 
"internal_error" + assert spans[0]["status"] == "error" @pytest.mark.asyncio -async def test_agent_run_stream(sentry_init, capture_events, get_test_agent): +async def test_agent_run_stream(sentry_init, capture_items, get_test_agent): """ Test that the integration creates spans for streaming agent runs. """ @@ -243,7 +246,7 @@ async def test_agent_run_stream(sentry_init, capture_events, get_test_agent): send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") test_agent = get_test_agent() async with test_agent.run_stream("Test input") as result: @@ -251,31 +254,33 @@ async def test_agent_run_stream(sentry_init, capture_events, get_test_agent): async for _ in result.stream_output(): pass - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # Verify transaction + (transaction,) = (item.payload for item in items if item.type == "transaction") assert transaction["transaction"] == "invoke_agent test_agent" assert transaction["contexts"]["trace"]["origin"] == "auto.ai.pydantic_ai" # Find chat spans - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] assert len(chat_spans) >= 1 # Verify streaming flag is True for streaming for chat_span in chat_spans: - assert chat_span["data"]["gen_ai.response.streaming"] is True - assert "gen_ai.request.messages" in chat_span["data"] - assert "gen_ai.usage.input_tokens" in chat_span["data"] + assert chat_span["attributes"]["gen_ai.response.streaming"] is True + assert "gen_ai.request.messages" in chat_span["attributes"] + assert "gen_ai.usage.input_tokens" in chat_span["attributes"] # Streaming responses should still have output data assert ( - "gen_ai.response.text" in chat_span["data"] - or "gen_ai.response.model" in chat_span["data"] + "gen_ai.response.text" in chat_span["attributes"] + or "gen_ai.response.model" in 
chat_span["attributes"] ) @pytest.mark.asyncio -async def test_agent_run_stream_events(sentry_init, capture_events, get_test_agent): +async def test_agent_run_stream_events(sentry_init, capture_items, get_test_agent): """ Test that run_stream_events creates spans (it uses run internally, so non-streaming). """ @@ -285,30 +290,31 @@ async def test_agent_run_stream_events(sentry_init, capture_events, get_test_age send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") # Consume all events test_agent = get_test_agent() async for _ in test_agent.run_stream_events("Test input"): pass - (transaction,) = events - # Verify transaction + (transaction,) = (item.payload for item in items if item.type == "transaction") assert transaction["transaction"] == "invoke_agent test_agent" # Find chat spans - spans = transaction["spans"] - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + spans = [item.payload for item in items if item.type == "span"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] assert len(chat_spans) >= 1 # run_stream_events uses run() internally, so streaming should be False for chat_span in chat_spans: - assert chat_span["data"]["gen_ai.response.streaming"] is False + assert chat_span["attributes"]["gen_ai.response.streaming"] is False @pytest.mark.asyncio -async def test_agent_with_tools(sentry_init, capture_events, get_test_agent): +async def test_agent_with_tools(sentry_init, capture_items, get_test_agent): """ Test that tool execution creates execute_tool spans. 
""" @@ -325,34 +331,39 @@ def add_numbers(a: int, b: int) -> int: """Add two numbers together.""" return a + b - events = capture_events() + items = capture_items("transaction", "span") result = await test_agent.run("What is 5 + 3?") assert result is not None - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # Find child span types (invoke_agent is the transaction, not a child span) - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] - tool_spans = [s for s in spans if s["op"] == "gen_ai.execute_tool"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] + tool_spans = [ + s + for s in spans + if s["attributes"].get("sentry.op", "") == "gen_ai.execute_tool" + ] # Should have tool spans assert len(tool_spans) >= 1 # Check tool span tool_span = tool_spans[0] - assert "execute_tool" in tool_span["description"] - assert tool_span["data"]["gen_ai.operation.name"] == "execute_tool" - assert tool_span["data"]["gen_ai.tool.name"] == "add_numbers" - assert "gen_ai.tool.input" in tool_span["data"] - assert "gen_ai.tool.output" in tool_span["data"] + assert "execute_tool" in tool_span["name"] + assert tool_span["attributes"]["gen_ai.operation.name"] == "execute_tool" + assert tool_span["attributes"]["gen_ai.tool.name"] == "add_numbers" + assert "gen_ai.tool.input" in tool_span["attributes"] + assert "gen_ai.tool.output" in tool_span["attributes"] # Check chat spans have available_tools for chat_span in chat_spans: - assert "gen_ai.request.available_tools" in chat_span["data"] - available_tools_str = chat_span["data"]["gen_ai.request.available_tools"] + assert "gen_ai.request.available_tools" in chat_span["attributes"] + available_tools_str = chat_span["attributes"]["gen_ai.request.available_tools"] # Available tools is serialized as a string assert "add_numbers" in available_tools_str @@ -363,7 +374,7 @@ def add_numbers(a: int, b: int) -> int: ) 
@pytest.mark.asyncio async def test_agent_with_tool_model_retry( - sentry_init, capture_events, get_test_agent, handled_tool_call_exceptions + sentry_init, capture_items, get_test_agent, handled_tool_call_exceptions ): """ Test that a handled exception is captured when a tool raises ModelRetry. @@ -391,47 +402,51 @@ def add_numbers(a: int, b: int) -> float: raise ModelRetry(message="Try again with the same arguments.") return a + b - events = capture_events() + items = capture_items("event", "transaction", "span") result = await test_agent.run("What is 5 + 3?") assert result is not None if handled_tool_call_exceptions: - (error, transaction) = events - else: - (transaction,) = events - spans = transaction["spans"] - - if handled_tool_call_exceptions: + (error,) = (item.payload for item in items if item.type == "event") assert error["level"] == "error" assert error["exception"]["values"][0]["mechanism"]["handled"] + spans = [item.payload for item in items if item.type == "span"] # Find child span types (invoke_agent is the transaction, not a child span) - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] - tool_spans = [s for s in spans if s["op"] == "gen_ai.execute_tool"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] + tool_spans = [ + s + for s in spans + if s["attributes"].get("sentry.op", "") == "gen_ai.execute_tool" + ] # Should have tool spans assert len(tool_spans) >= 1 # Check tool spans model_retry_tool_span = tool_spans[0] - assert "execute_tool" in model_retry_tool_span["description"] - assert model_retry_tool_span["data"]["gen_ai.operation.name"] == "execute_tool" - assert model_retry_tool_span["data"]["gen_ai.tool.name"] == "add_numbers" - assert "gen_ai.tool.input" in model_retry_tool_span["data"] + assert "execute_tool" in model_retry_tool_span["name"] + assert ( + model_retry_tool_span["attributes"]["gen_ai.operation.name"] == "execute_tool" + ) + assert 
model_retry_tool_span["attributes"]["gen_ai.tool.name"] == "add_numbers" + assert "gen_ai.tool.input" in model_retry_tool_span["attributes"] tool_span = tool_spans[1] - assert "execute_tool" in tool_span["description"] - assert tool_span["data"]["gen_ai.operation.name"] == "execute_tool" - assert tool_span["data"]["gen_ai.tool.name"] == "add_numbers" - assert "gen_ai.tool.input" in tool_span["data"] - assert "gen_ai.tool.output" in tool_span["data"] + assert "execute_tool" in tool_span["name"] + assert tool_span["attributes"]["gen_ai.operation.name"] == "execute_tool" + assert tool_span["attributes"]["gen_ai.tool.name"] == "add_numbers" + assert "gen_ai.tool.input" in tool_span["attributes"] + assert "gen_ai.tool.output" in tool_span["attributes"] # Check chat spans have available_tools for chat_span in chat_spans: - assert "gen_ai.request.available_tools" in chat_span["data"] - available_tools_str = chat_span["data"]["gen_ai.request.available_tools"] + assert "gen_ai.request.available_tools" in chat_span["attributes"] + available_tools_str = chat_span["attributes"]["gen_ai.request.available_tools"] # Available tools is serialized as a string assert "add_numbers" in available_tools_str @@ -442,7 +457,7 @@ def add_numbers(a: int, b: int) -> float: ) @pytest.mark.asyncio async def test_agent_with_tool_validation_error( - sentry_init, capture_events, get_test_agent, handled_tool_call_exceptions + sentry_init, capture_items, get_test_agent, handled_tool_call_exceptions ): """ Test that a handled exception is captured when a tool has unsatisfiable constraints. 
@@ -464,7 +479,7 @@ def add_numbers(a: Annotated[int, Field(gt=0, lt=0)], b: int) -> int: """Add two numbers together.""" return a + b - events = capture_events() + items = capture_items("event", "transaction", "span") result = None with pytest.raises(UnexpectedModelBehavior): @@ -473,42 +488,45 @@ def add_numbers(a: Annotated[int, Field(gt=0, lt=0)], b: int) -> int: assert result is None if handled_tool_call_exceptions: - (error, model_behaviour_error, transaction) = events - else: ( + error, model_behaviour_error, - transaction, - ) = events - spans = transaction["spans"] - - if handled_tool_call_exceptions: + ) = (item.payload for item in items if item.type == "event") assert error["level"] == "error" assert error["exception"]["values"][0]["mechanism"]["handled"] - # Find child span types (invoke_agent is the transaction, not a child span) - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] - tool_spans = [s for s in spans if s["op"] == "gen_ai.execute_tool"] + spans = [item.payload for item in items if item.type == "span"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] + tool_spans = [ + s + for s in spans + if s["attributes"].get("sentry.op", "") == "gen_ai.execute_tool" + ] # Should have tool spans assert len(tool_spans) >= 1 # Check tool spans model_retry_tool_span = tool_spans[0] - assert "execute_tool" in model_retry_tool_span["description"] - assert model_retry_tool_span["data"]["gen_ai.operation.name"] == "execute_tool" - assert model_retry_tool_span["data"]["gen_ai.tool.name"] == "add_numbers" - assert "gen_ai.tool.input" in model_retry_tool_span["data"] + assert "execute_tool" in model_retry_tool_span["name"] + assert ( + model_retry_tool_span["attributes"]["gen_ai.operation.name"] == "execute_tool" + ) + assert model_retry_tool_span["attributes"]["gen_ai.tool.name"] == "add_numbers" + assert "gen_ai.tool.input" in model_retry_tool_span["attributes"] # Check chat spans have available_tools for 
chat_span in chat_spans: - assert "gen_ai.request.available_tools" in chat_span["data"] - available_tools_str = chat_span["data"]["gen_ai.request.available_tools"] + assert "gen_ai.request.available_tools" in chat_span["attributes"] + available_tools_str = chat_span["attributes"]["gen_ai.request.available_tools"] # Available tools is serialized as a string assert "add_numbers" in available_tools_str @pytest.mark.asyncio -async def test_agent_with_tools_streaming(sentry_init, capture_events, get_test_agent): +async def test_agent_with_tools_streaming(sentry_init, capture_items, get_test_agent): """ Test that tool execution works correctly with streaming. """ @@ -525,37 +543,40 @@ def multiply(a: int, b: int) -> int: """Multiply two numbers.""" return a * b - events = capture_events() + items = capture_items("transaction", "span") async with test_agent.run_stream("What is 7 times 8?") as result: async for _ in result.stream_output(): pass - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # Find span types - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] - tool_spans = [s for s in spans if s["op"] == "gen_ai.execute_tool"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] + tool_spans = [ + s + for s in spans + if s["attributes"].get("sentry.op", "") == "gen_ai.execute_tool" + ] # Should have tool spans assert len(tool_spans) >= 1 # Verify streaming flag is True for chat_span in chat_spans: - assert chat_span["data"]["gen_ai.response.streaming"] is True + assert chat_span["attributes"]["gen_ai.response.streaming"] is True # Check tool span tool_span = tool_spans[0] - assert tool_span["data"]["gen_ai.tool.name"] == "multiply" - assert "gen_ai.tool.input" in tool_span["data"] - assert "gen_ai.tool.output" in tool_span["data"] + assert tool_span["attributes"]["gen_ai.tool.name"] == "multiply" + assert "gen_ai.tool.input" in 
tool_span["attributes"] + assert "gen_ai.tool.output" in tool_span["attributes"] @pytest.mark.asyncio -async def test_model_settings( - sentry_init, capture_events, get_test_agent_with_settings -): +async def test_model_settings(sentry_init, capture_items, get_test_agent_with_settings): """ Test that model settings are captured in spans. """ @@ -564,23 +585,24 @@ async def test_model_settings( traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") test_agent_with_settings = get_test_agent_with_settings() await test_agent_with_settings.run("Test input") - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # Find chat span - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] assert len(chat_spans) >= 1 chat_span = chat_spans[0] # Check that model settings are captured - assert chat_span["data"].get("gen_ai.request.temperature") == 0.7 - assert chat_span["data"].get("gen_ai.request.max_tokens") == 100 - assert chat_span["data"].get("gen_ai.request.top_p") == 0.9 + assert chat_span["attributes"].get("gen_ai.request.temperature") == 0.7 + assert chat_span["attributes"].get("gen_ai.request.max_tokens") == 100 + assert chat_span["attributes"].get("gen_ai.request.top_p") == 0.9 @pytest.mark.asyncio @@ -594,7 +616,7 @@ async def test_model_settings( ], ) async def test_system_prompt_attribute( - sentry_init, capture_events, send_default_pii, include_prompts + sentry_init, capture_items, send_default_pii, include_prompts ): """ Test that system prompts are included as the first message. 
@@ -611,21 +633,24 @@ async def test_system_prompt_attribute( send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") await agent.run("Hello") - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # The transaction IS the invoke_agent span, check for messages in chat spans instead - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] assert len(chat_spans) >= 1 chat_span = chat_spans[0] if send_default_pii and include_prompts: - system_instructions = chat_span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] + system_instructions = chat_span["attributes"][ + SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS + ] assert json.loads(system_instructions) == [ { "type": "text", @@ -633,11 +658,11 @@ async def test_system_prompt_attribute( } ] else: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in chat_span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in chat_span["attributes"] @pytest.mark.asyncio -async def test_error_handling(sentry_init, capture_events): +async def test_error_handling(sentry_init, capture_items): """ Test error handling in agent execution. 
""" @@ -653,14 +678,13 @@ async def test_error_handling(sentry_init, capture_events): traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") # Simple run that should succeed await agent.run("Hello") # At minimum, we should have a transaction - assert len(events) >= 1 - transaction = [e for e in events if e.get("type") == "transaction"][0] + transaction = next(item.payload for item in items if item.type == "transaction") assert transaction["transaction"] == "invoke_agent test_error" # Transaction should complete successfully (status key may not exist if no error) trace_status = transaction["contexts"]["trace"].get("status") @@ -668,7 +692,7 @@ async def test_error_handling(sentry_init, capture_events): @pytest.mark.asyncio -async def test_without_pii(sentry_init, capture_events, get_test_agent): +async def test_without_pii(sentry_init, capture_items, get_test_agent): """ Test that PII is not captured when send_default_pii is False. """ @@ -678,25 +702,26 @@ async def test_without_pii(sentry_init, capture_events, get_test_agent): send_default_pii=False, ) - events = capture_events() + items = capture_items("transaction", "span") test_agent = get_test_agent() await test_agent.run("Sensitive input") - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # Find child spans (invoke_agent is the transaction, not a child span) - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] # Verify that messages and response text are not captured for span in chat_spans: - assert "gen_ai.request.messages" not in span["data"] - assert "gen_ai.response.text" not in span["data"] + assert "gen_ai.request.messages" not in span["attributes"] + assert "gen_ai.response.text" not in span["attributes"] @pytest.mark.asyncio -async def test_without_pii_tools(sentry_init, 
capture_events, get_test_agent): +async def test_without_pii_tools(sentry_init, capture_items, get_test_agent): """ Test that tool input/output are not captured when send_default_pii is False. """ @@ -713,24 +738,27 @@ def sensitive_tool(data: str) -> str: """A tool with sensitive data.""" return f"Processed: {data}" - events = capture_events() + items = capture_items("transaction", "span") await test_agent.run("Use sensitive tool with private data") - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # Find tool spans - tool_spans = [s for s in spans if s["op"] == "gen_ai.execute_tool"] + tool_spans = [ + s + for s in spans + if s["attributes"].get("sentry.op", "") == "gen_ai.execute_tool" + ] # If tool was executed, verify input/output are not captured for tool_span in tool_spans: - assert "gen_ai.tool.input" not in tool_span["data"] - assert "gen_ai.tool.output" not in tool_span["data"] + assert "gen_ai.tool.input" not in tool_span["attributes"] + assert "gen_ai.tool.output" not in tool_span["attributes"] @pytest.mark.asyncio -async def test_multiple_agents_concurrent(sentry_init, capture_events, get_test_agent): +async def test_multiple_agents_concurrent(sentry_init, capture_items, get_test_agent): """ Test that multiple agents can run concurrently without interfering. 
""" @@ -739,7 +767,7 @@ async def test_multiple_agents_concurrent(sentry_init, capture_events, get_test_ traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") test_agent = get_test_agent() @@ -750,18 +778,15 @@ async def run_agent(input_text): results = await asyncio.gather(*[run_agent(f"Input {i}") for i in range(3)]) assert len(results) == 3 - assert len(events) == 3 # Verify each transaction is separate + events = [item.payload for item in items if item.type == "transaction"] for i, transaction in enumerate(events): - assert transaction["type"] == "transaction" assert transaction["transaction"] == "invoke_agent test_agent" - # Each should have its own spans - assert len(transaction["spans"]) >= 1 @pytest.mark.asyncio -async def test_message_history(sentry_init, capture_events): +async def test_message_history(sentry_init, capture_items): """ Test that full conversation history is captured in chat spans. """ @@ -776,7 +801,7 @@ async def test_message_history(sentry_init, capture_events): send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") # First message await agent.run("Hello, I'm Alice") @@ -797,23 +822,26 @@ async def test_message_history(sentry_init, capture_events): await agent.run("What is my name?", message_history=history) # We should have 2 transactions + events = [item.payload for item in items if item.type == "transaction"] assert len(events) >= 2 # Check the second transaction has the full history second_transaction = events[1] spans = second_transaction["spans"] - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] if chat_spans: chat_span = chat_spans[0] - if "gen_ai.request.messages" in chat_span["data"]: - messages_data = chat_span["data"]["gen_ai.request.messages"] + if "gen_ai.request.messages" in chat_span["attributes"]: + messages_data = 
chat_span["attributes"]["gen_ai.request.messages"] # Should have multiple messages including history assert len(messages_data) > 1 @pytest.mark.asyncio -async def test_gen_ai_system(sentry_init, capture_events, get_test_agent): +async def test_gen_ai_system(sentry_init, capture_items, get_test_agent): """ Test that gen_ai.system is set from the model. """ @@ -822,26 +850,27 @@ async def test_gen_ai_system(sentry_init, capture_events, get_test_agent): traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") test_agent = get_test_agent() await test_agent.run("Test input") - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # Find chat span - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] assert len(chat_spans) >= 1 chat_span = chat_spans[0] # gen_ai.system should be set from the model (TestModel -> 'test') - assert "gen_ai.system" in chat_span["data"] - assert chat_span["data"]["gen_ai.system"] == "test" + assert "gen_ai.system" in chat_span["attributes"] + assert chat_span["attributes"]["gen_ai.system"] == "test" @pytest.mark.asyncio -async def test_include_prompts_false(sentry_init, capture_events, get_test_agent): +async def test_include_prompts_false(sentry_init, capture_items, get_test_agent): """ Test that prompts are not captured when include_prompts=False. 
""" @@ -851,25 +880,26 @@ async def test_include_prompts_false(sentry_init, capture_events, get_test_agent send_default_pii=True, # Even with PII enabled, prompts should not be captured ) - events = capture_events() + items = capture_items("transaction", "span") test_agent = get_test_agent() await test_agent.run("Sensitive prompt") - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # Find child spans (invoke_agent is the transaction, not a child span) - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] # Verify that messages and response text are not captured for span in chat_spans: - assert "gen_ai.request.messages" not in span["data"] - assert "gen_ai.response.text" not in span["data"] + assert "gen_ai.request.messages" not in span["attributes"] + assert "gen_ai.response.text" not in span["attributes"] @pytest.mark.asyncio -async def test_include_prompts_true(sentry_init, capture_events, get_test_agent): +async def test_include_prompts_true(sentry_init, capture_items, get_test_agent): """ Test that prompts are captured when include_prompts=True (default). 
""" @@ -879,26 +909,27 @@ async def test_include_prompts_true(sentry_init, capture_events, get_test_agent) send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") test_agent = get_test_agent() await test_agent.run("Test prompt") - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # Find child spans (invoke_agent is the transaction, not a child span) - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] # Verify that messages are captured in chat spans assert len(chat_spans) >= 1 for chat_span in chat_spans: - assert "gen_ai.request.messages" in chat_span["data"] + assert "gen_ai.request.messages" in chat_span["attributes"] @pytest.mark.asyncio async def test_include_prompts_false_with_tools( - sentry_init, capture_events, get_test_agent + sentry_init, capture_items, get_test_agent ): """ Test that tool input/output are not captured when include_prompts=False. 
@@ -916,26 +947,27 @@ def test_tool(value: int) -> int: """A test tool.""" return value * 2 - events = capture_events() + items = capture_items("transaction", "span") await test_agent.run("Use the test tool with value 5") - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # Find tool spans - tool_spans = [s for s in spans if s["op"] == "gen_ai.execute_tool"] + tool_spans = [ + s + for s in spans + if s["attributes"].get("sentry.op", "") == "gen_ai.execute_tool" + ] # If tool was executed, verify input/output are not captured for tool_span in tool_spans: - assert "gen_ai.tool.input" not in tool_span["data"] - assert "gen_ai.tool.output" not in tool_span["data"] + assert "gen_ai.tool.input" not in tool_span["attributes"] + assert "gen_ai.tool.output" not in tool_span["attributes"] @pytest.mark.asyncio -async def test_include_prompts_requires_pii( - sentry_init, capture_events, get_test_agent -): +async def test_include_prompts_requires_pii(sentry_init, capture_items, get_test_agent): """ Test that include_prompts requires send_default_pii=True. 
""" @@ -945,25 +977,26 @@ async def test_include_prompts_requires_pii( send_default_pii=False, # PII disabled ) - events = capture_events() + items = capture_items("transaction", "span") test_agent = get_test_agent() await test_agent.run("Test prompt") - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # Find child spans (invoke_agent is the transaction, not a child span) - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] # Even with include_prompts=True, if PII is disabled, messages should not be captured for span in chat_spans: - assert "gen_ai.request.messages" not in span["data"] - assert "gen_ai.response.text" not in span["data"] + assert "gen_ai.request.messages" not in span["attributes"] + assert "gen_ai.response.text" not in span["attributes"] @pytest.mark.asyncio -async def test_mcp_tool_execution_spans(sentry_init, capture_events): +async def test_mcp_tool_execution_spans(sentry_init, capture_items): """ Test that MCP (Model Context Protocol) tool calls create execute_tool spans. 
@@ -1035,12 +1068,10 @@ async def mock_map_tool_result_part(part): send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") # Simulate MCP tool execution within a transaction through CombinedToolset - with sentry_sdk.start_transaction( - op="ai.run", name="invoke_agent test_mcp_agent" - ) as transaction: + with sentry_sdk.start_transaction(op="ai.run", name="invoke_agent test_mcp_agent"): # Set up the agent context scope = sentry_sdk.get_current_scope() scope._contexts["pydantic_ai_agent"] = { @@ -1080,13 +1111,10 @@ async def mock_map_tool_result_part(part): # MCP tool might raise if not fully mocked, that's okay pass - events_list = events + events_list = items if len(events_list) == 0: pytest.skip("No events captured, MCP test setup incomplete") - (transaction,) = events_list - transaction["spans"] - # Note: This test manually calls combined.call_tool which doesn't go through # ToolManager._call_tool (which is what the integration patches). # In real-world usage, MCP tools are called through agent.run() which uses ToolManager. @@ -1256,7 +1284,7 @@ async def run_and_check_context(agent, agent_name): @pytest.mark.asyncio -async def test_invoke_agent_with_list_user_prompt(sentry_init, capture_events): +async def test_invoke_agent_with_list_user_prompt(sentry_init, capture_items): """ Test that invoke_agent span handles list user prompts correctly. 
""" @@ -1271,15 +1299,14 @@ async def test_invoke_agent_with_list_user_prompt(sentry_init, capture_events): send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") # Use a list as user prompt await agent.run(["First part", "Second part"]) - (transaction,) = events - # Check that the invoke_agent transaction has messages data # The invoke_agent is the transaction itself + (transaction,) = [item.payload for item in items if item.type == "transaction"] if "gen_ai.request.messages" in transaction["contexts"]["trace"]["data"]: messages_str = transaction["contexts"]["trace"]["data"][ "gen_ai.request.messages" @@ -1299,7 +1326,7 @@ async def test_invoke_agent_with_list_user_prompt(sentry_init, capture_events): ], ) async def test_invoke_agent_with_instructions( - sentry_init, capture_events, send_default_pii, include_prompts + sentry_init, capture_items, send_default_pii, include_prompts ): """ Test that invoke_agent span handles instructions correctly. 
@@ -1322,31 +1349,34 @@ async def test_invoke_agent_with_instructions( send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") await agent.run("Test input") - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # The transaction IS the invoke_agent span, check for messages in chat spans instead - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] assert len(chat_spans) >= 1 chat_span = chat_spans[0] if send_default_pii and include_prompts: - system_instructions = chat_span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] + system_instructions = chat_span["attributes"][ + SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS + ] assert json.loads(system_instructions) == [ {"type": "text", "content": "System prompt"}, {"type": "text", "content": "Instruction 1\nInstruction 2"}, ] else: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in chat_span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in chat_span["attributes"] @pytest.mark.asyncio -async def test_model_name_extraction_with_callable(sentry_init, capture_events): +async def test_model_name_extraction_with_callable(sentry_init, capture_items): """ Test model name extraction when model has a callable name() method. """ @@ -1372,7 +1402,7 @@ async def test_model_name_extraction_with_callable(sentry_init, capture_events): @pytest.mark.asyncio -async def test_model_name_extraction_fallback_to_str(sentry_init, capture_events): +async def test_model_name_extraction_fallback_to_str(sentry_init, capture_items): """ Test model name extraction falls back to str() when no name attribute exists. 
""" @@ -1399,7 +1429,7 @@ async def test_model_name_extraction_fallback_to_str(sentry_init, capture_events @pytest.mark.asyncio -async def test_model_settings_object_style(sentry_init, capture_events): +async def test_model_settings_object_style(sentry_init, capture_items): """ Test that object-style model settings (non-dict) are handled correctly. """ @@ -1433,7 +1463,7 @@ async def test_model_settings_object_style(sentry_init, capture_events): @pytest.mark.asyncio -async def test_usage_data_partial(sentry_init, capture_events): +async def test_usage_data_partial(sentry_init, capture_items): """ Test that usage data is correctly handled when only some fields are present. """ @@ -1447,14 +1477,15 @@ async def test_usage_data_partial(sentry_init, capture_events): traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") await agent.run("Test input") - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] assert len(chat_spans) >= 1 # Check that usage data fields exist (they may or may not be set depending on TestModel) @@ -1464,7 +1495,7 @@ async def test_usage_data_partial(sentry_init, capture_events): @pytest.mark.asyncio -async def test_agent_data_from_scope(sentry_init, capture_events): +async def test_agent_data_from_scope(sentry_init, capture_items): """ Test that agent data can be retrieved from Sentry scope when not passed directly. 
    """
@@ -1479,20 +1510,19 @@ async def test_agent_data_from_scope(sentry_init, capture_events):
         traces_sample_rate=1.0,
     )
 
-    events = capture_events()
+    items = capture_items("transaction", "span")
 
     # The integration automatically sets agent in scope during execution
     await agent.run("Test input")
 
-    (transaction,) = events
-
-    # Verify agent name is captured
+    # Verify agent name is captured
+    (transaction,) = (item.payload for item in items if item.type == "transaction")
     assert transaction["transaction"] == "invoke_agent test_scope_agent"
 
 
 @pytest.mark.asyncio
 async def test_available_tools_without_description(
-    sentry_init, capture_events, get_test_agent
+    sentry_init, capture_items, get_test_agent
 ):
     """
     Test that available tools are captured even when description is missing.
@@ -1509,23 +1539,24 @@ def tool_without_desc(x: int) -> int:
         # No docstring = no description
         return x * 2
 
-    events = capture_events()
+    items = capture_items("transaction", "span")
 
     await test_agent.run("Use the tool with 5")
 
-    (transaction,) = events
-    spans = transaction["spans"]
+    spans = [item.payload for item in items if item.type == "span"]
 
-    chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"]
+    chat_spans = [
+        s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat"
+    ]
 
     if chat_spans:
         chat_span = chat_spans[0]
-        if "gen_ai.request.available_tools" in chat_span["data"]:
-            tools_str = chat_span["data"]["gen_ai.request.available_tools"]
+        if "gen_ai.request.available_tools" in chat_span["attributes"]:
+            tools_str = chat_span["attributes"]["gen_ai.request.available_tools"]
             assert "tool_without_desc" in tools_str
 
 
 @pytest.mark.asyncio
-async def test_output_with_tool_calls(sentry_init, capture_events, get_test_agent):
+async def test_output_with_tool_calls(sentry_init, capture_items, get_test_agent):
     """
     Test that tool calls in model response are captured correctly.
""" @@ -1542,14 +1573,15 @@ def calc_tool(value: int) -> int: """Calculate something.""" return value + 10 - events = capture_events() + items = capture_items("transaction", "span") await test_agent.run("Use calc_tool with 5") - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] # At least one chat span should exist assert len(chat_spans) >= 1 @@ -1558,11 +1590,11 @@ def calc_tool(value: int) -> int: for chat_span in chat_spans: # Tool calls may or may not be in response depending on TestModel behavior # Just verify the span was created and has basic data - assert "gen_ai.operation.name" in chat_span["data"] + assert "gen_ai.operation.name" in chat_span["attributes"] @pytest.mark.asyncio -async def test_message_formatting_with_different_parts(sentry_init, capture_events): +async def test_message_formatting_with_different_parts(sentry_init, capture_items): """ Test that different message part types are handled correctly in ai_client span. 
""" @@ -1579,7 +1611,7 @@ async def test_message_formatting_with_different_parts(sentry_init, capture_even send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") # Create message history with different part types history = [ @@ -1594,24 +1626,25 @@ async def test_message_formatting_with_different_parts(sentry_init, capture_even await agent.run("What did I say?", message_history=history) - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] # Should have chat spans assert len(chat_spans) >= 1 # Check that messages are captured chat_span = chat_spans[0] - if "gen_ai.request.messages" in chat_span["data"]: - messages_data = chat_span["data"]["gen_ai.request.messages"] + if "gen_ai.request.messages" in chat_span["attributes"]: + messages_data = chat_span["attributes"]["gen_ai.request.messages"] # Should contain message history assert messages_data is not None @pytest.mark.asyncio -async def test_update_invoke_agent_span_with_none_output(sentry_init, capture_events): +async def test_update_invoke_agent_span_with_none_output(sentry_init, capture_items): """ Test that update_invoke_agent_span handles None output gracefully. """ @@ -1639,7 +1672,7 @@ async def test_update_invoke_agent_span_with_none_output(sentry_init, capture_ev @pytest.mark.asyncio -async def test_update_ai_client_span_with_none_response(sentry_init, capture_events): +async def test_update_ai_client_span_with_none_response(sentry_init, capture_items): """ Test that update_ai_client_span handles None response gracefully. 
""" @@ -1666,7 +1699,7 @@ async def test_update_ai_client_span_with_none_response(sentry_init, capture_eve @pytest.mark.asyncio -async def test_agent_without_name(sentry_init, capture_events): +async def test_agent_without_name(sentry_init, capture_items): """ Test that agent without a name is handled correctly. """ @@ -1678,20 +1711,18 @@ async def test_agent_without_name(sentry_init, capture_events): traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") await agent.run("Test input") - (transaction,) = events - # Should still create transaction, just with default name - assert transaction["type"] == "transaction" + (transaction,) = (item.payload for item in items if item.type == "transaction") # Transaction name should be "invoke_agent agent" or similar default assert "invoke_agent" in transaction["transaction"] @pytest.mark.asyncio -async def test_model_response_without_parts(sentry_init, capture_events): +async def test_model_response_without_parts(sentry_init, capture_items): """ Test handling of model response without parts attribute. """ @@ -1723,7 +1754,7 @@ async def test_model_response_without_parts(sentry_init, capture_events): @pytest.mark.asyncio -async def test_input_messages_error_handling(sentry_init, capture_events): +async def test_input_messages_error_handling(sentry_init, capture_items): """ Test that _set_input_messages handles errors gracefully. """ @@ -1751,7 +1782,7 @@ async def test_input_messages_error_handling(sentry_init, capture_events): @pytest.mark.asyncio -async def test_available_tools_error_handling(sentry_init, capture_events): +async def test_available_tools_error_handling(sentry_init, capture_items): """ Test that _set_available_tools handles errors gracefully. 
""" @@ -1781,7 +1812,7 @@ async def test_available_tools_error_handling(sentry_init, capture_events): @pytest.mark.asyncio -async def test_set_usage_data_with_none_usage(sentry_init, capture_events): +async def test_set_usage_data_with_none_usage(sentry_init, capture_items): """ Test that _set_usage_data handles None usage gracefully. """ @@ -1806,7 +1837,7 @@ async def test_set_usage_data_with_none_usage(sentry_init, capture_events): @pytest.mark.asyncio -async def test_set_usage_data_with_partial_fields(sentry_init, capture_events): +async def test_set_usage_data_with_partial_fields(sentry_init, capture_items): """ Test that _set_usage_data handles usage with only some fields. """ @@ -1838,7 +1869,7 @@ async def test_set_usage_data_with_partial_fields(sentry_init, capture_events): @pytest.mark.asyncio -async def test_message_parts_with_tool_return(sentry_init, capture_events): +async def test_message_parts_with_tool_return(sentry_init, capture_items): """ Test that ToolReturnPart messages are handled correctly. """ @@ -1860,22 +1891,23 @@ def test_tool(x: int) -> int: send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") # Run with history containing tool return await agent.run("Use test_tool with 5") - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] # Should have chat spans assert len(chat_spans) >= 1 @pytest.mark.asyncio -async def test_message_parts_with_list_content(sentry_init, capture_events): +async def test_message_parts_with_list_content(sentry_init, capture_items): """ Test that message parts with list content are handled correctly. 
""" @@ -1910,7 +1942,7 @@ async def test_message_parts_with_list_content(sentry_init, capture_events): @pytest.mark.asyncio -async def test_output_data_with_text_and_tool_calls(sentry_init, capture_events): +async def test_output_data_with_text_and_tool_calls(sentry_init, capture_items): """ Test that _set_output_data handles both text and tool calls in response. """ @@ -1949,7 +1981,7 @@ async def test_output_data_with_text_and_tool_calls(sentry_init, capture_events) @pytest.mark.asyncio -async def test_output_data_error_handling(sentry_init, capture_events): +async def test_output_data_error_handling(sentry_init, capture_items): """ Test that _set_output_data handles errors in formatting gracefully. """ @@ -1981,7 +2013,7 @@ async def test_output_data_error_handling(sentry_init, capture_events): @pytest.mark.asyncio -async def test_message_with_system_prompt_part(sentry_init, capture_events): +async def test_message_with_system_prompt_part(sentry_init, capture_items): """ Test that SystemPromptPart is handled with correct role. """ @@ -2017,7 +2049,7 @@ async def test_message_with_system_prompt_part(sentry_init, capture_events): @pytest.mark.asyncio -async def test_message_with_instructions(sentry_init, capture_events): +async def test_message_with_instructions(sentry_init, capture_items): """ Test that messages with instructions field are handled correctly. """ @@ -2052,7 +2084,7 @@ async def test_message_with_instructions(sentry_init, capture_events): @pytest.mark.asyncio -async def test_set_input_messages_without_prompts(sentry_init, capture_events): +async def test_set_input_messages_without_prompts(sentry_init, capture_items): """ Test that _set_input_messages respects _should_send_prompts(). 
""" @@ -2078,7 +2110,7 @@ async def test_set_input_messages_without_prompts(sentry_init, capture_events): @pytest.mark.asyncio -async def test_set_output_data_without_prompts(sentry_init, capture_events): +async def test_set_output_data_without_prompts(sentry_init, capture_items): """ Test that _set_output_data respects _should_send_prompts(). """ @@ -2107,7 +2139,7 @@ async def test_set_output_data_without_prompts(sentry_init, capture_events): @pytest.mark.asyncio -async def test_get_model_name_with_exception_in_callable(sentry_init, capture_events): +async def test_get_model_name_with_exception_in_callable(sentry_init, capture_items): """ Test that _get_model_name handles exceptions in name() callable. """ @@ -2131,7 +2163,7 @@ async def test_get_model_name_with_exception_in_callable(sentry_init, capture_ev @pytest.mark.asyncio -async def test_get_model_name_with_string_model(sentry_init, capture_events): +async def test_get_model_name_with_string_model(sentry_init, capture_items): """ Test that _get_model_name handles string models. """ @@ -2150,7 +2182,7 @@ async def test_get_model_name_with_string_model(sentry_init, capture_events): @pytest.mark.asyncio -async def test_get_model_name_with_none(sentry_init, capture_events): +async def test_get_model_name_with_none(sentry_init, capture_items): """ Test that _get_model_name handles None model. """ @@ -2169,7 +2201,7 @@ async def test_get_model_name_with_none(sentry_init, capture_events): @pytest.mark.asyncio -async def test_set_model_data_with_system(sentry_init, capture_events): +async def test_set_model_data_with_system(sentry_init, capture_items): """ Test that _set_model_data captures system from model. 
""" @@ -2200,7 +2232,7 @@ async def test_set_model_data_with_system(sentry_init, capture_events): @pytest.mark.asyncio -async def test_set_model_data_from_agent_scope(sentry_init, capture_events): +async def test_set_model_data_from_agent_scope(sentry_init, capture_items): """ Test that _set_model_data retrieves model from agent in scope when not passed. """ @@ -2234,7 +2266,7 @@ async def test_set_model_data_from_agent_scope(sentry_init, capture_events): @pytest.mark.asyncio -async def test_set_model_data_with_none_settings_values(sentry_init, capture_events): +async def test_set_model_data_with_none_settings_values(sentry_init, capture_items): """ Test that _set_model_data skips None values in settings. """ @@ -2266,7 +2298,7 @@ async def test_set_model_data_with_none_settings_values(sentry_init, capture_eve @pytest.mark.asyncio -async def test_should_send_prompts_without_pii(sentry_init, capture_events): +async def test_should_send_prompts_without_pii(sentry_init, capture_items): """ Test that _should_send_prompts returns False when PII disabled. """ @@ -2284,7 +2316,7 @@ async def test_should_send_prompts_without_pii(sentry_init, capture_events): @pytest.mark.asyncio -async def test_set_agent_data_without_agent(sentry_init, capture_events): +async def test_set_agent_data_without_agent(sentry_init, capture_items): """ Test that _set_agent_data handles None agent gracefully. """ @@ -2309,7 +2341,7 @@ async def test_set_agent_data_without_agent(sentry_init, capture_events): @pytest.mark.asyncio -async def test_set_agent_data_from_scope(sentry_init, capture_events): +async def test_set_agent_data_from_scope(sentry_init, capture_items): """ Test that _set_agent_data retrieves agent from scope when not passed. 
""" @@ -2341,7 +2373,7 @@ async def test_set_agent_data_from_scope(sentry_init, capture_events): @pytest.mark.asyncio -async def test_set_agent_data_without_name(sentry_init, capture_events): +async def test_set_agent_data_without_name(sentry_init, capture_items): """ Test that _set_agent_data handles agent without name attribute. """ @@ -2371,7 +2403,7 @@ async def test_set_agent_data_without_name(sentry_init, capture_events): @pytest.mark.asyncio -async def test_set_available_tools_without_toolset(sentry_init, capture_events): +async def test_set_available_tools_without_toolset(sentry_init, capture_items): """ Test that _set_available_tools handles agent without toolset. """ @@ -2401,7 +2433,7 @@ async def test_set_available_tools_without_toolset(sentry_init, capture_events): @pytest.mark.asyncio -async def test_set_available_tools_with_schema(sentry_init, capture_events): +async def test_set_available_tools_with_schema(sentry_init, capture_items): """ Test that _set_available_tools extracts tool schema correctly. """ @@ -2437,7 +2469,7 @@ async def test_set_available_tools_with_schema(sentry_init, capture_events): @pytest.mark.asyncio -async def test_execute_tool_span_creation(sentry_init, capture_events): +async def test_execute_tool_span_creation(sentry_init, capture_items): """ Test direct creation of execute_tool span. """ @@ -2464,7 +2496,7 @@ async def test_execute_tool_span_creation(sentry_init, capture_events): @pytest.mark.asyncio -async def test_execute_tool_span_with_mcp_type(sentry_init, capture_events): +async def test_execute_tool_span_with_mcp_type(sentry_init, capture_items): """ Test execute_tool span with MCP tool type. 
""" @@ -2490,7 +2522,7 @@ async def test_execute_tool_span_with_mcp_type(sentry_init, capture_events): @pytest.mark.asyncio -async def test_execute_tool_span_without_prompts(sentry_init, capture_events): +async def test_execute_tool_span_without_prompts(sentry_init, capture_items): """ Test that execute_tool span respects _should_send_prompts(). """ @@ -2517,7 +2549,7 @@ async def test_execute_tool_span_without_prompts(sentry_init, capture_events): @pytest.mark.asyncio -async def test_execute_tool_span_with_none_args(sentry_init, capture_events): +async def test_execute_tool_span_with_none_args(sentry_init, capture_items): """ Test execute_tool span with None args. """ @@ -2540,7 +2572,7 @@ async def test_execute_tool_span_with_none_args(sentry_init, capture_events): @pytest.mark.asyncio -async def test_update_execute_tool_span_with_none_span(sentry_init, capture_events): +async def test_update_execute_tool_span_with_none_span(sentry_init, capture_items): """ Test that update_execute_tool_span handles None span gracefully. """ @@ -2561,7 +2593,7 @@ async def test_update_execute_tool_span_with_none_span(sentry_init, capture_even @pytest.mark.asyncio -async def test_update_execute_tool_span_with_none_result(sentry_init, capture_events): +async def test_update_execute_tool_span_with_none_result(sentry_init, capture_items): """ Test that update_execute_tool_span handles None result gracefully. """ @@ -2588,7 +2620,7 @@ async def test_update_execute_tool_span_with_none_result(sentry_init, capture_ev @pytest.mark.asyncio -async def test_tool_execution_without_span_context(sentry_init, capture_events): +async def test_tool_execution_without_span_context(sentry_init, capture_items): """ Test that tool execution patch handles case when no span context exists. This tests the code path where current_span is None in _patch_tool_execution. 
@@ -2617,7 +2649,7 @@ async def test_tool_execution_without_span_context(sentry_init, capture_events): @pytest.mark.asyncio -async def test_invoke_agent_span_with_callable_instruction(sentry_init, capture_events): +async def test_invoke_agent_span_with_callable_instruction(sentry_init, capture_items): """ Test that invoke_agent_span skips callable instructions correctly. """ @@ -2650,7 +2682,7 @@ async def test_invoke_agent_span_with_callable_instruction(sentry_init, capture_ @pytest.mark.asyncio -async def test_invoke_agent_span_with_string_instructions(sentry_init, capture_events): +async def test_invoke_agent_span_with_string_instructions(sentry_init, capture_items): """ Test that invoke_agent_span handles string instructions (not list). """ @@ -2680,7 +2712,7 @@ async def test_invoke_agent_span_with_string_instructions(sentry_init, capture_e @pytest.mark.asyncio -async def test_ai_client_span_with_streaming_flag(sentry_init, capture_events): +async def test_ai_client_span_with_streaming_flag(sentry_init, capture_items): """ Test that ai_client_span reads streaming flag from scope. """ @@ -2706,7 +2738,7 @@ async def test_ai_client_span_with_streaming_flag(sentry_init, capture_events): @pytest.mark.asyncio -async def test_ai_client_span_gets_agent_from_scope(sentry_init, capture_events): +async def test_ai_client_span_gets_agent_from_scope(sentry_init, capture_items): """ Test that ai_client_span gets agent from scope when not passed. 
""" @@ -2759,7 +2791,7 @@ def _find_binary_content(messages_data, expected_modality, expected_mime_type): @pytest.mark.asyncio -async def test_binary_content_encoding_image(sentry_init, capture_events): +async def test_binary_content_encoding_image(sentry_init, capture_items): """Test that BinaryContent with image data is properly encoded in messages.""" sentry_init( integrations=[PydanticAIIntegration()], @@ -2767,7 +2799,7 @@ async def test_binary_content_encoding_image(sentry_init, capture_events): send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") with sentry_sdk.start_transaction(op="test", name="test"): span = sentry_sdk.start_span(op="test_span") @@ -2782,14 +2814,14 @@ async def test_binary_content_encoding_image(sentry_init, capture_events): _set_input_messages(span, [mock_msg]) span.finish() - (event,) = events + (event,) = (item.payload for item in items if item.type == "transaction") span_data = event["spans"][0]["data"] messages_data = _get_messages_from_span(span_data) assert _find_binary_content(messages_data, "image", "image/png") @pytest.mark.asyncio -async def test_binary_content_encoding_mixed_content(sentry_init, capture_events): +async def test_binary_content_encoding_mixed_content(sentry_init, capture_items): """Test that BinaryContent mixed with text content is properly handled.""" sentry_init( integrations=[PydanticAIIntegration()], @@ -2797,7 +2829,7 @@ async def test_binary_content_encoding_mixed_content(sentry_init, capture_events send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") with sentry_sdk.start_transaction(op="test", name="test"): span = sentry_sdk.start_span(op="test_span") @@ -2814,7 +2846,7 @@ async def test_binary_content_encoding_mixed_content(sentry_init, capture_events _set_input_messages(span, [mock_msg]) span.finish() - (event,) = events + (event,) = (item.payload for item in items if item.type == "transaction") span_data = 
event["spans"][0]["data"] messages_data = _get_messages_from_span(span_data) @@ -2830,7 +2862,7 @@ async def test_binary_content_encoding_mixed_content(sentry_init, capture_events @pytest.mark.asyncio -async def test_binary_content_in_agent_run(sentry_init, capture_events): +async def test_binary_content_in_agent_run(sentry_init, capture_items): """Test that BinaryContent in actual agent run is properly captured in spans.""" agent = Agent("test", name="test_binary_agent") @@ -2840,28 +2872,30 @@ async def test_binary_content_in_agent_run(sentry_init, capture_events): send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") binary_content = BinaryContent( data=b"fake_image_data_for_testing", media_type="image/png" ) await agent.run(["Analyze this image:", binary_content]) - (transaction,) = events - chat_spans = [s for s in transaction["spans"] if s["op"] == "gen_ai.chat"] + spans = [item.payload for item in items if item.type == "span"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] assert len(chat_spans) >= 1 chat_span = chat_spans[0] - if "gen_ai.request.messages" in chat_span["data"]: - messages_str = str(chat_span["data"]["gen_ai.request.messages"]) + if "gen_ai.request.messages" in chat_span["attributes"]: + messages_str = str(chat_span["attributes"]["gen_ai.request.messages"]) assert any(keyword in messages_str for keyword in ["blob", "image", "base64"]) @pytest.mark.asyncio -async def test_set_usage_data_with_cache_tokens(sentry_init, capture_events): +async def test_set_usage_data_with_cache_tokens(sentry_init, capture_items): """Test that cache_read_tokens and cache_write_tokens are tracked.""" sentry_init(integrations=[PydanticAIIntegration()], traces_sample_rate=1.0) - events = capture_events() + items = capture_items("transaction", "span") with sentry_sdk.start_transaction(op="test", name="test"): span = sentry_sdk.start_span(op="test_span") @@ -2874,7 +2908,7 
@@ async def test_set_usage_data_with_cache_tokens(sentry_init, capture_events): _set_usage_data(span, usage) span.finish() - (event,) = events + (event,) = (item.payload for item in items if item.type == "transaction") (span_data,) = event["spans"] assert span_data["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 80 assert span_data["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 20 @@ -2922,7 +2956,7 @@ async def test_set_usage_data_with_cache_tokens(sentry_init, capture_events): ], ) def test_image_url_base64_content_in_span( - sentry_init, capture_events, url, image_url_kwargs, expected_content + sentry_init, capture_items, url, image_url_kwargs, expected_content ): from sentry_sdk.integrations.pydantic_ai.spans.ai_client import ai_client_span @@ -2932,7 +2966,7 @@ def test_image_url_base64_content_in_span( send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") with sentry_sdk.start_transaction(op="test", name="test"): image_url = ImageUrl(url=url, **image_url_kwargs) @@ -2944,10 +2978,12 @@ def test_image_url_base64_content_in_span( span = ai_client_span([mock_msg], None, None, None) span.finish() - (event,) = events - chat_spans = [s for s in event["spans"] if s["op"] == "gen_ai.chat"] + spans = [item.payload for item in items if item.type == "span"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] assert len(chat_spans) >= 1 - messages_data = _get_messages_from_span(chat_spans[0]["data"]) + messages_data = _get_messages_from_span(chat_spans[0]["attributes"]) found_image = False for msg in messages_data: @@ -2992,7 +3028,7 @@ def test_image_url_base64_content_in_span( ], ) async def test_invoke_agent_image_url( - sentry_init, capture_events, url, image_url_kwargs, expected_content + sentry_init, capture_items, url, image_url_kwargs, expected_content ): sentry_init( integrations=[PydanticAIIntegration()], @@ -3002,17 +3038,18 @@ async def 
test_invoke_agent_image_url( agent = Agent("test", name="test_image_url_agent") - events = capture_events() + items = capture_items("transaction", "span") image_url = ImageUrl(url=url, **image_url_kwargs) await agent.run([image_url, "Describe this image"]) - (transaction,) = events - found_image = False - chat_spans = [s for s in transaction["spans"] if s["op"] == "gen_ai.chat"] + spans = [item.payload for item in items if item.type == "span"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] for chat_span in chat_spans: - messages_data = _get_messages_from_span(chat_span["data"]) + messages_data = _get_messages_from_span(chat_span["attributes"]) for msg in messages_data: if "content" not in msg: continue @@ -3025,7 +3062,7 @@ async def test_invoke_agent_image_url( @pytest.mark.asyncio -async def test_tool_description_in_execute_tool_span(sentry_init, capture_events): +async def test_tool_description_in_execute_tool_span(sentry_init, capture_items): """ Test that tool description from the tool's docstring is included in execute_tool spans. 
""" @@ -3046,18 +3083,24 @@ def multiply_numbers(a: int, b: int) -> int: send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") result = await agent.run("What is 5 times 3?") assert result is not None - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] - tool_spans = [s for s in spans if s["op"] == "gen_ai.execute_tool"] + tool_spans = [ + s + for s in spans + if s["attributes"].get("sentry.op", "") == "gen_ai.execute_tool" + ] assert len(tool_spans) >= 1 tool_span = tool_spans[0] - assert tool_span["data"]["gen_ai.tool.name"] == "multiply_numbers" - assert SPANDATA.GEN_AI_TOOL_DESCRIPTION in tool_span["data"] - assert "Multiply two numbers" in tool_span["data"][SPANDATA.GEN_AI_TOOL_DESCRIPTION] + assert tool_span["attributes"]["gen_ai.tool.name"] == "multiply_numbers" + assert SPANDATA.GEN_AI_TOOL_DESCRIPTION in tool_span["attributes"] + assert ( + "Multiply two numbers" + in tool_span["attributes"][SPANDATA.GEN_AI_TOOL_DESCRIPTION] + ) From 7befc7d3863593c0414d437e59f7591ac4334cf5 Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Fri, 17 Apr 2026 13:03:38 +0200 Subject: [PATCH 19/36] . 
--- sentry_sdk/client.py | 16 ++++++++-- tests/tracing/test_decorator.py | 53 ++++++++++++++++++++++++--------- tests/tracing/test_misc.py | 8 ++--- 3 files changed, 56 insertions(+), 21 deletions(-) diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index c6df2f564b..99e58ec499 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -245,9 +245,15 @@ def _serialized_v1_span_to_serialized_v2_span( res["attributes"] = {} for key, value in attributes.items(): - res["attributes"][key] = _serialized_v1_attribute_to_serialized_v2_attribute( - value - ) + converted_value = _serialized_v1_attribute_to_serialized_v2_attribute(value) + if converted_value is None: + continue + + res["attributes"][key] = converted_value + + # Remove redundant attribute, as status is stored in the status field. + if "status" in res["attributes"]: + del res["attributes"]["status"] return res @@ -268,6 +274,10 @@ def _split_gen_ai_spans( non_gen_ai_spans = [] gen_ai_spans = [] for span in spans: + if not isinstance(span, dict): + non_gen_ai_spans.append(span) + continue + span_op = span.get("op") if isinstance(span_op, str) and span_op.startswith("gen_ai."): gen_ai_spans.append(span) diff --git a/tests/tracing/test_decorator.py b/tests/tracing/test_decorator.py index 15432f5862..e73323138a 100644 --- a/tests/tracing/test_decorator.py +++ b/tests/tracing/test_decorator.py @@ -121,9 +121,9 @@ async def _some_function_traced(a, b, c): ) -def test_span_templates_ai_dicts(sentry_init, capture_events): +def test_span_templates_ai_dicts(sentry_init, capture_items): sentry_init(traces_sample_rate=1.0) - events = capture_events() + items = capture_items("span") @sentry_sdk.trace(template=SPANTEMPLATE.AI_TOOL) def my_tool(arg1, arg2): @@ -166,40 +166,57 @@ def my_agent(): with sentry_sdk.start_transaction(name="test-transaction"): my_agent() - (event,) = events - (agent_span, tool_span, chat_span) = event["spans"] + (agent_span, tool_span, chat_span) = ( + item.payload for item in items if 
item.type == "span" + ) - assert agent_span["op"] == "gen_ai.invoke_agent" + assert agent_span["attributes"]["sentry.op"] == "gen_ai.invoke_agent" assert ( - agent_span["description"] + agent_span["name"] == "invoke_agent test_decorator.test_span_templates_ai_dicts..my_agent" ) - assert agent_span["data"] == { + assert agent_span["attributes"] == { "gen_ai.agent.name": "test_decorator.test_span_templates_ai_dicts..my_agent", "gen_ai.operation.name": "invoke_agent", + "sentry.environment": "production", + "sentry.op": "gen_ai.invoke_agent", + "sentry.origin": "manual", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": mock.ANY, + "sentry.segment.id": mock.ANY, + "sentry.segment.name": "test-transaction", "thread.id": mock.ANY, "thread.name": mock.ANY, } - assert tool_span["op"] == "gen_ai.execute_tool" + assert tool_span["attributes"]["sentry.op"] == "gen_ai.execute_tool" assert ( - tool_span["description"] + tool_span["name"] == "execute_tool test_decorator.test_span_templates_ai_dicts..my_tool" ) - assert tool_span["data"] == { + assert tool_span["attributes"] == { "gen_ai.tool.name": "test_decorator.test_span_templates_ai_dicts..my_tool", "gen_ai.operation.name": "execute_tool", "gen_ai.usage.input_tokens": 10, "gen_ai.usage.output_tokens": 20, "gen_ai.usage.total_tokens": 30, + "sentry.environment": "production", + "sentry.op": "gen_ai.execute_tool", + "sentry.origin": "manual", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": mock.ANY, + "sentry.segment.id": mock.ANY, + "sentry.segment.name": "test-transaction", "thread.id": mock.ANY, "thread.name": mock.ANY, } - assert "gen_ai.tool.description" not in tool_span["data"] + assert "gen_ai.tool.description" not in tool_span["attributes"] - assert chat_span["op"] == "gen_ai.chat" - assert chat_span["description"] == "chat my-gpt-4o-mini" - assert chat_span["data"] == { + assert chat_span["attributes"]["sentry.op"] == "gen_ai.chat" 
+ assert chat_span["name"] == "chat my-gpt-4o-mini" + assert chat_span["attributes"] == { "gen_ai.operation.name": "chat", "gen_ai.request.frequency_penalty": 1.0, "gen_ai.request.max_tokens": 100, @@ -213,6 +230,14 @@ def my_agent(): "gen_ai.usage.input_tokens": 11, "gen_ai.usage.output_tokens": 22, "gen_ai.usage.total_tokens": 33, + "sentry.environment": "production", + "sentry.op": "gen_ai.chat", + "sentry.origin": "manual", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": mock.ANY, + "sentry.segment.id": mock.ANY, + "sentry.segment.name": "test-transaction", "thread.id": mock.ANY, "thread.name": mock.ANY, } diff --git a/tests/tracing/test_misc.py b/tests/tracing/test_misc.py index 8895c98dbc..f69e19791a 100644 --- a/tests/tracing/test_misc.py +++ b/tests/tracing/test_misc.py @@ -611,11 +611,11 @@ class TestConversationIdPropagation: """Tests for conversation_id propagation to AI spans.""" def test_conversation_id_propagates_to_span_with_gen_ai_operation_name( - self, sentry_init, capture_events + self, sentry_init, capture_items ): """Span with gen_ai.operation.name data should get conversation_id.""" sentry_init(traces_sample_rate=1.0) - events = capture_events() + items = capture_items("span") scope = sentry_sdk.get_current_scope() scope.set_conversation_id("conv-op-name-test") @@ -624,8 +624,8 @@ def test_conversation_id_propagates_to_span_with_gen_ai_operation_name( with start_span(op="http.client") as span: span.set_data("gen_ai.operation.name", "chat") - (event,) = events - span_data = event["spans"][0]["data"] + spans = [item.payload for item in items if item.type == "span"] + span_data = spans[0]["data"] assert span_data.get("gen_ai.conversation.id") == "conv-op-name-test" def test_conversation_id_propagates_to_span_with_ai_op( From fb348bb1037ce1350c714ad3da8ec7b77f79c350 Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Fri, 17 Apr 2026 13:12:59 +0200 Subject: [PATCH 20/36] openai-agents tests --- 
.../openai_agents/test_openai_agents.py | 855 ++++++++++-------- 1 file changed, 470 insertions(+), 385 deletions(-) diff --git a/tests/integrations/openai_agents/test_openai_agents.py b/tests/integrations/openai_agents/test_openai_agents.py index 7310e86df5..1c4925915d 100644 --- a/tests/integrations/openai_agents/test_openai_agents.py +++ b/tests/integrations/openai_agents/test_openai_agents.py @@ -160,7 +160,7 @@ def test_agent_custom_model(): @pytest.mark.asyncio async def test_agent_invocation_span_no_pii( sentry_init, - capture_events, + capture_items, test_agent, nonstreaming_responses_model_response, get_model_response, @@ -184,7 +184,7 @@ async def test_agent_invocation_span_no_pii( send_default_pii=False, ) - events = capture_events() + items = capture_items("span", "transaction") result = await agents.Runner.run( agent, "Test input", run_config=test_run_config @@ -193,38 +193,44 @@ async def test_agent_invocation_span_no_pii( assert result is not None assert result.final_output == "Hello, how can I help you?" 
- (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] invoke_agent_span = next( - span for span in spans if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT + ) + ai_client_span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT ) - ai_client_span = next(span for span in spans if span["op"] == OP.GEN_AI_CHAT) + transactions = [item.payload for item in items if item.type == "transaction"] + assert len(transactions) == 1 + transaction = transactions[0] assert transaction["transaction"] == "test_agent workflow" assert transaction["contexts"]["trace"]["origin"] == "auto.ai.openai_agents" - assert invoke_agent_span["description"] == "invoke_agent test_agent" + assert invoke_agent_span["name"] == "invoke_agent test_agent" - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in invoke_agent_span["data"] - assert "gen_ai.request.messages" not in invoke_agent_span["data"] - assert "gen_ai.response.text" not in invoke_agent_span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in invoke_agent_span["attributes"] + assert "gen_ai.request.messages" not in invoke_agent_span["attributes"] + assert "gen_ai.response.text" not in invoke_agent_span["attributes"] - assert invoke_agent_span["data"]["gen_ai.operation.name"] == "invoke_agent" - assert invoke_agent_span["data"]["gen_ai.system"] == "openai" - assert invoke_agent_span["data"]["gen_ai.agent.name"] == "test_agent" - assert invoke_agent_span["data"]["gen_ai.request.max_tokens"] == 100 - assert invoke_agent_span["data"]["gen_ai.request.model"] == "gpt-4" - assert invoke_agent_span["data"]["gen_ai.request.temperature"] == 0.7 - assert invoke_agent_span["data"]["gen_ai.request.top_p"] == 1.0 + assert invoke_agent_span["attributes"]["gen_ai.operation.name"] == "invoke_agent" + assert invoke_agent_span["attributes"]["gen_ai.system"] == "openai" + 
assert invoke_agent_span["attributes"]["gen_ai.agent.name"] == "test_agent" + assert invoke_agent_span["attributes"]["gen_ai.request.max_tokens"] == 100 + assert invoke_agent_span["attributes"]["gen_ai.request.model"] == "gpt-4" + assert invoke_agent_span["attributes"]["gen_ai.request.temperature"] == 0.7 + assert invoke_agent_span["attributes"]["gen_ai.request.top_p"] == 1.0 - assert ai_client_span["description"] == "chat gpt-4" - assert ai_client_span["data"]["gen_ai.operation.name"] == "chat" - assert ai_client_span["data"]["gen_ai.system"] == "openai" - assert ai_client_span["data"]["gen_ai.agent.name"] == "test_agent" - assert ai_client_span["data"]["gen_ai.request.max_tokens"] == 100 - assert ai_client_span["data"]["gen_ai.request.model"] == "gpt-4" - assert ai_client_span["data"]["gen_ai.request.temperature"] == 0.7 - assert ai_client_span["data"]["gen_ai.request.top_p"] == 1.0 + assert ai_client_span["name"] == "chat gpt-4" + assert ai_client_span["attributes"]["gen_ai.operation.name"] == "chat" + assert ai_client_span["attributes"]["gen_ai.system"] == "openai" + assert ai_client_span["attributes"]["gen_ai.agent.name"] == "test_agent" + assert ai_client_span["attributes"]["gen_ai.request.max_tokens"] == 100 + assert ai_client_span["attributes"]["gen_ai.request.model"] == "gpt-4" + assert ai_client_span["attributes"]["gen_ai.request.temperature"] == 0.7 + assert ai_client_span["attributes"]["gen_ai.request.top_p"] == 1.0 @pytest.mark.asyncio @@ -305,7 +311,7 @@ async def test_agent_invocation_span_no_pii( ) async def test_agent_invocation_span( sentry_init, - capture_events, + capture_items, test_agent_with_instructions, nonstreaming_responses_model_response, instructions, @@ -335,7 +341,7 @@ async def test_agent_invocation_span( send_default_pii=True, ) - events = capture_events() + items = capture_items("span", "transaction") result = await agents.Runner.run( agent, @@ -346,28 +352,34 @@ async def test_agent_invocation_span( assert result is not None 
assert result.final_output == "Hello, how can I help you?" - (transaction,) = events - spans = transaction["spans"] - invoke_agent_span, ai_client_span = spans - + transactions = [item.payload for item in items if item.type == "transaction"] + assert len(transactions) == 1 + transaction = transactions[0] assert transaction["transaction"] == "test_agent workflow" assert transaction["contexts"]["trace"]["origin"] == "auto.ai.openai_agents" - assert invoke_agent_span["description"] == "invoke_agent test_agent" + spans = [item.payload for item in items if item.type == "span"] + invoke_agent_span, ai_client_span = spans + + assert invoke_agent_span["name"] == "invoke_agent test_agent" # Only first case checks "gen_ai.request.messages" until further input handling work. param_id = request.node.callspec.id if "string" in param_id and instructions is None: # type: ignore - assert "gen_ai.system_instructions" not in ai_client_span["data"] + assert "gen_ai.system_instructions" not in ai_client_span["attributes"] - assert invoke_agent_span["data"]["gen_ai.request.messages"] == safe_serialize( + assert invoke_agent_span["attributes"][ + "gen_ai.request.messages" + ] == safe_serialize( [ {"content": [{"text": "Test input", "type": "text"}], "role": "user"}, ] ) elif "string" in param_id: - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ { "type": "text", @@ -376,13 +388,17 @@ async def test_agent_invocation_span( ] ) elif "blocks_no_type" in param_id and instructions is None: # type: ignore - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ {"type": "text", "content": "You are a helpful assistant."}, ] ) elif "blocks_no_type" in param_id: - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert 
ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ { "type": "text", @@ -392,13 +408,17 @@ async def test_agent_invocation_span( ] ) elif "blocks" in param_id and instructions is None: # type: ignore - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ {"type": "text", "content": "You are a helpful assistant."}, ] ) elif "blocks" in param_id: - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ { "type": "text", @@ -408,14 +428,18 @@ async def test_agent_invocation_span( ] ) elif "parts_no_type" in param_id and instructions is None: - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ {"type": "text", "content": "You are a helpful assistant."}, {"type": "text", "content": "Be concise and clear."}, ] ) elif "parts_no_type" in param_id: - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ { "type": "text", @@ -426,14 +450,18 @@ async def test_agent_invocation_span( ] ) elif instructions is None: # type: ignore - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ {"type": "text", "content": "You are a helpful assistant."}, {"type": "text", "content": "Be concise and clear."}, ] ) else: - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ { "type": "text", @@ -445,32 +473,32 @@ async def 
test_agent_invocation_span( ) assert ( - invoke_agent_span["data"]["gen_ai.response.text"] + invoke_agent_span["attributes"]["gen_ai.response.text"] == "Hello, how can I help you?" ) - assert invoke_agent_span["data"]["gen_ai.operation.name"] == "invoke_agent" - assert invoke_agent_span["data"]["gen_ai.system"] == "openai" - assert invoke_agent_span["data"]["gen_ai.agent.name"] == "test_agent" - assert invoke_agent_span["data"]["gen_ai.request.max_tokens"] == 100 - assert invoke_agent_span["data"]["gen_ai.request.model"] == "gpt-4" - assert invoke_agent_span["data"]["gen_ai.request.temperature"] == 0.7 - assert invoke_agent_span["data"]["gen_ai.request.top_p"] == 1.0 + assert invoke_agent_span["attributes"]["gen_ai.operation.name"] == "invoke_agent" + assert invoke_agent_span["attributes"]["gen_ai.system"] == "openai" + assert invoke_agent_span["attributes"]["gen_ai.agent.name"] == "test_agent" + assert invoke_agent_span["attributes"]["gen_ai.request.max_tokens"] == 100 + assert invoke_agent_span["attributes"]["gen_ai.request.model"] == "gpt-4" + assert invoke_agent_span["attributes"]["gen_ai.request.temperature"] == 0.7 + assert invoke_agent_span["attributes"]["gen_ai.request.top_p"] == 1.0 - assert ai_client_span["description"] == "chat gpt-4" - assert ai_client_span["data"]["gen_ai.operation.name"] == "chat" - assert ai_client_span["data"]["gen_ai.system"] == "openai" - assert ai_client_span["data"]["gen_ai.agent.name"] == "test_agent" - assert ai_client_span["data"]["gen_ai.request.max_tokens"] == 100 - assert ai_client_span["data"]["gen_ai.request.model"] == "gpt-4" - assert ai_client_span["data"]["gen_ai.request.temperature"] == 0.7 - assert ai_client_span["data"]["gen_ai.request.top_p"] == 1.0 + assert ai_client_span["name"] == "chat gpt-4" + assert ai_client_span["attributes"]["gen_ai.operation.name"] == "chat" + assert ai_client_span["attributes"]["gen_ai.system"] == "openai" + assert ai_client_span["attributes"]["gen_ai.agent.name"] == "test_agent" + 
assert ai_client_span["attributes"]["gen_ai.request.max_tokens"] == 100 + assert ai_client_span["attributes"]["gen_ai.request.model"] == "gpt-4" + assert ai_client_span["attributes"]["gen_ai.request.temperature"] == 0.7 + assert ai_client_span["attributes"]["gen_ai.request.top_p"] == 1.0 @pytest.mark.asyncio async def test_client_span_custom_model( sentry_init, - capture_events, + capture_items, test_agent_custom_model, nonstreaming_responses_model_response, get_model_response, @@ -497,7 +525,7 @@ async def test_client_span_custom_model( traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "spans") result = await agents.Runner.run( agent, "Test input", run_config=test_run_config @@ -506,17 +534,18 @@ async def test_client_span_custom_model( assert result is not None assert result.final_output == "Hello, how can I help you?" - (transaction,) = events - spans = transaction["spans"] - ai_client_span = next(span for span in spans if span["op"] == OP.GEN_AI_CHAT) + spans = [item.payload for item in items if item.type == "span"] + ai_client_span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + ) - assert ai_client_span["description"] == "chat my-custom-model" - assert ai_client_span["data"]["gen_ai.request.model"] == "my-custom-model" + assert ai_client_span["name"] == "chat my-custom-model" + assert ai_client_span["attributes"]["gen_ai.request.model"] == "my-custom-model" def test_agent_invocation_span_sync_no_pii( sentry_init, - capture_events, + capture_items, test_agent, nonstreaming_responses_model_response, get_model_response, @@ -543,42 +572,48 @@ def test_agent_invocation_span_sync_no_pii( send_default_pii=False, ) - events = capture_events() + items = capture_items("span", "transaction") result = agents.Runner.run_sync(agent, "Test input", run_config=test_run_config) assert result is not None assert result.final_output == "Hello, how can I help you?" 
- (transaction,) = events - spans = transaction["spans"] - invoke_agent_span = next( - span for span in spans if span["op"] == OP.GEN_AI_INVOKE_AGENT - ) - ai_client_span = next(span for span in spans if span["op"] == OP.GEN_AI_CHAT) - + transactions = [item.payload for item in items if item.type == "transaction"] + assert len(transactions) == 1 + transaction = transactions[0] assert transaction["transaction"] == "test_agent workflow" assert transaction["contexts"]["trace"]["origin"] == "auto.ai.openai_agents" - assert invoke_agent_span["description"] == "invoke_agent test_agent" - assert invoke_agent_span["data"]["gen_ai.operation.name"] == "invoke_agent" - assert invoke_agent_span["data"]["gen_ai.system"] == "openai" - assert invoke_agent_span["data"]["gen_ai.agent.name"] == "test_agent" - assert invoke_agent_span["data"]["gen_ai.request.max_tokens"] == 100 - assert invoke_agent_span["data"]["gen_ai.request.model"] == "gpt-4" - assert invoke_agent_span["data"]["gen_ai.request.temperature"] == 0.7 - assert invoke_agent_span["data"]["gen_ai.request.top_p"] == 1.0 + spans = [item.payload for item in items if item.type == "span"] + invoke_agent_span = next( + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT + ) + ai_client_span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + ) + + assert invoke_agent_span["name"] == "invoke_agent test_agent" + assert invoke_agent_span["attributes"]["gen_ai.operation.name"] == "invoke_agent" + assert invoke_agent_span["attributes"]["gen_ai.system"] == "openai" + assert invoke_agent_span["attributes"]["gen_ai.agent.name"] == "test_agent" + assert invoke_agent_span["attributes"]["gen_ai.request.max_tokens"] == 100 + assert invoke_agent_span["attributes"]["gen_ai.request.model"] == "gpt-4" + assert invoke_agent_span["attributes"]["gen_ai.request.temperature"] == 0.7 + assert invoke_agent_span["attributes"]["gen_ai.request.top_p"] == 1.0 - assert 
ai_client_span["description"] == "chat gpt-4" - assert ai_client_span["data"]["gen_ai.operation.name"] == "chat" - assert ai_client_span["data"]["gen_ai.system"] == "openai" - assert ai_client_span["data"]["gen_ai.agent.name"] == "test_agent" - assert ai_client_span["data"]["gen_ai.request.max_tokens"] == 100 - assert ai_client_span["data"]["gen_ai.request.model"] == "gpt-4" - assert ai_client_span["data"]["gen_ai.request.temperature"] == 0.7 - assert ai_client_span["data"]["gen_ai.request.top_p"] == 1.0 + assert ai_client_span["name"] == "chat gpt-4" + assert ai_client_span["attributes"]["gen_ai.operation.name"] == "chat" + assert ai_client_span["attributes"]["gen_ai.system"] == "openai" + assert ai_client_span["attributes"]["gen_ai.agent.name"] == "test_agent" + assert ai_client_span["attributes"]["gen_ai.request.max_tokens"] == 100 + assert ai_client_span["attributes"]["gen_ai.request.model"] == "gpt-4" + assert ai_client_span["attributes"]["gen_ai.request.temperature"] == 0.7 + assert ai_client_span["attributes"]["gen_ai.request.top_p"] == 1.0 - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in invoke_agent_span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in invoke_agent_span["attributes"] @pytest.mark.parametrize( @@ -658,7 +693,7 @@ def test_agent_invocation_span_sync_no_pii( ) def test_agent_invocation_span_sync( sentry_init, - capture_events, + capture_items, test_agent_with_instructions, nonstreaming_responses_model_response, instructions, @@ -688,7 +723,7 @@ def test_agent_invocation_span_sync( send_default_pii=True, ) - events = capture_events() + items = capture_items("span", "transaction") result = agents.Runner.run_sync( agent, @@ -699,36 +734,40 @@ def test_agent_invocation_span_sync( assert result is not None assert result.final_output == "Hello, how can I help you?" 
- (transaction,) = events - spans = transaction["spans"] - invoke_agent_span, ai_client_span = spans - + transactions = [item.payload for item in items if item.type == "transaction"] + assert len(transactions) == 1 + transaction = transactions[0] assert transaction["transaction"] == "test_agent workflow" assert transaction["contexts"]["trace"]["origin"] == "auto.ai.openai_agents" - assert invoke_agent_span["description"] == "invoke_agent test_agent" - assert invoke_agent_span["data"]["gen_ai.operation.name"] == "invoke_agent" - assert invoke_agent_span["data"]["gen_ai.system"] == "openai" - assert invoke_agent_span["data"]["gen_ai.agent.name"] == "test_agent" - assert invoke_agent_span["data"]["gen_ai.request.max_tokens"] == 100 - assert invoke_agent_span["data"]["gen_ai.request.model"] == "gpt-4" - assert invoke_agent_span["data"]["gen_ai.request.temperature"] == 0.7 - assert invoke_agent_span["data"]["gen_ai.request.top_p"] == 1.0 - - assert ai_client_span["description"] == "chat gpt-4" - assert ai_client_span["data"]["gen_ai.operation.name"] == "chat" - assert ai_client_span["data"]["gen_ai.system"] == "openai" - assert ai_client_span["data"]["gen_ai.agent.name"] == "test_agent" - assert ai_client_span["data"]["gen_ai.request.max_tokens"] == 100 - assert ai_client_span["data"]["gen_ai.request.model"] == "gpt-4" - assert ai_client_span["data"]["gen_ai.request.temperature"] == 0.7 - assert ai_client_span["data"]["gen_ai.request.top_p"] == 1.0 + spans = [item.payload for item in items if item.type == "span"] + invoke_agent_span, ai_client_span = spans + + assert invoke_agent_span["name"] == "invoke_agent test_agent" + assert invoke_agent_span["attributes"]["gen_ai.operation.name"] == "invoke_agent" + assert invoke_agent_span["attributes"]["gen_ai.system"] == "openai" + assert invoke_agent_span["attributes"]["gen_ai.agent.name"] == "test_agent" + assert invoke_agent_span["attributes"]["gen_ai.request.max_tokens"] == 100 + assert 
invoke_agent_span["attributes"]["gen_ai.request.model"] == "gpt-4" + assert invoke_agent_span["attributes"]["gen_ai.request.temperature"] == 0.7 + assert invoke_agent_span["attributes"]["gen_ai.request.top_p"] == 1.0 + + assert ai_client_span["name"] == "chat gpt-4" + assert ai_client_span["attributes"]["gen_ai.operation.name"] == "chat" + assert ai_client_span["attributes"]["gen_ai.system"] == "openai" + assert ai_client_span["attributes"]["gen_ai.agent.name"] == "test_agent" + assert ai_client_span["attributes"]["gen_ai.request.max_tokens"] == 100 + assert ai_client_span["attributes"]["gen_ai.request.model"] == "gpt-4" + assert ai_client_span["attributes"]["gen_ai.request.temperature"] == 0.7 + assert ai_client_span["attributes"]["gen_ai.request.top_p"] == 1.0 param_id = request.node.callspec.id if "string" in param_id and instructions is None: # type: ignore - assert "gen_ai.system_instructions" not in ai_client_span["data"] + assert "gen_ai.system_instructions" not in ai_client_span["attributes"] elif "string" in param_id: - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ { "type": "text", @@ -737,13 +776,17 @@ def test_agent_invocation_span_sync( ] ) elif "blocks_no_type" in param_id and instructions is None: # type: ignore - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ {"type": "text", "content": "You are a helpful assistant."}, ] ) elif "blocks_no_type" in param_id: - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ { "type": "text", @@ -753,13 +796,17 @@ def test_agent_invocation_span_sync( ] ) elif "blocks" in param_id and instructions is None: # type: ignore - assert 
ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ {"type": "text", "content": "You are a helpful assistant."}, ] ) elif "blocks" in param_id: - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ { "type": "text", @@ -769,14 +816,18 @@ def test_agent_invocation_span_sync( ] ) elif "parts_no_type" in param_id and instructions is None: - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ {"type": "text", "content": "You are a helpful assistant."}, {"type": "text", "content": "Be concise and clear."}, ] ) elif "parts_no_type" in param_id: - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ { "type": "text", @@ -787,14 +838,18 @@ def test_agent_invocation_span_sync( ] ) elif instructions is None: # type: ignore - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ {"type": "text", "content": "You are a helpful assistant."}, {"type": "text", "content": "Be concise and clear."}, ] ) else: - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ { "type": "text", @@ -807,7 +862,7 @@ def test_agent_invocation_span_sync( @pytest.mark.asyncio -async def test_handoff_span(sentry_init, capture_events, get_model_response): +async def test_handoff_span(sentry_init, capture_items, get_model_response): """ Test that handoff spans are created when agents hand off to 
other agents. """ @@ -910,7 +965,7 @@ async def test_handoff_span(sentry_init, capture_events, get_model_response): traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") result = await agents.Runner.run( primary_agent, @@ -920,21 +975,22 @@ async def test_handoff_span(sentry_init, capture_events, get_model_response): assert result is not None - (transaction,) = events - spans = transaction["spans"] - handoff_span = next(span for span in spans if span.get("op") == OP.GEN_AI_HANDOFF) + spans = [item.payload for item in items if item.type == "span"] + handoff_span = next( + span + for span in spans + if span["attributes"].get("sentry.op") == OP.GEN_AI_HANDOFF + ) # Verify handoff span was created assert handoff_span is not None - assert ( - handoff_span["description"] == "handoff from primary_agent to secondary_agent" - ) - assert handoff_span["data"]["gen_ai.operation.name"] == "handoff" + assert handoff_span["name"] == "handoff from primary_agent to secondary_agent" + assert handoff_span["attributes"]["gen_ai.operation.name"] == "handoff" @pytest.mark.asyncio async def test_max_turns_before_handoff_span( - sentry_init, capture_events, get_model_response + sentry_init, capture_items, get_model_response ): """ Example raising agents.exceptions.AgentsException after the agent invocation span is complete. 
@@ -1038,7 +1094,7 @@ async def test_max_turns_before_handoff_span( traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") with pytest.raises(MaxTurnsExceeded): await agents.Runner.run( @@ -1048,22 +1104,23 @@ async def test_max_turns_before_handoff_span( max_turns=1, ) - (error, transaction) = events - spans = transaction["spans"] - handoff_span = next(span for span in spans if span.get("op") == OP.GEN_AI_HANDOFF) + spans = [item.payload for item in items if item.type == "span"] + handoff_span = next( + span + for span in spans + if span["attributes"].get("sentry.op") == OP.GEN_AI_HANDOFF + ) # Verify handoff span was created assert handoff_span is not None - assert ( - handoff_span["description"] == "handoff from primary_agent to secondary_agent" - ) - assert handoff_span["data"]["gen_ai.operation.name"] == "handoff" + assert handoff_span["name"] == "handoff from primary_agent to secondary_agent" + assert handoff_span["attributes"]["gen_ai.operation.name"] == "handoff" @pytest.mark.asyncio async def test_tool_execution_span( sentry_init, - capture_events, + capture_items, test_agent, get_model_response, responses_tool_call_model_responses, @@ -1135,7 +1192,7 @@ def simple_test_tool(message: str) -> str: send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") await agents.Runner.run( agent_with_tool, @@ -1143,13 +1200,26 @@ def simple_test_tool(message: str) -> str: run_config=test_run_config, ) - (transaction,) = events - spans = transaction["spans"] - agent_span = next(span for span in spans if span["op"] == OP.GEN_AI_INVOKE_AGENT) + transactions = [item.payload for item in items if item.type == "transaction"] + assert len(transactions) == 1 + transaction = transactions[0] + assert transaction["transaction"] == "test_agent workflow" + assert transaction["contexts"]["trace"]["origin"] == "auto.ai.openai_agents" + + spans = [item.payload for item in items if item.type == 
"span"] + agent_span = next( + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT + ) ai_client_span1, ai_client_span2 = ( - span for span in spans if span["op"] == OP.GEN_AI_CHAT + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + ) + tool_span = next( + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_EXECUTE_TOOL ) - tool_span = next(span for span in spans if span["op"] == OP.GEN_AI_EXECUTE_TOOL) available_tool = { "name": "simple_test_tool", @@ -1189,39 +1259,36 @@ def simple_test_tool(message: str) -> str: } ) - assert transaction["transaction"] == "test_agent workflow" - assert transaction["contexts"]["trace"]["origin"] == "auto.ai.openai_agents" - - assert agent_span["description"] == "invoke_agent test_agent" - assert agent_span["origin"] == "auto.ai.openai_agents" - assert agent_span["data"]["gen_ai.agent.name"] == "test_agent" - assert agent_span["data"]["gen_ai.operation.name"] == "invoke_agent" + assert agent_span["name"] == "invoke_agent test_agent" + assert agent_span["attributes"]["sentry.origin"] == "auto.ai.openai_agents" + assert agent_span["attributes"]["gen_ai.agent.name"] == "test_agent" + assert agent_span["attributes"]["gen_ai.operation.name"] == "invoke_agent" agent_span_available_tool = json.loads( - agent_span["data"]["gen_ai.request.available_tools"] + agent_span["attributes"]["gen_ai.request.available_tools"] )[0] assert all(agent_span_available_tool[k] == v for k, v in available_tool.items()) - assert agent_span["data"]["gen_ai.request.max_tokens"] == 100 - assert agent_span["data"]["gen_ai.request.model"] == "gpt-4" - assert agent_span["data"]["gen_ai.request.temperature"] == 0.7 - assert agent_span["data"]["gen_ai.request.top_p"] == 1.0 - assert agent_span["data"]["gen_ai.system"] == "openai" + assert agent_span["attributes"]["gen_ai.request.max_tokens"] == 100 + assert agent_span["attributes"]["gen_ai.request.model"] == "gpt-4" + assert 
agent_span["attributes"]["gen_ai.request.temperature"] == 0.7 + assert agent_span["attributes"]["gen_ai.request.top_p"] == 1.0 + assert agent_span["attributes"]["gen_ai.system"] == "openai" - assert ai_client_span1["description"] == "chat gpt-4" - assert ai_client_span1["data"]["gen_ai.operation.name"] == "chat" - assert ai_client_span1["data"]["gen_ai.system"] == "openai" - assert ai_client_span1["data"]["gen_ai.agent.name"] == "test_agent" + assert ai_client_span1["name"] == "chat gpt-4" + assert ai_client_span1["attributes"]["gen_ai.operation.name"] == "chat" + assert ai_client_span1["attributes"]["gen_ai.system"] == "openai" + assert ai_client_span1["attributes"]["gen_ai.agent.name"] == "test_agent" ai_client_span1_available_tool = json.loads( - ai_client_span1["data"]["gen_ai.request.available_tools"] + ai_client_span1["attributes"]["gen_ai.request.available_tools"] )[0] assert all( ai_client_span1_available_tool[k] == v for k, v in available_tool.items() ) - assert ai_client_span1["data"]["gen_ai.request.max_tokens"] == 100 - assert ai_client_span1["data"]["gen_ai.request.messages"] == safe_serialize( + assert ai_client_span1["attributes"]["gen_ai.request.max_tokens"] == 100 + assert ai_client_span1["attributes"]["gen_ai.request.messages"] == safe_serialize( [ { "role": "user", @@ -1231,14 +1298,14 @@ def simple_test_tool(message: str) -> str: }, ] ) - assert ai_client_span1["data"]["gen_ai.request.model"] == "gpt-4" - assert ai_client_span1["data"]["gen_ai.request.temperature"] == 0.7 - assert ai_client_span1["data"]["gen_ai.request.top_p"] == 1.0 - assert ai_client_span1["data"]["gen_ai.usage.input_tokens"] == 10 - assert ai_client_span1["data"]["gen_ai.usage.input_tokens.cached"] == 0 - assert ai_client_span1["data"]["gen_ai.usage.output_tokens"] == 5 - assert ai_client_span1["data"]["gen_ai.usage.output_tokens.reasoning"] == 0 - assert ai_client_span1["data"]["gen_ai.usage.total_tokens"] == 15 + assert ai_client_span1["attributes"]["gen_ai.request.model"] 
== "gpt-4" + assert ai_client_span1["attributes"]["gen_ai.request.temperature"] == 0.7 + assert ai_client_span1["attributes"]["gen_ai.request.top_p"] == 1.0 + assert ai_client_span1["attributes"]["gen_ai.usage.input_tokens"] == 10 + assert ai_client_span1["attributes"]["gen_ai.usage.input_tokens.cached"] == 0 + assert ai_client_span1["attributes"]["gen_ai.usage.output_tokens"] == 5 + assert ai_client_span1["attributes"]["gen_ai.usage.output_tokens.reasoning"] == 0 + assert ai_client_span1["attributes"]["gen_ai.usage.total_tokens"] == 15 tool_call = { "arguments": '{"message": "hello"}', @@ -1252,41 +1319,41 @@ def simple_test_tool(message: str) -> str: if OPENAI_VERSION >= (2, 25, 0): tool_call["namespace"] = None - assert json.loads(ai_client_span1["data"]["gen_ai.response.tool_calls"]) == [ + assert json.loads(ai_client_span1["attributes"]["gen_ai.response.tool_calls"]) == [ tool_call ] - assert tool_span["description"] == "execute_tool simple_test_tool" - assert tool_span["data"]["gen_ai.agent.name"] == "test_agent" - assert tool_span["data"]["gen_ai.operation.name"] == "execute_tool" + assert tool_span["name"] == "execute_tool simple_test_tool" + assert tool_span["attributes"]["gen_ai.agent.name"] == "test_agent" + assert tool_span["attributes"]["gen_ai.operation.name"] == "execute_tool" tool_span_available_tool = json.loads( - tool_span["data"]["gen_ai.request.available_tools"] + tool_span["attributes"]["gen_ai.request.available_tools"] )[0] assert all(tool_span_available_tool[k] == v for k, v in available_tool.items()) - assert tool_span["data"]["gen_ai.request.max_tokens"] == 100 - assert tool_span["data"]["gen_ai.request.model"] == "gpt-4" - assert tool_span["data"]["gen_ai.request.temperature"] == 0.7 - assert tool_span["data"]["gen_ai.request.top_p"] == 1.0 - assert tool_span["data"]["gen_ai.system"] == "openai" - assert tool_span["data"]["gen_ai.tool.description"] == "A simple tool" - assert tool_span["data"]["gen_ai.tool.input"] == '{"message": 
"hello"}' - assert tool_span["data"]["gen_ai.tool.name"] == "simple_test_tool" - assert tool_span["data"]["gen_ai.tool.output"] == "Tool executed with: hello" - assert ai_client_span2["description"] == "chat gpt-4" - assert ai_client_span2["data"]["gen_ai.agent.name"] == "test_agent" - assert ai_client_span2["data"]["gen_ai.operation.name"] == "chat" + assert tool_span["attributes"]["gen_ai.request.max_tokens"] == 100 + assert tool_span["attributes"]["gen_ai.request.model"] == "gpt-4" + assert tool_span["attributes"]["gen_ai.request.temperature"] == 0.7 + assert tool_span["attributes"]["gen_ai.request.top_p"] == 1.0 + assert tool_span["attributes"]["gen_ai.system"] == "openai" + assert tool_span["attributes"]["gen_ai.tool.description"] == "A simple tool" + assert tool_span["attributes"]["gen_ai.tool.input"] == '{"message": "hello"}' + assert tool_span["attributes"]["gen_ai.tool.name"] == "simple_test_tool" + assert tool_span["attributes"]["gen_ai.tool.output"] == "Tool executed with: hello" + assert ai_client_span2["name"] == "chat gpt-4" + assert ai_client_span2["attributes"]["gen_ai.agent.name"] == "test_agent" + assert ai_client_span2["attributes"]["gen_ai.operation.name"] == "chat" ai_client_span2_available_tool = json.loads( - ai_client_span2["data"]["gen_ai.request.available_tools"] + ai_client_span2["attributes"]["gen_ai.request.available_tools"] )[0] assert all( ai_client_span2_available_tool[k] == v for k, v in available_tool.items() ) - assert ai_client_span2["data"]["gen_ai.request.max_tokens"] == 100 - assert ai_client_span2["data"]["gen_ai.request.messages"] == safe_serialize( + assert ai_client_span2["attributes"]["gen_ai.request.max_tokens"] == 100 + assert ai_client_span2["attributes"]["gen_ai.request.messages"] == safe_serialize( [ { "role": "tool", @@ -1300,19 +1367,19 @@ def simple_test_tool(message: str) -> str: }, ] ) - assert ai_client_span2["data"]["gen_ai.request.model"] == "gpt-4" - assert 
ai_client_span2["data"]["gen_ai.request.temperature"] == 0.7 - assert ai_client_span2["data"]["gen_ai.request.top_p"] == 1.0 + assert ai_client_span2["attributes"]["gen_ai.request.model"] == "gpt-4" + assert ai_client_span2["attributes"]["gen_ai.request.temperature"] == 0.7 + assert ai_client_span2["attributes"]["gen_ai.request.top_p"] == 1.0 assert ( - ai_client_span2["data"]["gen_ai.response.text"] + ai_client_span2["attributes"]["gen_ai.response.text"] == "Task completed using the tool" ) - assert ai_client_span2["data"]["gen_ai.system"] == "openai" - assert ai_client_span2["data"]["gen_ai.usage.input_tokens.cached"] == 0 - assert ai_client_span2["data"]["gen_ai.usage.input_tokens"] == 15 - assert ai_client_span2["data"]["gen_ai.usage.output_tokens.reasoning"] == 0 - assert ai_client_span2["data"]["gen_ai.usage.output_tokens"] == 10 - assert ai_client_span2["data"]["gen_ai.usage.total_tokens"] == 25 + assert ai_client_span2["attributes"]["gen_ai.system"] == "openai" + assert ai_client_span2["attributes"]["gen_ai.usage.input_tokens.cached"] == 0 + assert ai_client_span2["attributes"]["gen_ai.usage.input_tokens"] == 15 + assert ai_client_span2["attributes"]["gen_ai.usage.output_tokens.reasoning"] == 0 + assert ai_client_span2["attributes"]["gen_ai.usage.output_tokens"] == 10 + assert ai_client_span2["attributes"]["gen_ai.usage.total_tokens"] == 25 @pytest.mark.asyncio @@ -1570,7 +1637,7 @@ async def test_hosted_mcp_tool_propagation_headers( @pytest.mark.asyncio -async def test_model_behavior_error(sentry_init, capture_events, test_agent): +async def test_model_behavior_error(sentry_init, capture_items, test_agent): """ Example raising agents.exceptions.AgentsException before the agent invocation span is complete. The mocked API response indicates that "wrong_tool" was called. 
@@ -1613,7 +1680,7 @@ def simple_test_tool(message: str) -> str: send_default_pii=True, ) - events = capture_events() + items = capture_items("span", "transaction") with pytest.raises(ModelBehaviorError): await agents.Runner.run( @@ -1622,26 +1689,27 @@ def simple_test_tool(message: str) -> str: run_config=test_run_config, ) - (error, transaction) = events - spans = transaction["spans"] + transactions = [item.payload for item in items if item.type == "transaction"] + assert len(transactions) == 1 + transaction = transactions[0] + assert transaction["transaction"] == "test_agent workflow" + assert transaction["contexts"]["trace"]["origin"] == "auto.ai.openai_agents" + + spans = [item.payload for item in items if item.type == "span"] ( agent_span, ai_client_span1, ) = spans - assert transaction["transaction"] == "test_agent workflow" - assert transaction["contexts"]["trace"]["origin"] == "auto.ai.openai_agents" - - assert agent_span["description"] == "invoke_agent test_agent" - assert agent_span["origin"] == "auto.ai.openai_agents" + assert agent_span["name"] == "invoke_agent test_agent" + assert agent_span["attributes"]["sentry.origin"] == "auto.ai.openai_agents" # Error due to unrecognized tool in model response. - assert agent_span["status"] == "internal_error" - assert agent_span["tags"]["status"] == "internal_error" + assert agent_span["status"] == "error" @pytest.mark.asyncio -async def test_error_handling(sentry_init, capture_events, test_agent): +async def test_error_handling(sentry_init, capture_items, test_agent): """ Test error handling in agent execution. 
""" @@ -1660,39 +1728,39 @@ async def test_error_handling(sentry_init, capture_events, test_agent): traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("error", "span", "transaction") with pytest.raises(Exception, match="Model Error"): await agents.Runner.run( test_agent, "Test input", run_config=test_run_config ) - ( - error_event, - transaction, - ) = events - + error_events = [item.payload for item in items if item.type == "event"] + assert len(error_events) == 1 + error_event = error_events[0] assert error_event["exception"]["values"][0]["type"] == "Exception" assert error_event["exception"]["values"][0]["value"] == "Model Error" assert error_event["exception"]["values"][0]["mechanism"]["type"] == "openai_agents" - spans = transaction["spans"] - (invoke_agent_span, ai_client_span) = spans - + transactions = [item.payload for item in items if item.type == "transaction"] + assert len(transactions) == 1 + transaction = transactions[0] assert transaction["transaction"] == "test_agent workflow" assert transaction["contexts"]["trace"]["origin"] == "auto.ai.openai_agents" - assert invoke_agent_span["description"] == "invoke_agent test_agent" - assert invoke_agent_span["origin"] == "auto.ai.openai_agents" + spans = [item.payload for item in items if item.type == "span"] + (invoke_agent_span, ai_client_span) = spans + + assert invoke_agent_span["name"] == "invoke_agent test_agent" + assert invoke_agent_span["attributes"]["sentry.origin"] == "auto.ai.openai_agents" - assert ai_client_span["description"] == "chat gpt-4" - assert ai_client_span["origin"] == "auto.ai.openai_agents" - assert ai_client_span["status"] == "internal_error" - assert ai_client_span["tags"]["status"] == "internal_error" + assert ai_client_span["name"] == "chat gpt-4" + assert ai_client_span["attributes"]["sentry.origin"] == "auto.ai.openai_agents" + assert ai_client_span["status"] == "error" @pytest.mark.asyncio -async def test_error_captures_input_data(sentry_init, 
capture_events, test_agent): +async def test_error_captures_input_data(sentry_init, capture_items, test_agent): """ Test that input data is captured even when the API call raises an exception. This verifies that _set_input_data is called before the API call. @@ -1725,37 +1793,36 @@ async def test_error_captures_input_data(sentry_init, capture_events, test_agent send_default_pii=True, ) - events = capture_events() + items = capture_items("error", "span") with pytest.raises(InternalServerError, match="Error code: 500"): await agents.Runner.run(agent, "Test input", run_config=test_run_config) - ( - error_event, - transaction, - ) = events - + error_events = [item.payload for item in items if item.type == "event"] + assert len(error_events) == 1 + error_event = error_events[0] assert error_event["exception"]["values"][0]["type"] == "InternalServerError" assert error_event["exception"]["values"][0]["value"] == "Error code: 500" - spans = transaction["spans"] - ai_client_span = [s for s in spans if s["op"] == "gen_ai.chat"][0] + spans = [item.payload for item in items if item.type == "span"] + ai_client_span = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ][0] - assert ai_client_span["description"] == "chat gpt-4" - assert ai_client_span["status"] == "internal_error" - assert ai_client_span["tags"]["status"] == "internal_error" + assert ai_client_span["name"] == "chat gpt-4" + assert ai_client_span["status"] == "error" - assert "gen_ai.request.messages" in ai_client_span["data"] + assert "gen_ai.request.messages" in ai_client_span["attributes"] request_messages = safe_serialize( [ {"role": "user", "content": [{"type": "text", "text": "Test input"}]}, ] ) - assert ai_client_span["data"]["gen_ai.request.messages"] == request_messages + assert ai_client_span["attributes"]["gen_ai.request.messages"] == request_messages @pytest.mark.asyncio -async def test_span_status_error(sentry_init, capture_events, test_agent): +async def 
test_span_status_error(sentry_init, capture_items, test_agent): with patch.dict(os.environ, {"OPENAI_API_KEY": "test-key"}): with patch( "agents.models.openai_responses.OpenAIResponsesModel.get_response" @@ -1770,23 +1837,26 @@ async def test_span_status_error(sentry_init, capture_events, test_agent): traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("event", "transaction", "span") with pytest.raises(ValueError, match="Model Error"): await agents.Runner.run( test_agent, "Test input", run_config=test_run_config ) - (error, transaction) = events + (error,) = (item.payload for item in items if item.type == "event") assert error["level"] == "error" - assert transaction["spans"][0]["status"] == "internal_error" - assert transaction["spans"][0]["tags"]["status"] == "internal_error" - assert transaction["contexts"]["trace"]["status"] == "internal_error" + + spans = [item.payload for item in items if item.type == "span"] + assert spans[0]["status"] == "error" + + (transaction,) = (item.payload for item in items if item.type == "transaction") + assert transaction["contexts"]["trace"]["status"] == "error" @pytest.mark.asyncio async def test_mcp_tool_execution_spans( - sentry_init, capture_events, test_agent, get_model_response + sentry_init, capture_items, test_agent, get_model_response ): """ Test that MCP (Model Context Protocol) tool calls create execute_tool spans. 
@@ -1880,7 +1950,7 @@ async def test_mcp_tool_execution_spans( send_default_pii=True, ) - events = capture_events() + items = capture_items("span", "transaction") await agents.Runner.run( agent, @@ -1888,33 +1958,35 @@ async def test_mcp_tool_execution_spans( run_config=test_run_config, ) - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # Find the MCP execute_tool span mcp_tool_span = None for span in spans: - if span.get("description") == "execute_tool test_mcp_tool": + if span.get("name") == "execute_tool test_mcp_tool": mcp_tool_span = span break # Verify the MCP tool span was created assert mcp_tool_span is not None, "MCP execute_tool span was not created" - assert mcp_tool_span["description"] == "execute_tool test_mcp_tool" - assert mcp_tool_span["data"]["gen_ai.tool.name"] == "test_mcp_tool" - assert mcp_tool_span["data"]["gen_ai.tool.input"] == '{"query": "search term"}' + assert mcp_tool_span["name"] == "execute_tool test_mcp_tool" + assert mcp_tool_span["attributes"]["gen_ai.tool.name"] == "test_mcp_tool" assert ( - mcp_tool_span["data"]["gen_ai.tool.output"] == "MCP tool executed successfully" + mcp_tool_span["attributes"]["gen_ai.tool.input"] == '{"query": "search term"}' + ) + assert ( + mcp_tool_span["attributes"]["gen_ai.tool.output"] + == "MCP tool executed successfully" ) # Verify no error status since error was None - assert mcp_tool_span.get("status") != "internal_error" - assert mcp_tool_span.get("tags", {}).get("status") != "internal_error" + assert mcp_tool_span.get("status") != "error" + assert mcp_tool_span.get("tags", {}).get("status") != "error" @pytest.mark.asyncio async def test_mcp_tool_execution_with_error( - sentry_init, capture_events, test_agent, get_model_response + sentry_init, capture_items, test_agent, get_model_response ): """ Test that MCP tool calls with errors are tracked with error status. 
@@ -2008,7 +2080,7 @@ async def test_mcp_tool_execution_with_error( send_default_pii=True, ) - events = capture_events() + items = capture_items("span", "transaction") await agents.Runner.run( agent, @@ -2016,31 +2088,29 @@ async def test_mcp_tool_execution_with_error( run_config=test_run_config, ) - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # Find the MCP execute_tool span with error mcp_tool_span = None for span in spans: - if span.get("description") == "execute_tool failing_mcp_tool": + if span.get("name") == "execute_tool failing_mcp_tool": mcp_tool_span = span break # Verify the MCP tool span was created with error status assert mcp_tool_span is not None, "MCP execute_tool span was not created" - assert mcp_tool_span["description"] == "execute_tool failing_mcp_tool" - assert mcp_tool_span["data"]["gen_ai.tool.name"] == "failing_mcp_tool" - assert mcp_tool_span["data"]["gen_ai.tool.input"] == '{"query": "test"}' - assert mcp_tool_span["data"]["gen_ai.tool.output"] is None + assert mcp_tool_span["name"] == "execute_tool failing_mcp_tool" + assert mcp_tool_span["attributes"]["gen_ai.tool.name"] == "failing_mcp_tool" + assert mcp_tool_span["attributes"]["gen_ai.tool.input"] == '{"query": "test"}' + assert mcp_tool_span["attributes"]["gen_ai.tool.output"] is None # Verify error status was set - assert mcp_tool_span["status"] == "internal_error" - assert mcp_tool_span["tags"]["status"] == "internal_error" + assert mcp_tool_span["status"] == "error" @pytest.mark.asyncio async def test_mcp_tool_execution_without_pii( - sentry_init, capture_events, test_agent, get_model_response + sentry_init, capture_items, test_agent, get_model_response ): """ Test that MCP tool input/output are not included when send_default_pii is False. 
@@ -2134,7 +2204,7 @@ async def test_mcp_tool_execution_without_pii( send_default_pii=False, # PII disabled ) - events = capture_events() + items = capture_items("span", "transaction") await agents.Runner.run( agent, @@ -2142,30 +2212,29 @@ async def test_mcp_tool_execution_without_pii( run_config=test_run_config, ) - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # Find the MCP execute_tool span mcp_tool_span = None for span in spans: - if span.get("description") == "execute_tool test_mcp_tool": + if span.get("name") == "execute_tool test_mcp_tool": mcp_tool_span = span break # Verify the MCP tool span was created but without input/output assert mcp_tool_span is not None, "MCP execute_tool span was not created" - assert mcp_tool_span["description"] == "execute_tool test_mcp_tool" - assert mcp_tool_span["data"]["gen_ai.tool.name"] == "test_mcp_tool" + assert mcp_tool_span["name"] == "execute_tool test_mcp_tool" + assert mcp_tool_span["attributes"]["gen_ai.tool.name"] == "test_mcp_tool" # Verify input and output are not included when send_default_pii is False - assert "gen_ai.tool.input" not in mcp_tool_span["data"] - assert "gen_ai.tool.output" not in mcp_tool_span["data"] + assert "gen_ai.tool.input" not in mcp_tool_span["attributes"] + assert "gen_ai.tool.output" not in mcp_tool_span["attributes"] @pytest.mark.asyncio async def test_multiple_agents_asyncio( sentry_init, - capture_events, + capture_items, test_agent, nonstreaming_responses_model_response, get_model_response, @@ -2192,7 +2261,7 @@ async def test_multiple_agents_asyncio( traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span", "transaction") async def run(): await agents.Runner.run( @@ -2203,14 +2272,10 @@ async def run(): await asyncio.gather(*[run() for _ in range(3)]) - assert len(events) == 3 - txn1, txn2, txn3 = events + txn1, txn2, txn3 = (item.payload for item in items if item.type == 
"transaction") - assert txn1["type"] == "transaction" assert txn1["transaction"] == "test_agent workflow" - assert txn2["type"] == "transaction" assert txn2["transaction"] == "test_agent workflow" - assert txn3["type"] == "transaction" assert txn3["transaction"] == "test_agent workflow" @@ -2230,7 +2295,7 @@ async def run(): ], ) def test_openai_agents_message_role_mapping( - sentry_init, capture_events, test_message, expected_role + sentry_init, capture_items, test_message, expected_role ): """Test that OpenAI Agents integration properly maps message roles like 'ai' to 'assistant'""" sentry_init( @@ -2259,7 +2324,7 @@ def test_openai_agents_message_role_mapping( @pytest.mark.asyncio async def test_tool_execution_error_tracing( sentry_init, - capture_events, + capture_items, test_agent, get_model_response, responses_tool_call_model_responses, @@ -2338,7 +2403,7 @@ def failing_tool(message: str) -> str: send_default_pii=True, ) - events = capture_events() + items = capture_items("span", "transaction") # Note: The agents library catches tool exceptions internally, # so we don't expect this to raise @@ -2348,13 +2413,12 @@ def failing_tool(message: str) -> str: run_config=test_run_config, ) - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # Find the execute_tool span execute_tool_span = None for span in spans: - description = span.get("description", "") + description = span.get("name", "") if description is not None and description.startswith( "execute_tool failing_tool" ): @@ -2363,19 +2427,18 @@ def failing_tool(message: str) -> str: # Verify the execute_tool span was created assert execute_tool_span is not None, "execute_tool span was not created" - assert execute_tool_span["description"] == "execute_tool failing_tool" - assert execute_tool_span["data"]["gen_ai.tool.name"] == "failing_tool" + assert execute_tool_span["name"] == "execute_tool failing_tool" + assert 
execute_tool_span["attributes"]["gen_ai.tool.name"] == "failing_tool" # Verify error status was set (this is the key test for our patch) # The span should be marked as error because the tool execution failed - assert execute_tool_span["status"] == "internal_error" - assert execute_tool_span["tags"]["status"] == "internal_error" + assert execute_tool_span["status"] == "error" @pytest.mark.asyncio async def test_invoke_agent_span_includes_usage_data( sentry_init, - capture_events, + capture_items, test_agent, get_model_response, ): @@ -2437,7 +2500,7 @@ async def test_invoke_agent_span_includes_usage_data( send_default_pii=True, ) - events = capture_events() + items = capture_items("span", "transaction") result = await agents.Runner.run( agent, "Test input", run_config=test_run_config @@ -2445,29 +2508,30 @@ async def test_invoke_agent_span_includes_usage_data( assert result is not None - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] invoke_agent_span = next( - span for span in spans if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT ) # Verify invoke_agent span has usage data from context_wrapper - assert invoke_agent_span["description"] == "invoke_agent test_agent" - assert "gen_ai.usage.input_tokens" in invoke_agent_span["data"] - assert "gen_ai.usage.output_tokens" in invoke_agent_span["data"] - assert "gen_ai.usage.total_tokens" in invoke_agent_span["data"] + assert invoke_agent_span["name"] == "invoke_agent test_agent" + assert "gen_ai.usage.input_tokens" in invoke_agent_span["attributes"] + assert "gen_ai.usage.output_tokens" in invoke_agent_span["attributes"] + assert "gen_ai.usage.total_tokens" in invoke_agent_span["attributes"] - assert invoke_agent_span["data"]["gen_ai.usage.input_tokens"] == 10 - assert invoke_agent_span["data"]["gen_ai.usage.output_tokens"] == 20 - assert 
invoke_agent_span["data"]["gen_ai.usage.total_tokens"] == 30 - assert invoke_agent_span["data"]["gen_ai.usage.input_tokens.cached"] == 0 - assert invoke_agent_span["data"]["gen_ai.usage.output_tokens.reasoning"] == 5 + assert invoke_agent_span["attributes"]["gen_ai.usage.input_tokens"] == 10 + assert invoke_agent_span["attributes"]["gen_ai.usage.output_tokens"] == 20 + assert invoke_agent_span["attributes"]["gen_ai.usage.total_tokens"] == 30 + assert invoke_agent_span["attributes"]["gen_ai.usage.input_tokens.cached"] == 0 + assert invoke_agent_span["attributes"]["gen_ai.usage.output_tokens.reasoning"] == 5 @pytest.mark.asyncio async def test_ai_client_span_includes_response_model( sentry_init, - capture_events, + capture_items, test_agent, get_model_response, ): @@ -2529,7 +2593,7 @@ async def test_ai_client_span_includes_response_model( send_default_pii=True, ) - events = capture_events() + items = capture_items("span", "transaction") result = await agents.Runner.run( agent, "Test input", run_config=test_run_config @@ -2537,20 +2601,21 @@ async def test_ai_client_span_includes_response_model( assert result is not None - (transaction,) = events - spans = transaction["spans"] - ai_client_span = next(span for span in spans if span["op"] == OP.GEN_AI_CHAT) + spans = [item.payload for item in items if item.type == "span"] + ai_client_span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + ) # Verify ai_client span has response model from API response - assert ai_client_span["description"] == "chat gpt-4" - assert "gen_ai.response.model" in ai_client_span["data"] - assert ai_client_span["data"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" + assert ai_client_span["name"] == "chat gpt-4" + assert "gen_ai.response.model" in ai_client_span["attributes"] + assert ai_client_span["attributes"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" @pytest.mark.asyncio async def test_ai_client_span_response_model_with_chat_completions( 
sentry_init, - capture_events, + capture_items, get_model_response, ): """ @@ -2616,7 +2681,7 @@ async def test_ai_client_span_response_model_with_chat_completions( traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span", "transaction") result = await agents.Runner.run( agent, "Test input", run_config=test_run_config @@ -2624,18 +2689,22 @@ async def test_ai_client_span_response_model_with_chat_completions( assert result is not None - (transaction,) = events - spans = transaction["spans"] - ai_client_span = next(span for span in spans if span["op"] == OP.GEN_AI_CHAT) + spans = [item.payload for item in items if item.type == "span"] + ai_client_span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + ) # Verify response model from API response is captured - assert "gen_ai.response.model" in ai_client_span["data"] - assert ai_client_span["data"]["gen_ai.response.model"] == "gpt-4o-mini-2024-07-18" + assert "gen_ai.response.model" in ai_client_span["attributes"] + assert ( + ai_client_span["attributes"]["gen_ai.response.model"] + == "gpt-4o-mini-2024-07-18" + ) @pytest.mark.asyncio async def test_multiple_llm_calls_aggregate_usage( - sentry_init, capture_events, test_agent, get_model_response + sentry_init, capture_items, test_agent, get_model_response ): """ Test that invoke_agent spans show aggregated usage across multiple LLM calls @@ -2734,7 +2803,7 @@ def calculator(a: int, b: int) -> int: send_default_pii=True, ) - events = capture_events() + items = capture_items("span", "transaction") result = await agents.Runner.run( agent_with_tool, @@ -2744,25 +2813,24 @@ def calculator(a: int, b: int) -> int: assert result is not None - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] invoke_agent_span = spans[0] # Verify invoke_agent span has aggregated usage from both API calls # Total: 10 + 20 = 30 input tokens, 5 + 15 = 20 output 
tokens, 15 + 35 = 50 total - assert invoke_agent_span["data"]["gen_ai.usage.input_tokens"] == 30 - assert invoke_agent_span["data"]["gen_ai.usage.output_tokens"] == 20 - assert invoke_agent_span["data"]["gen_ai.usage.total_tokens"] == 50 + assert invoke_agent_span["attributes"]["gen_ai.usage.input_tokens"] == 30 + assert invoke_agent_span["attributes"]["gen_ai.usage.output_tokens"] == 20 + assert invoke_agent_span["attributes"]["gen_ai.usage.total_tokens"] == 50 # Cached tokens should be aggregated: 0 + 5 = 5 - assert invoke_agent_span["data"]["gen_ai.usage.input_tokens.cached"] == 5 + assert invoke_agent_span["attributes"]["gen_ai.usage.input_tokens.cached"] == 5 # Reasoning tokens should be aggregated: 0 + 3 = 3 - assert invoke_agent_span["data"]["gen_ai.usage.output_tokens.reasoning"] == 3 + assert invoke_agent_span["attributes"]["gen_ai.usage.output_tokens.reasoning"] == 3 @pytest.mark.asyncio async def test_invoke_agent_span_includes_response_model( sentry_init, - capture_events, + capture_items, test_agent, get_model_response, ): @@ -2823,7 +2891,7 @@ async def test_invoke_agent_span_includes_response_model( send_default_pii=True, ) - events = capture_events() + items = capture_items("span", "transaction") result = await agents.Runner.run( agent, "Test input", run_config=test_run_config @@ -2831,27 +2899,32 @@ async def test_invoke_agent_span_includes_response_model( assert result is not None - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] invoke_agent_span = next( - span for span in spans if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT + ) + ai_client_span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT ) - ai_client_span = next(span for span in spans if span["op"] == OP.GEN_AI_CHAT) # Verify invoke_agent span has response model from API - assert 
invoke_agent_span["description"] == "invoke_agent test_agent" - assert "gen_ai.response.model" in invoke_agent_span["data"] - assert invoke_agent_span["data"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" + assert invoke_agent_span["name"] == "invoke_agent test_agent" + assert "gen_ai.response.model" in invoke_agent_span["attributes"] + assert ( + invoke_agent_span["attributes"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" + ) # Also verify ai_client span has it - assert "gen_ai.response.model" in ai_client_span["data"] - assert ai_client_span["data"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" + assert "gen_ai.response.model" in ai_client_span["attributes"] + assert ai_client_span["attributes"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" @pytest.mark.asyncio async def test_invoke_agent_span_uses_last_response_model( sentry_init, - capture_events, + capture_items, test_agent, get_model_response, ): @@ -2952,7 +3025,7 @@ def calculator(a: int, b: int) -> int: send_default_pii=True, ) - events = capture_events() + items = capture_items("span", "transaction") result = await agents.Runner.run( agent_with_tool, @@ -2962,24 +3035,26 @@ def calculator(a: int, b: int) -> int: assert result is not None - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] invoke_agent_span = spans[0] first_ai_client_span = spans[1] second_ai_client_span = spans[3] # After tool span # Invoke_agent span uses the LAST response model - assert "gen_ai.response.model" in invoke_agent_span["data"] - assert invoke_agent_span["data"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" + assert "gen_ai.response.model" in invoke_agent_span["attributes"] + assert ( + invoke_agent_span["attributes"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" + ) # Each ai_client span has its own response model from the API - assert first_ai_client_span["data"]["gen_ai.response.model"] == "gpt-4-0613" + assert 
first_ai_client_span["attributes"]["gen_ai.response.model"] == "gpt-4-0613" assert ( - second_ai_client_span["data"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" + second_ai_client_span["attributes"]["gen_ai.response.model"] + == "gpt-4.1-2025-04-14" ) -def test_openai_agents_message_truncation(sentry_init, capture_events): +def test_openai_agents_message_truncation(sentry_init, capture_items): """Test that large messages are truncated properly in OpenAI Agents integration.""" large_content = ( @@ -3230,7 +3305,7 @@ async def test_streaming_ttft_on_chat_span( @pytest.mark.asyncio async def test_conversation_id_on_all_spans( sentry_init, - capture_events, + capture_items, test_agent, nonstreaming_responses_model_response, get_model_response, @@ -3257,7 +3332,7 @@ async def test_conversation_id_on_all_spans( traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span", "transaction") result = await agents.Runner.run( agent, @@ -3268,24 +3343,28 @@ async def test_conversation_id_on_all_spans( assert result is not None - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] invoke_agent_span = next( - span for span in spans if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT + ) + ai_client_span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT ) - ai_client_span = next(span for span in spans if span["op"] == OP.GEN_AI_CHAT) # Verify workflow span (transaction) has conversation_id + (transaction,) = (item.payload for item in items if item.type == "transaction") assert ( transaction["contexts"]["trace"]["data"]["gen_ai.conversation.id"] == "conv_test_123" ) # Verify invoke_agent span has conversation_id - assert invoke_agent_span["data"]["gen_ai.conversation.id"] == "conv_test_123" + assert invoke_agent_span["attributes"]["gen_ai.conversation.id"] == 
"conv_test_123" # Verify ai_client span has conversation_id - assert ai_client_span["data"]["gen_ai.conversation.id"] == "conv_test_123" + assert ai_client_span["attributes"]["gen_ai.conversation.id"] == "conv_test_123" @pytest.mark.skipif( @@ -3294,7 +3373,7 @@ async def test_conversation_id_on_all_spans( ) @pytest.mark.asyncio async def test_conversation_id_on_tool_span( - sentry_init, capture_events, test_agent, get_model_response + sentry_init, capture_items, test_agent, get_model_response ): """ Test that gen_ai.conversation.id is set on tool execution spans when passed to Runner.run(). @@ -3391,7 +3470,7 @@ def simple_tool(message: str) -> str: traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span", "transaction") await agents.Runner.run( agent_with_tool, @@ -3400,21 +3479,20 @@ def simple_tool(message: str) -> str: conversation_id="conv_tool_test_456", ) - (transaction,) = events - spans = transaction["spans"] - + spans = [item.payload for item in items if item.type == "span"] # Find the tool span tool_span = None for span in spans: - if span.get("description", "").startswith("execute_tool"): + if span.get("name", "").startswith("execute_tool"): tool_span = span break assert tool_span is not None # Tool span should have the conversation_id passed to Runner.run() - assert tool_span["data"]["gen_ai.conversation.id"] == "conv_tool_test_456" + assert tool_span["attributes"]["gen_ai.conversation.id"] == "conv_tool_test_456" # Workflow span (transaction) should have the same conversation_id + (transaction,) = (item.payload for item in items if item.type == "transaction") assert ( transaction["contexts"]["trace"]["data"]["gen_ai.conversation.id"] == "conv_tool_test_456" @@ -3428,7 +3506,7 @@ def simple_tool(message: str) -> str: @pytest.mark.asyncio async def test_no_conversation_id_when_not_provided( sentry_init, - capture_events, + capture_items, test_agent, nonstreaming_responses_model_response, get_model_response, @@ -3455,7 
+3533,7 @@ async def test_no_conversation_id_when_not_provided( traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span", "transaction") # Don't pass conversation_id result = await agents.Runner.run( @@ -3464,16 +3542,23 @@ async def test_no_conversation_id_when_not_provided( assert result is not None - (transaction,) = events - spans = transaction["spans"] + transactions = [item.payload for item in items if item.type == "transaction"] + assert len(transactions) == 1 + transaction = transactions[0] + + spans = [item.payload for item in items if item.type == "span"] invoke_agent_span = next( - span for span in spans if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT + ) + ai_client_span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT ) - ai_client_span = next(span for span in spans if span["op"] == OP.GEN_AI_CHAT) # Verify conversation_id is NOT set on any spans assert "gen_ai.conversation.id" not in transaction["contexts"]["trace"].get( - "data", {} + "attributes", {} ) - assert "gen_ai.conversation.id" not in invoke_agent_span.get("data", {}) - assert "gen_ai.conversation.id" not in ai_client_span.get("data", {}) + assert "gen_ai.conversation.id" not in invoke_agent_span.get("attributes", {}) + assert "gen_ai.conversation.id" not in ai_client_span.get("attributes", {}) From 41e409d73164807c557a0ee7563bdd1655f56d83 Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Fri, 17 Apr 2026 13:46:26 +0200 Subject: [PATCH 21/36] fix openai-agents tests --- tests/integrations/openai_agents/test_openai_agents.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/integrations/openai_agents/test_openai_agents.py b/tests/integrations/openai_agents/test_openai_agents.py index 1c4925915d..294812b0ca 100644 --- a/tests/integrations/openai_agents/test_openai_agents.py +++ 
b/tests/integrations/openai_agents/test_openai_agents.py @@ -525,7 +525,7 @@ async def test_client_span_custom_model( traces_sample_rate=1.0, ) - items = capture_items("transaction", "spans") + items = capture_items("span") result = await agents.Runner.run( agent, "Test input", run_config=test_run_config @@ -1728,7 +1728,7 @@ async def test_error_handling(sentry_init, capture_items, test_agent): traces_sample_rate=1.0, ) - items = capture_items("error", "span", "transaction") + items = capture_items("event", "span", "transaction") with pytest.raises(Exception, match="Model Error"): await agents.Runner.run( @@ -1793,7 +1793,7 @@ async def test_error_captures_input_data(sentry_init, capture_items, test_agent) send_default_pii=True, ) - items = capture_items("error", "span") + items = capture_items("event", "span") with pytest.raises(InternalServerError, match="Error code: 500"): await agents.Runner.run(agent, "Test input", run_config=test_run_config) @@ -1851,7 +1851,7 @@ async def test_span_status_error(sentry_init, capture_items, test_agent): assert spans[0]["status"] == "error" (transaction,) = (item.payload for item in items if item.type == "transaction") - assert transaction["contexts"]["trace"]["status"] == "error" + assert transaction["contexts"]["trace"]["status"] == "internal_error" @pytest.mark.asyncio @@ -2102,7 +2102,7 @@ async def test_mcp_tool_execution_with_error( assert mcp_tool_span["name"] == "execute_tool failing_mcp_tool" assert mcp_tool_span["attributes"]["gen_ai.tool.name"] == "failing_mcp_tool" assert mcp_tool_span["attributes"]["gen_ai.tool.input"] == '{"query": "test"}' - assert mcp_tool_span["attributes"]["gen_ai.tool.output"] is None + assert mcp_tool_span["attributes"]["gen_ai.tool.output"] == "None" # Verify error status was set assert mcp_tool_span["status"] == "error" From 8bf77f0ed1b351923f1c6fa5956437a952f75c9d Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Fri, 17 Apr 2026 13:51:40 +0200 Subject: [PATCH 22/36] fix 
common tests --- tests/tracing/test_decorator.py | 51 ++++++++++++++++++++++----------- 1 file changed, 35 insertions(+), 16 deletions(-) diff --git a/tests/tracing/test_decorator.py b/tests/tracing/test_decorator.py index e73323138a..bbb7e85b1a 100644 --- a/tests/tracing/test_decorator.py +++ b/tests/tracing/test_decorator.py @@ -170,7 +170,6 @@ def my_agent(): item.payload for item in items if item.type == "span" ) - assert agent_span["attributes"]["sentry.op"] == "gen_ai.invoke_agent" assert ( agent_span["name"] == "invoke_agent test_decorator.test_span_templates_ai_dicts..my_agent" @@ -190,7 +189,6 @@ def my_agent(): "thread.name": mock.ANY, } - assert tool_span["attributes"]["sentry.op"] == "gen_ai.execute_tool" assert ( tool_span["name"] == "execute_tool test_decorator.test_span_templates_ai_dicts..my_tool" @@ -214,7 +212,6 @@ def my_agent(): } assert "gen_ai.tool.description" not in tool_span["attributes"] - assert chat_span["attributes"]["sentry.op"] == "gen_ai.chat" assert chat_span["name"] == "chat my-gpt-4o-mini" assert chat_span["attributes"] == { "gen_ai.operation.name": "chat", @@ -243,9 +240,9 @@ def my_agent(): } -def test_span_templates_ai_objects(sentry_init, capture_events): +def test_span_templates_ai_objects(sentry_init, capture_items): sentry_init(traces_sample_rate=1.0) - events = capture_events() + items = capture_items("span") @sentry_sdk.trace(template=SPANTEMPLATE.AI_TOOL) def my_tool(arg1, arg2): @@ -292,40 +289,54 @@ def my_agent(): with sentry_sdk.start_transaction(name="test-transaction"): my_agent() - (event,) = events - (agent_span, tool_span, chat_span) = event["spans"] + (agent_span, tool_span, chat_span) = ( + item.payload for item in items if item.type == "span" + ) - assert agent_span["op"] == "gen_ai.invoke_agent" assert ( - agent_span["description"] + agent_span["name"] == "invoke_agent test_decorator.test_span_templates_ai_objects..my_agent" ) - assert agent_span["data"] == { + assert agent_span["attributes"] == { 
"gen_ai.agent.name": "test_decorator.test_span_templates_ai_objects..my_agent", "gen_ai.operation.name": "invoke_agent", + "sentry.environment": "production", + "sentry.op": "gen_ai.invoke_agent", + "sentry.origin": "manual", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": mock.ANY, + "sentry.segment.id": mock.ANY, + "sentry.segment.name": "test-transaction", "thread.id": mock.ANY, "thread.name": mock.ANY, } - assert tool_span["op"] == "gen_ai.execute_tool" assert ( - tool_span["description"] + tool_span["name"] == "execute_tool test_decorator.test_span_templates_ai_objects..my_tool" ) - assert tool_span["data"] == { + assert tool_span["attributes"] == { "gen_ai.tool.name": "test_decorator.test_span_templates_ai_objects..my_tool", "gen_ai.tool.description": "This is a tool function.", "gen_ai.operation.name": "execute_tool", "gen_ai.usage.input_tokens": 10, "gen_ai.usage.output_tokens": 20, "gen_ai.usage.total_tokens": 30, + "sentry.environment": "production", + "sentry.op": "gen_ai.execute_tool", + "sentry.origin": "manual", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": mock.ANY, + "sentry.segment.id": mock.ANY, + "sentry.segment.name": "test-transaction", "thread.id": mock.ANY, "thread.name": mock.ANY, } - assert chat_span["op"] == "gen_ai.chat" - assert chat_span["description"] == "chat my-gpt-4o-mini" - assert chat_span["data"] == { + assert chat_span["name"] == "chat my-gpt-4o-mini" + assert chat_span["attributes"] == { "gen_ai.operation.name": "chat", "gen_ai.request.frequency_penalty": 1.0, "gen_ai.request.max_tokens": 100, @@ -339,6 +350,14 @@ def my_agent(): "gen_ai.usage.input_tokens": 11, "gen_ai.usage.output_tokens": 22, "gen_ai.usage.total_tokens": 33, + "sentry.environment": "production", + "sentry.op": "gen_ai.chat", + "sentry.origin": "manual", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": mock.ANY, + 
"sentry.segment.id": mock.ANY, + "sentry.segment.name": "test-transaction", "thread.id": mock.ANY, "thread.name": mock.ANY, } From 7c3da4fdab771be2ae50dc741156951230d88c83 Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Fri, 17 Apr 2026 13:52:39 +0200 Subject: [PATCH 23/36] client handle None --- sentry_sdk/client.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index 99e58ec499..356b68e254 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -149,6 +149,12 @@ def _serialized_v1_attribute_to_serialized_v2_attribute( "type": "string", } + if attribute_value is None: + return { + "value": "None", + "type": "string", + } + return None From 06c2a40a6dd723e0a1ed0e6ee7166efe4068e179 Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Fri, 17 Apr 2026 13:54:05 +0200 Subject: [PATCH 24/36] fix item_count --- sentry_sdk/client.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index 356b68e254..0d13b6db03 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -1126,23 +1126,21 @@ def capture_event( event_opt["spans"] = non_gen_ai_spans envelope.add_transaction(event_opt) + converted_gen_ai_spans = [ + _serialized_v1_span_to_serialized_v2_span(span, event) + for span in gen_ai_spans + if isinstance(span, dict) + ] + envelope.add_item( Item( type=SpanBatcher.TYPE, content_type=SpanBatcher.CONTENT_TYPE, headers={ - "item_count": len(gen_ai_spans), + "item_count": len(converted_gen_ai_spans), }, payload=PayloadRef( - json={ - "items": [ - _serialized_v1_span_to_serialized_v2_span( - span, event - ) - for span in gen_ai_spans - if isinstance(span, dict) - ] - }, + json={"items": converted_gen_ai_spans}, ), ) ) From 204b9809f6efa06aad3b9f1914d169d1c677e286 Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Fri, 17 Apr 2026 14:02:48 +0200 Subject: [PATCH 25/36] fix common tests --- 
tests/tracing/test_decorator.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/tests/tracing/test_decorator.py b/tests/tracing/test_decorator.py index bbb7e85b1a..5f5adec2cb 100644 --- a/tests/tracing/test_decorator.py +++ b/tests/tracing/test_decorator.py @@ -364,9 +364,9 @@ def my_agent(): @pytest.mark.parametrize("send_default_pii", [True, False]) -def test_span_templates_ai_pii(sentry_init, capture_events, send_default_pii): +def test_span_templates_ai_pii(sentry_init, capture_items, send_default_pii): sentry_init(traces_sample_rate=1.0, send_default_pii=send_default_pii) - events = capture_events() + items = capture_items("span") @sentry_sdk.trace(template=SPANTEMPLATE.AI_TOOL) def my_tool(arg1, arg2, **kwargs): @@ -396,15 +396,14 @@ def my_agent(*args, **kwargs): with sentry_sdk.start_transaction(name="test-transaction"): my_agent(22, 33, arg1=44, arg2=55) - (event,) = events - (_, tool_span, _) = event["spans"] + (_, tool_span, _) = (item.payload for item in items if item.type == "span") if send_default_pii: assert ( - tool_span["data"]["gen_ai.tool.input"] + tool_span["attributes"]["gen_ai.tool.input"] == "{'args': (1, 2), 'kwargs': {'tool_arg1': '3', 'tool_arg2': '4'}}" ) - assert tool_span["data"]["gen_ai.tool.output"] == "'tool_output'" + assert tool_span["attributes"]["gen_ai.tool.output"] == "'tool_output'" else: - assert "gen_ai.tool.input" not in tool_span["data"] - assert "gen_ai.tool.output" not in tool_span["data"] + assert "gen_ai.tool.input" not in tool_span["attributes"] + assert "gen_ai.tool.output" not in tool_span["attributes"] From 00733f960e239bb4a4c606580bc0e9a05f97ec42 Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Fri, 17 Apr 2026 14:08:15 +0200 Subject: [PATCH 26/36] fix common tests --- tests/tracing/test_misc.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/tracing/test_misc.py b/tests/tracing/test_misc.py index f69e19791a..bb8d942335 100644 --- 
a/tests/tracing/test_misc.py +++ b/tests/tracing/test_misc.py @@ -625,15 +625,15 @@ def test_conversation_id_propagates_to_span_with_gen_ai_operation_name( span.set_data("gen_ai.operation.name", "chat") spans = [item.payload for item in items if item.type == "span"] - span_data = spans[0]["data"] + span_data = spans[0]["attributes"] assert span_data.get("gen_ai.conversation.id") == "conv-op-name-test" def test_conversation_id_propagates_to_span_with_ai_op( - self, sentry_init, capture_events + self, sentry_init, capture_items ): """Span with ai.* op should get conversation_id.""" sentry_init(traces_sample_rate=1.0) - events = capture_events() + items = capture_items("span") scope = sentry_sdk.get_current_scope() scope.set_conversation_id("conv-ai-op-test") @@ -642,8 +642,8 @@ def test_conversation_id_propagates_to_span_with_ai_op( with start_span(op="ai.chat.completions"): pass - (event,) = events - span_data = event["spans"][0]["data"] + spans = [item.payload for item in items if item.type == "span"] + span_data = spans[0]["attributes"] assert span_data.get("gen_ai.conversation.id") == "conv-ai-op-test" def test_conversation_id_propagates_to_span_with_gen_ai_op( From a54cab4ce7b94624f5de991a1615e632da71f5f9 Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Fri, 17 Apr 2026 14:16:23 +0200 Subject: [PATCH 27/36] common tests --- tests/tracing/test_misc.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/tracing/test_misc.py b/tests/tracing/test_misc.py index bb8d942335..8895c98dbc 100644 --- a/tests/tracing/test_misc.py +++ b/tests/tracing/test_misc.py @@ -611,11 +611,11 @@ class TestConversationIdPropagation: """Tests for conversation_id propagation to AI spans.""" def test_conversation_id_propagates_to_span_with_gen_ai_operation_name( - self, sentry_init, capture_items + self, sentry_init, capture_events ): """Span with gen_ai.operation.name data should get conversation_id.""" sentry_init(traces_sample_rate=1.0) - 
items = capture_items("span") + events = capture_events() scope = sentry_sdk.get_current_scope() scope.set_conversation_id("conv-op-name-test") @@ -624,16 +624,16 @@ def test_conversation_id_propagates_to_span_with_gen_ai_operation_name( with start_span(op="http.client") as span: span.set_data("gen_ai.operation.name", "chat") - spans = [item.payload for item in items if item.type == "span"] - span_data = spans[0]["attributes"] + (event,) = events + span_data = event["spans"][0]["data"] assert span_data.get("gen_ai.conversation.id") == "conv-op-name-test" def test_conversation_id_propagates_to_span_with_ai_op( - self, sentry_init, capture_items + self, sentry_init, capture_events ): """Span with ai.* op should get conversation_id.""" sentry_init(traces_sample_rate=1.0) - items = capture_items("span") + events = capture_events() scope = sentry_sdk.get_current_scope() scope.set_conversation_id("conv-ai-op-test") @@ -642,8 +642,8 @@ def test_conversation_id_propagates_to_span_with_ai_op( with start_span(op="ai.chat.completions"): pass - spans = [item.payload for item in items if item.type == "span"] - span_data = spans[0]["attributes"] + (event,) = events + span_data = event["spans"][0]["data"] assert span_data.get("gen_ai.conversation.id") == "conv-ai-op-test" def test_conversation_id_propagates_to_span_with_gen_ai_op( From 4b0c47b28f8a4bf62de2e3a0a9d888ba908fe1b8 Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Fri, 17 Apr 2026 14:24:18 +0200 Subject: [PATCH 28/36] tests --- tests/tracing/test_misc.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/tracing/test_misc.py b/tests/tracing/test_misc.py index 8895c98dbc..0188b08a88 100644 --- a/tests/tracing/test_misc.py +++ b/tests/tracing/test_misc.py @@ -647,11 +647,11 @@ def test_conversation_id_propagates_to_span_with_ai_op( assert span_data.get("gen_ai.conversation.id") == "conv-ai-op-test" def test_conversation_id_propagates_to_span_with_gen_ai_op( - self, sentry_init, 
capture_events + self, sentry_init, capture_items ): """Span with gen_ai.* op should get conversation_id.""" sentry_init(traces_sample_rate=1.0) - events = capture_events() + items = capture_items("span") scope = sentry_sdk.get_current_scope() scope.set_conversation_id("conv-gen-ai-op-test") @@ -660,8 +660,8 @@ def test_conversation_id_propagates_to_span_with_gen_ai_op( with start_span(op="gen_ai.invoke_agent"): pass - (event,) = events - span_data = event["spans"][0]["data"] + spans = [item.payload for item in items if item.type == "span"] + span_data = spans[0]["attributes"] assert span_data.get("gen_ai.conversation.id") == "conv-gen-ai-op-test" def test_conversation_id_not_propagated_to_non_ai_span( From 6c5c812faa8879523fb4f90c650327a7f70a1d81 Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Fri, 17 Apr 2026 14:46:25 +0200 Subject: [PATCH 29/36] add experimental v2 option --- .../integrations/anthropic/test_anthropic.py | 53 ++++++++++++ .../google_genai/test_google_genai.py | 37 ++++++++ .../huggingface_hub/test_huggingface_hub.py | 8 ++ .../integrations/langchain/test_langchain.py | 26 ++++++ tests/integrations/litellm/test_litellm.py | 28 ++++++ tests/integrations/openai/test_openai.py | 42 +++++++++ .../openai_agents/test_openai_agents.py | 32 +++++++ .../pydantic_ai/test_pydantic_ai.py | 85 +++++++++++++++++++ 8 files changed, 311 insertions(+) diff --git a/tests/integrations/anthropic/test_anthropic.py b/tests/integrations/anthropic/test_anthropic.py index c7fc280b6c..aedab1578b 100644 --- a/tests/integrations/anthropic/test_anthropic.py +++ b/tests/integrations/anthropic/test_anthropic.py @@ -97,6 +97,7 @@ def test_nonstreaming_create_message( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -171,6 +172,7 @@ async def 
test_nonstreaming_create_message_async( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = AsyncAnthropic(api_key="z") @@ -287,6 +289,7 @@ def test_streaming_create_message( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -395,6 +398,7 @@ def test_streaming_create_message_close( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -498,6 +502,7 @@ def test_streaming_create_message_api_error( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -614,6 +619,7 @@ def test_stream_messages( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -723,6 +729,7 @@ def test_stream_messages_close( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -831,6 +838,7 @@ def test_stream_messages_api_error( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -953,6 +961,7 @@ async def test_streaming_create_message_async( traces_sample_rate=1.0, default_integrations=False, 
send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1064,6 +1073,7 @@ async def test_streaming_create_message_async_close( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1170,6 +1180,7 @@ async def test_streaming_create_message_async_api_error( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1290,6 +1301,7 @@ async def test_stream_message_async( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1400,6 +1412,7 @@ async def test_stream_messages_async_api_error( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1510,6 +1523,7 @@ async def test_stream_messages_async_close( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1666,6 +1680,7 @@ def test_streaming_create_message_with_input_json_delta( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1815,6 +1830,7 @@ def test_stream_messages_with_input_json_delta( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + 
_experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1972,6 +1988,7 @@ async def test_streaming_create_message_with_input_json_delta_async( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2129,6 +2146,7 @@ async def test_stream_message_with_input_json_delta_async( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2188,6 +2206,7 @@ async def test_stream_message_with_input_json_delta_async( def test_exception_message_create(sentry_init, capture_items): sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) + _experiments = ({"gen_ai_as_v2_spans": True},) items = capture_items("event", "transaction") client = Anthropic(api_key="z") @@ -2210,6 +2229,7 @@ def test_exception_message_create(sentry_init, capture_items): def test_span_status_error(sentry_init, capture_items): sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) + _experiments = ({"gen_ai_as_v2_spans": True},) items = capture_items("event", "span") with start_transaction(name="anthropic"): @@ -2236,6 +2256,7 @@ def test_span_status_error(sentry_init, capture_items): @pytest.mark.asyncio async def test_span_status_error_async(sentry_init, capture_items): sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) + _experiments = ({"gen_ai_as_v2_spans": True},) items = capture_items("event", "span") with start_transaction(name="anthropic"): @@ -2262,6 +2283,7 @@ async def test_span_status_error_async(sentry_init, capture_items): @pytest.mark.asyncio async def test_exception_message_create_async(sentry_init, capture_items): 
sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) + _experiments = ({"gen_ai_as_v2_spans": True},) items = capture_items("event", "transaction") client = AsyncAnthropic(api_key="z") @@ -2286,6 +2308,7 @@ def test_span_origin(sentry_init, capture_items): sentry_init( integrations=[AnthropicIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2316,6 +2339,7 @@ async def test_span_origin_async(sentry_init, capture_items): sentry_init( integrations=[AnthropicIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2379,6 +2403,7 @@ def test_set_output_data_with_input_json_delta(sentry_init): integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) with start_transaction(name="test"): @@ -2429,6 +2454,7 @@ def test_anthropic_message_role_mapping( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2475,6 +2501,7 @@ def test_anthropic_message_truncation(sentry_init, capture_items): integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2525,6 +2552,7 @@ async def test_anthropic_message_truncation_async(sentry_init, capture_items): integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2585,6 +2613,7 @@ def test_nonstreaming_create_message_with_system_prompt( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, 
send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -2671,6 +2700,7 @@ async def test_nonstreaming_create_message_with_system_prompt_async( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = AsyncAnthropic(api_key="z") @@ -2800,6 +2830,7 @@ def test_streaming_create_message_with_system_prompt( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2930,6 +2961,7 @@ def test_stream_messages_with_system_prompt( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -3062,6 +3094,7 @@ async def test_stream_message_with_system_prompt_async( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -3194,6 +3227,7 @@ async def test_streaming_create_message_with_system_prompt_async( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -3269,6 +3303,7 @@ def test_system_prompt_with_complex_structure(sentry_init, capture_items): integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") 
client = Anthropic(api_key="z") @@ -3522,6 +3557,7 @@ def test_message_with_base64_image(sentry_init, capture_items): integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -3572,6 +3608,7 @@ def test_message_with_url_image(sentry_init, capture_items): integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -3615,6 +3652,7 @@ def test_message_with_file_image(sentry_init, capture_items): integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -3659,6 +3697,7 @@ def test_message_with_base64_pdf(sentry_init, capture_items): integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -3703,6 +3742,7 @@ def test_message_with_url_pdf(sentry_init, capture_items): integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -3746,6 +3786,7 @@ def test_message_with_file_document(sentry_init, capture_items): integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -3790,6 +3831,7 @@ def test_message_with_mixed_content(sentry_init, 
capture_items): integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -3872,6 +3914,7 @@ def test_message_with_multiple_images_different_formats(sentry_init, capture_ite integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -3946,6 +3989,7 @@ def test_binary_content_not_stored_when_pii_disabled(sentry_init, capture_items) integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=False, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -3984,6 +4028,7 @@ def test_binary_content_not_stored_when_prompts_disabled(sentry_init, capture_it integrations=[AnthropicIntegration(include_prompts=False)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -4019,6 +4064,7 @@ def test_binary_content_not_stored_when_prompts_disabled(sentry_init, capture_it def test_cache_tokens_nonstreaming(sentry_init, capture_items): """Test cache read/write tokens are tracked for non-streaming responses.""" - sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) + sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -4067,6 +4113,7 @@ def test_input_tokens_include_cache_write_nonstreaming(sentry_init, capture_item cache_creation_input_tokens=2846, cache_read_input_tokens=0) """ - sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) + sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}) items = 
capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -4115,6 +4162,7 @@ def test_input_tokens_include_cache_read_nonstreaming(sentry_init, capture_items cache_creation_input_tokens=0, cache_read_input_tokens=2846) """ - sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) + sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -4192,6 +4240,7 @@ def test_input_tokens_include_cache_read_streaming( ) - sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) + sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}) items = capture_items("transaction", "span") with mock.patch.object( @@ -4258,6 +4307,7 @@ def test_stream_messages_input_tokens_include_cache_read_streaming( ) - sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) + sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}) items = capture_items("transaction", "span") with mock.patch.object( @@ -4291,6 +4341,7 @@ def test_input_tokens_unchanged_without_caching(sentry_init, capture_items): Usage(input_tokens=20, output_tokens=12) """ - sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) + sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -4359,6 +4410,7 @@ def test_cache_tokens_streaming( ) - sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) + sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}) items = capture_items("transaction", "span") with mock.patch.object( @@ -4419,6 +4471,7 @@ def test_stream_messages_cache_tokens( ) - sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) + sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}) items = capture_items("transaction", "span") with mock.patch.object( diff --git a/tests/integrations/google_genai/test_google_genai.py b/tests/integrations/google_genai/test_google_genai.py index e074b79c8c..ae31fe565b 100644 
--- a/tests/integrations/google_genai/test_google_genai.py +++ b/tests/integrations/google_genai/test_google_genai.py @@ -130,6 +130,7 @@ def test_nonstreaming_generate_content( integrations=[GoogleGenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -219,6 +220,7 @@ def test_generate_content_with_system_instruction( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -262,6 +264,7 @@ def test_generate_content_with_tools(sentry_init, capture_items, mock_genai_clie sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -344,6 +347,7 @@ def test_tool_execution(sentry_init, capture_items): integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -380,6 +384,7 @@ def test_error_handling(sentry_init, capture_items, mock_genai_client): sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event", "transaction") @@ -411,6 +416,7 @@ def test_streaming_generate_content(sentry_init, capture_items, mock_genai_clien integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -527,6 +533,7 @@ def test_span_origin(sentry_init, capture_items, mock_genai_client): sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -554,6 +561,7 @@ 
def test_response_without_usage_metadata(sentry_init, capture_items, mock_genai_ sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -595,6 +603,7 @@ def test_multiple_candidates(sentry_init, capture_items, mock_genai_client): integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -659,6 +668,7 @@ def test_all_configuration_parameters(sentry_init, capture_items, mock_genai_cli sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -698,6 +708,7 @@ def test_empty_response(sentry_init, capture_items, mock_genai_client): sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -729,6 +740,7 @@ def test_response_with_different_id_fields( sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -771,6 +783,7 @@ def test_tool_with_async_function(sentry_init): sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) # Create an async tool function @@ -793,6 +806,7 @@ def test_contents_as_none(sentry_init, capture_items, mock_genai_client): integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -819,6 +833,7 @@ def test_tool_calls_extraction(sentry_init, capture_items, mock_genai_client): sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") 
@@ -905,6 +920,7 @@ def test_google_genai_message_truncation(sentry_init, capture_items, mock_genai_ integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -980,6 +996,7 @@ def test_embed_content( integrations=[GoogleGenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1041,6 +1058,7 @@ def test_embed_content_string_input(sentry_init, capture_items, mock_genai_clien integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1087,6 +1105,7 @@ def test_embed_content_error_handling(sentry_init, capture_items, mock_genai_cli sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "event") @@ -1120,6 +1139,7 @@ def test_embed_content_without_statistics( sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1159,6 +1179,7 @@ def test_embed_content_span_origin(sentry_init, capture_items, mock_genai_client sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1199,6 +1220,7 @@ async def test_async_embed_content( integrations=[GoogleGenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1263,6 +1285,7 @@ async def test_async_embed_content_string_input( 
integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1312,6 +1335,7 @@ async def test_async_embed_content_error_handling( sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "event") @@ -1346,6 +1370,7 @@ async def test_async_embed_content_without_statistics( sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1388,6 +1413,7 @@ async def test_async_embed_content_span_origin( sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1419,6 +1445,7 @@ def test_generate_content_with_content_object( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1455,6 +1482,7 @@ def test_generate_content_with_dict_format( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1487,6 +1515,7 @@ def test_generate_content_with_file_data(sentry_init, capture_items, mock_genai_ integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1536,6 +1565,7 @@ def test_generate_content_with_inline_data( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1581,6 +1611,7 @@ def 
test_generate_content_with_function_response( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1635,6 +1666,7 @@ def test_generate_content_with_mixed_string_and_content( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1678,6 +1710,7 @@ def test_generate_content_with_part_object_directly( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1716,6 +1749,7 @@ def test_generate_content_with_list_of_dicts( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1752,6 +1786,7 @@ def test_generate_content_with_dict_inline_data( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1796,6 +1831,7 @@ def test_generate_content_without_parts_property_inline_data( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1839,6 +1875,7 @@ def test_generate_content_without_parts_property_inline_data_and_binary_data_wit integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") diff --git a/tests/integrations/huggingface_hub/test_huggingface_hub.py b/tests/integrations/huggingface_hub/test_huggingface_hub.py index 98abbb00fa..16c27b678d 100644 --- 
a/tests/integrations/huggingface_hub/test_huggingface_hub.py +++ b/tests/integrations/huggingface_hub/test_huggingface_hub.py @@ -480,6 +480,7 @@ def test_text_generation( traces_sample_rate=1.0, send_default_pii=send_default_pii, integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -555,6 +556,7 @@ def test_text_generation_streaming( traces_sample_rate=1.0, send_default_pii=send_default_pii, integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -631,6 +633,7 @@ def test_chat_completion( traces_sample_rate=1.0, send_default_pii=send_default_pii, integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -709,6 +712,7 @@ def test_chat_completion_streaming( traces_sample_rate=1.0, send_default_pii=send_default_pii, integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -780,6 +784,7 @@ def test_chat_completion_api_error( sentry_init: "Any", capture_items: "Any", mock_hf_api_with_errors: "Any" ) -> None: - sentry_init(traces_sample_rate=1.0) + sentry_init(traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}) items = capture_items("event", "transaction", "span") client = get_hf_provider_inference_client() @@ -839,6 +844,7 @@ def test_span_status_error( sentry_init: "Any", capture_items: "Any", mock_hf_api_with_errors: "Any" ) -> None: - sentry_init(traces_sample_rate=1.0) + sentry_init(traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}) items = capture_items("event", "transaction", "span") client = get_hf_provider_inference_client() @@ -881,6 +887,7 @@ def test_chat_completion_with_tools( traces_sample_rate=1.0, send_default_pii=send_default_pii, 
integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -976,6 +983,7 @@ def test_chat_completion_streaming_with_tools( traces_sample_rate=1.0, send_default_pii=send_default_pii, integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") diff --git a/tests/integrations/langchain/test_langchain.py b/tests/integrations/langchain/test_langchain.py index f709d12129..5002d050b9 100644 --- a/tests/integrations/langchain/test_langchain.py +++ b/tests/integrations/langchain/test_langchain.py @@ -108,6 +108,7 @@ def test_langchain_text_completion( ], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -216,6 +217,7 @@ def test_langchain_create_agent( ], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -336,6 +338,7 @@ def test_tool_execution_span( ], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -528,6 +531,7 @@ def test_langchain_openai_tools_agent( ], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -865,6 +869,7 @@ def test_langchain_error(sentry_init, capture_items): integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event", "transaction", "span") @@ -903,6 +908,7 @@ def test_span_status_error(sentry_init, capture_items): sentry_init( integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, + 
_experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event", "transaction", "span") @@ -988,6 +994,7 @@ def _identifying_params(self): return {} - sentry_init(integrations=[LangchainIntegration()]) + sentry_init(integrations=[LangchainIntegration()], + _experiments={"gen_ai_as_v2_spans": True}) # Create a manual SentryLangchainCallback manual_callback = SentryLangchainCallback( @@ -1028,6 +1035,7 @@ def test_langchain_callback_manager(sentry_init): sentry_init( integrations=[LangchainIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) local_manager = BaseCallbackManager(handlers=[]) @@ -1060,6 +1068,7 @@ def test_langchain_callback_manager_with_sentry_callback(sentry_init): sentry_init( integrations=[LangchainIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) sentry_callback = SentryLangchainCallback(0, False) local_manager = BaseCallbackManager(handlers=[sentry_callback]) @@ -1092,6 +1101,7 @@ def test_langchain_callback_list(sentry_init): sentry_init( integrations=[LangchainIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) local_callbacks = [] @@ -1124,6 +1134,7 @@ def test_langchain_callback_list_existing_callback(sentry_init): sentry_init( integrations=[LangchainIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) sentry_callback = SentryLangchainCallback(0, False) local_callbacks = [sentry_callback] @@ -1161,6 +1172,7 @@ def test_langchain_message_role_mapping(sentry_init, capture_items): integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1298,6 +1310,7 @@ def test_langchain_message_truncation(sentry_init, capture_items): integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", 
"span") @@ -1390,6 +1403,7 @@ def test_langchain_embeddings_sync( integrations=[LangchainIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1468,6 +1482,7 @@ def test_langchain_embeddings_embed_query( integrations=[LangchainIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1542,6 +1557,7 @@ async def test_langchain_embeddings_async( integrations=[LangchainIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1614,6 +1630,7 @@ async def test_langchain_embeddings_aembed_query(sentry_init, capture_items): integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1675,6 +1692,7 @@ def test_langchain_embeddings_no_model_name(sentry_init, capture_items): sentry_init( integrations=[LangchainIntegration(include_prompts=False)], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1724,6 +1742,7 @@ def test_langchain_embeddings_integration_disabled(sentry_init, capture_items): # Initialize without LangchainIntegration - sentry_init(traces_sample_rate=1.0) + sentry_init(traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}) items = capture_items("transaction", "span") with mock.patch.object( @@ -1760,6 +1779,7 @@ def test_langchain_embeddings_multiple_providers(sentry_init, capture_items): integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = 
capture_items("transaction", "span") @@ -1817,6 +1837,7 @@ def test_langchain_embeddings_error_handling(sentry_init, capture_items): integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1857,6 +1878,7 @@ def test_langchain_embeddings_multiple_calls(sentry_init, capture_items): integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1920,6 +1942,7 @@ def test_langchain_embeddings_span_hierarchy(sentry_init, capture_items): integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1973,6 +1996,7 @@ def test_langchain_embeddings_with_list_and_string_inputs(sentry_init, capture_i integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2037,6 +2061,7 @@ def test_langchain_response_model_extraction( integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2342,6 +2367,7 @@ def test_langchain_ai_system_detection( sentry_init( integrations=[LangchainIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") diff --git a/tests/integrations/litellm/test_litellm.py b/tests/integrations/litellm/test_litellm.py index 90807744e7..b9365e7008 100644 --- a/tests/integrations/litellm/test_litellm.py +++ b/tests/integrations/litellm/test_litellm.py @@ -152,6 +152,7 @@ def 
test_nonstreaming_chat_completion( integrations=[LiteLLMIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -233,6 +234,7 @@ async def test_async_nonstreaming_chat_completion( integrations=[LiteLLMIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -316,6 +318,7 @@ def test_streaming_chat_completion( integrations=[LiteLLMIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -386,6 +389,7 @@ async def test_async_streaming_chat_completion( integrations=[LiteLLMIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -452,6 +456,7 @@ def test_embeddings_create( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -521,6 +526,7 @@ async def test_async_embeddings_create( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -585,6 +591,7 @@ def test_embeddings_create_with_list_input( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -647,6 +654,7 @@ async def test_async_embeddings_create_with_list_input( integrations=[LiteLLMIntegration(include_prompts=True)], 
traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -709,6 +717,7 @@ def test_embeddings_no_pii( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=False, # PII disabled + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -765,6 +774,7 @@ async def test_async_embeddings_no_pii( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=False, # PII disabled + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -815,6 +825,7 @@ def test_exception_handling( sentry_init( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event") @@ -853,6 +864,7 @@ async def test_async_exception_handling( sentry_init( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event") @@ -894,6 +906,7 @@ def test_span_origin( sentry_init( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -941,6 +954,7 @@ def test_multiple_providers( sentry_init( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction") @@ -1036,6 +1050,7 @@ async def test_async_multiple_providers( sentry_init( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1132,6 +1147,7 @@ def test_additional_parameters( sentry_init( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1191,6 +1207,7 @@ async def 
test_async_additional_parameters( sentry_init( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1250,6 +1267,7 @@ def test_no_integration( """Test that when integration is not enabled, callbacks don't break.""" sentry_init( traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1296,6 +1314,7 @@ async def test_async_no_integration( """Test that when integration is not enabled, callbacks don't break.""" sentry_init( traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1338,6 +1357,7 @@ def test_response_without_usage(sentry_init, capture_items): sentry_init( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1379,6 +1399,7 @@ def test_integration_setup(sentry_init): sentry_init( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) # Check that callbacks are registered @@ -1393,6 +1414,7 @@ def test_litellm_message_truncation(sentry_init, capture_items): integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1459,6 +1481,7 @@ def test_binary_content_encoding_image_url( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1538,6 +1561,7 @@ async def test_async_binary_content_encoding_image_url( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") 
@@ -1618,6 +1642,7 @@ def test_binary_content_encoding_mixed_content( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1686,6 +1711,7 @@ async def test_async_binary_content_encoding_mixed_content( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1755,6 +1781,7 @@ def test_binary_content_encoding_uri_type( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1828,6 +1855,7 @@ async def test_async_binary_content_encoding_uri_type( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") diff --git a/tests/integrations/openai/test_openai.py b/tests/integrations/openai/test_openai.py index e53f8e4f55..4c7df84b8b 100644 --- a/tests/integrations/openai/test_openai.py +++ b/tests/integrations/openai/test_openai.py @@ -138,6 +138,7 @@ def test_nonstreaming_chat_completion_no_prompts( integrations=[OpenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -233,6 +234,7 @@ def test_nonstreaming_chat_completion(sentry_init, capture_items, messages, requ integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -312,6 +314,7 @@ async def test_nonstreaming_chat_completion_async_no_prompts( 
integrations=[OpenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -407,6 +410,7 @@ async def test_nonstreaming_chat_completion_async( integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -502,6 +506,7 @@ def test_streaming_chat_completion_no_prompts( ], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -621,6 +626,7 @@ def test_streaming_chat_completion_with_usage_in_stream( integrations=[OpenAIIntegration(include_prompts=False)], traces_sample_rate=1.0, send_default_pii=False, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -701,6 +707,7 @@ def test_streaming_chat_completion_empty_content_preserves_token_usage( integrations=[OpenAIIntegration(include_prompts=False)], traces_sample_rate=1.0, send_default_pii=False, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -764,6 +771,7 @@ async def test_streaming_chat_completion_empty_content_preserves_token_usage_asy integrations=[OpenAIIntegration(include_prompts=False)], traces_sample_rate=1.0, send_default_pii=False, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -829,6 +837,7 @@ async def test_streaming_chat_completion_async_with_usage_in_stream( integrations=[OpenAIIntegration(include_prompts=False)], traces_sample_rate=1.0, send_default_pii=False, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -957,6 +966,7 @@ def test_streaming_chat_completion( ], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1109,6 +1119,7 @@ async def 
test_streaming_chat_completion_async_no_prompts( ], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1280,6 +1291,7 @@ async def test_streaming_chat_completion_async( ], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1412,6 +1424,7 @@ async def test_streaming_chat_completion_async( def test_bad_chat_completion(sentry_init, capture_items): - sentry_init(integrations=[OpenAIIntegration()], traces_sample_rate=1.0) + sentry_init(integrations=[OpenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}) items = capture_items("event") client = OpenAI(api_key="z") @@ -1430,6 +1443,7 @@ def test_bad_chat_completion(sentry_init, capture_items): def test_span_status_error(sentry_init, capture_items): - sentry_init(integrations=[OpenAIIntegration()], traces_sample_rate=1.0) + sentry_init(integrations=[OpenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}) items = capture_items("event", "transaction", "span") with start_transaction(name="test"): @@ -1455,6 +1469,7 @@ def test_span_status_error(sentry_init, capture_items): @pytest.mark.asyncio async def test_bad_chat_completion_async(sentry_init, capture_items): - sentry_init(integrations=[OpenAIIntegration()], traces_sample_rate=1.0) + sentry_init(integrations=[OpenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}) items = capture_items("event") client = AsyncOpenAI(api_key="z") @@ -1485,6 +1500,7 @@ def test_embeddings_create_no_pii( integrations=[OpenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1567,6 +1583,7 @@ def test_embeddings_create(sentry_init, capture_items, input, request): integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1638,6 +1655,7 @@ async def test_embeddings_create_async_no_pii(
integrations=[OpenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1721,6 +1739,7 @@ async def test_embeddings_create_async(sentry_init, capture_items, input, reques integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1789,6 +1808,7 @@ def test_embeddings_create_raises_error( integrations=[OpenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event") @@ -1817,6 +1837,7 @@ async def test_embeddings_create_raises_error_async( integrations=[OpenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event") @@ -1837,6 +1858,7 @@ def test_span_origin_nonstreaming_chat(sentry_init, capture_items): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1860,6 +1882,7 @@ async def test_span_origin_nonstreaming_chat_async(sentry_init, capture_items): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1882,6 +1905,7 @@ def test_span_origin_streaming_chat(sentry_init, capture_items): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1945,6 +1969,7 @@ async def test_span_origin_streaming_chat_async( sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) 
items = capture_items("transaction", "span") @@ -2011,6 +2036,7 @@ def test_span_origin_embeddings(sentry_init, capture_items): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2042,6 +2068,7 @@ async def test_span_origin_embeddings_async(sentry_init, capture_items): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2435,6 +2462,7 @@ def test_ai_client_span_responses_api_no_pii(sentry_init, capture_items): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -2557,6 +2585,7 @@ def test_ai_client_span_responses_api( integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -2767,6 +2796,7 @@ def test_error_in_responses_api(sentry_init, capture_items): integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event", "transaction", "span") @@ -2873,6 +2903,7 @@ async def test_ai_client_span_responses_async_api( integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -3158,6 +3189,7 @@ async def test_ai_client_span_streaming_responses_async_api( integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -3383,6 +3415,7 @@ async def test_error_in_responses_async_api(sentry_init, capture_items): integrations=[OpenAIIntegration(include_prompts=True)], 
traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event", "transaction", "span") @@ -3510,6 +3543,7 @@ def test_streaming_responses_api( ], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -3586,6 +3620,7 @@ async def test_streaming_responses_api_async( ], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -3649,6 +3684,7 @@ def test_empty_tools_in_chat_completion(sentry_init, capture_items, tools): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -3691,6 +3727,7 @@ def test_openai_message_role_mapping( integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -3721,6 +3758,7 @@ def test_openai_message_truncation(sentry_init, capture_items): integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -3770,6 +3808,7 @@ def test_streaming_chat_completion_ttft( sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -3848,6 +3887,7 @@ async def test_streaming_chat_completion_ttft_async( sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -3924,6 +3964,7 @@ def test_streaming_responses_api_ttft( sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -3973,6 +4014,7 @@ async def 
test_streaming_responses_api_ttft_async( sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") diff --git a/tests/integrations/openai_agents/test_openai_agents.py b/tests/integrations/openai_agents/test_openai_agents.py index 294812b0ca..9e74848a04 100644 --- a/tests/integrations/openai_agents/test_openai_agents.py +++ b/tests/integrations/openai_agents/test_openai_agents.py @@ -182,6 +182,7 @@ async def test_agent_invocation_span_no_pii( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=False, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -339,6 +340,7 @@ async def test_agent_invocation_span( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -523,6 +525,7 @@ async def test_client_span_custom_model( sentry_init( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -570,6 +573,7 @@ def test_agent_invocation_span_sync_no_pii( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=False, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -721,6 +725,7 @@ def test_agent_invocation_span_sync( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -963,6 +968,7 @@ async def test_handoff_span(sentry_init, capture_items, get_model_response): sentry_init( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1092,6 +1098,7 @@ async def test_max_turns_before_handoff_span( 
sentry_init( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1190,6 +1197,7 @@ def simple_test_tool(message: str) -> str: integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1418,6 +1426,7 @@ async def test_hosted_mcp_tool_propagation_header_streamed( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, release="d08ebdb9309e1b004c6f52202de58a09c2268e42", + _experiments={"gen_ai_as_v2_spans": True}, ) request_headers = {} @@ -1580,6 +1589,7 @@ async def test_hosted_mcp_tool_propagation_headers( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, release="d08ebdb9309e1b004c6f52202de58a09c2268e42", + _experiments={"gen_ai_as_v2_spans": True}, ) response = get_model_response(EXAMPLE_RESPONSE, serialize_pydantic=True) @@ -1678,6 +1688,7 @@ def simple_test_tool(message: str) -> str: integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -1726,6 +1737,7 @@ async def test_error_handling(sentry_init, capture_items, test_agent): LoggingIntegration(event_level=logging.CRITICAL), ], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event", "span", "transaction") @@ -1791,6 +1803,7 @@ async def test_error_captures_input_data(sentry_init, capture_items, test_agent) ], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event", "span") @@ -1835,6 +1848,7 @@ async def test_span_status_error(sentry_init, capture_items, test_agent): LoggingIntegration(event_level=logging.CRITICAL), ], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = 
capture_items("event", "transaction", "span") @@ -1948,6 +1962,7 @@ async def test_mcp_tool_execution_spans( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -2078,6 +2093,7 @@ async def test_mcp_tool_execution_with_error( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -2202,6 +2218,7 @@ async def test_mcp_tool_execution_without_pii( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=False, # PII disabled + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -2259,6 +2276,7 @@ async def test_multiple_agents_asyncio( sentry_init( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -2302,6 +2320,7 @@ def test_openai_agents_message_role_mapping( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) get_response_kwargs = {"input": [test_message]} @@ -2401,6 +2420,7 @@ def failing_tool(message: str) -> str: integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -2498,6 +2518,7 @@ async def test_invoke_agent_span_includes_usage_data( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -2591,6 +2612,7 @@ async def test_ai_client_span_includes_response_model( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, 
) items = capture_items("span", "transaction") @@ -2679,6 +2701,7 @@ async def test_ai_client_span_response_model_with_chat_completions( sentry_init( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -2801,6 +2824,7 @@ def calculator(a: int, b: int) -> int: integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -2889,6 +2913,7 @@ async def test_invoke_agent_span_includes_response_model( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -3023,6 +3048,7 @@ def calculator(a: int, b: int) -> int: integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -3065,6 +3091,7 @@ def test_openai_agents_message_truncation(sentry_init, capture_items): integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) test_messages = [ @@ -3111,6 +3138,7 @@ async def test_streaming_span_update_captures_response_data( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) # Create a mock streaming response object (similar to what we'd get from ResponseCompletedEvent) @@ -3176,6 +3204,7 @@ async def test_streaming_ttft_on_chat_span( sentry_init( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) request_headers = {} @@ -3330,6 +3359,7 @@ async def test_conversation_id_on_all_spans( sentry_init( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, + 
_experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -3468,6 +3498,7 @@ def simple_tool(message: str) -> str: sentry_init( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -3531,6 +3562,7 @@ async def test_no_conversation_id_when_not_provided( sentry_init( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") diff --git a/tests/integrations/pydantic_ai/test_pydantic_ai.py b/tests/integrations/pydantic_ai/test_pydantic_ai.py index fe34dd0f5d..bab2f6208d 100644 --- a/tests/integrations/pydantic_ai/test_pydantic_ai.py +++ b/tests/integrations/pydantic_ai/test_pydantic_ai.py @@ -61,6 +61,7 @@ async def test_agent_run_async(sentry_init, capture_items, get_test_agent): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -102,6 +103,7 @@ async def test_agent_run_async_model_error(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event", "transaction", "span") @@ -135,6 +137,7 @@ async def test_agent_run_async_usage_data(sentry_init, capture_items, get_test_a integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -179,6 +182,7 @@ def test_agent_run_sync(sentry_init, capture_items, get_test_agent): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -211,6 +215,7 @@ def test_agent_run_sync_model_error(sentry_init, 
capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event", "transaction", "span") @@ -244,6 +249,7 @@ async def test_agent_run_stream(sentry_init, capture_items, get_test_agent): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -288,6 +294,7 @@ async def test_agent_run_stream_events(sentry_init, capture_items, get_test_agen integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -322,6 +329,7 @@ async def test_agent_with_tools(sentry_init, capture_items, get_test_agent): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) test_agent = get_test_agent() @@ -387,6 +395,7 @@ async def test_agent_with_tool_model_retry( ], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) retries = 0 @@ -470,6 +479,7 @@ async def test_agent_with_tool_validation_error( ], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) test_agent = get_test_agent() @@ -534,6 +544,7 @@ async def test_agent_with_tools_streaming(sentry_init, capture_items, get_test_a integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) test_agent = get_test_agent() @@ -583,6 +594,7 @@ async def test_model_settings(sentry_init, capture_items, get_test_agent_with_se sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -631,6 +643,7 @@ async def test_system_prompt_attribute( 
integrations=[PydanticAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -676,6 +689,7 @@ async def test_error_handling(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -700,6 +714,7 @@ async def test_without_pii(sentry_init, capture_items, get_test_agent): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=False, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -729,6 +744,7 @@ async def test_without_pii_tools(sentry_init, capture_items, get_test_agent): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=False, + _experiments={"gen_ai_as_v2_spans": True}, ) test_agent = get_test_agent() @@ -765,6 +781,7 @@ async def test_multiple_agents_concurrent(sentry_init, capture_items, get_test_a sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -799,6 +816,7 @@ async def test_message_history(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -848,6 +866,7 @@ async def test_gen_ai_system(sentry_init, capture_items, get_test_agent): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -878,6 +897,7 @@ async def test_include_prompts_false(sentry_init, capture_items, get_test_agent) integrations=[PydanticAIIntegration(include_prompts=False)], traces_sample_rate=1.0, 
send_default_pii=True, # Even with PII enabled, prompts should not be captured + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -907,6 +927,7 @@ async def test_include_prompts_true(sentry_init, capture_items, get_test_agent): integrations=[PydanticAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -938,6 +959,7 @@ async def test_include_prompts_false_with_tools( integrations=[PydanticAIIntegration(include_prompts=False)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) test_agent = get_test_agent() @@ -975,6 +997,7 @@ async def test_include_prompts_requires_pii(sentry_init, capture_items, get_test integrations=[PydanticAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=False, # PII disabled + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1066,6 +1089,7 @@ async def mock_map_tool_result_part(part): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1135,6 +1159,7 @@ async def test_context_cleanup_after_run(sentry_init, get_test_agent): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) # Verify context is not set before run @@ -1158,6 +1183,7 @@ def test_context_cleanup_after_run_sync(sentry_init, get_test_agent): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) # Verify context is not set before run @@ -1182,6 +1208,7 @@ async def test_context_cleanup_after_streaming(sentry_init, get_test_agent): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + 
_experiments={"gen_ai_as_v2_spans": True}, ) # Verify context is not set before run @@ -1208,6 +1235,7 @@ async def test_context_cleanup_on_error(sentry_init, get_test_agent): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) test_agent = get_test_agent() @@ -1242,6 +1270,7 @@ async def test_context_isolation_concurrent_agents(sentry_init, get_test_agent): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) # Create a second agent @@ -1297,6 +1326,7 @@ async def test_invoke_agent_with_list_user_prompt(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1347,6 +1377,7 @@ async def test_invoke_agent_with_instructions( integrations=[PydanticAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1386,6 +1417,7 @@ async def test_model_name_extraction_with_callable(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) # Test the utility function directly @@ -1412,6 +1444,7 @@ async def test_model_name_extraction_fallback_to_str(sentry_init, capture_items) sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) # Test the utility function directly @@ -1440,6 +1473,7 @@ async def test_model_settings_object_style(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -1475,6 +1509,7 @@ async 
def test_usage_data_partial(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1508,6 +1543,7 @@ async def test_agent_data_from_scope(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1530,6 +1566,7 @@ async def test_available_tools_without_description( sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) test_agent = get_test_agent() @@ -1564,6 +1601,7 @@ async def test_output_with_tool_calls(sentry_init, capture_items, get_test_agent integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) test_agent = get_test_agent() @@ -1609,6 +1647,7 @@ async def test_message_formatting_with_different_parts(sentry_init, capture_item integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1657,6 +1696,7 @@ async def test_update_invoke_agent_span_with_none_output(sentry_init, capture_it integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -1684,6 +1724,7 @@ async def test_update_ai_client_span_with_none_response(sentry_init, capture_ite sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -1709,6 +1750,7 @@ async def test_agent_without_name(sentry_init, capture_items): sentry_init( 
integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1734,6 +1776,7 @@ async def test_model_response_without_parts(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -1764,6 +1807,7 @@ async def test_input_messages_error_handling(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -1793,6 +1837,7 @@ async def test_available_tools_error_handling(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -1822,6 +1867,7 @@ async def test_set_usage_data_with_none_usage(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -1848,6 +1894,7 @@ async def test_set_usage_data_with_partial_fields(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -1889,6 +1936,7 @@ def test_tool(x: int) -> int: integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1918,6 +1966,7 @@ async def test_message_parts_with_list_content(sentry_init, capture_items): 
integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -1954,6 +2003,7 @@ async def test_output_data_with_text_and_tool_calls(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -1993,6 +2043,7 @@ async def test_output_data_error_handling(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2025,6 +2076,7 @@ async def test_message_with_system_prompt_part(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2060,6 +2112,7 @@ async def test_message_with_instructions(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2094,6 +2147,7 @@ async def test_set_input_messages_without_prompts(sentry_init, capture_items): integrations=[PydanticAIIntegration(include_prompts=False)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2122,6 +2176,7 @@ async def test_set_output_data_without_prompts(sentry_init, capture_items): integrations=[PydanticAIIntegration(include_prompts=False)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": 
True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2149,6 +2204,7 @@ async def test_get_model_name_with_exception_in_callable(sentry_init, capture_it sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) # Create model with callable name that raises exception @@ -2172,6 +2228,7 @@ async def test_get_model_name_with_string_model(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) # Pass a string as model @@ -2191,6 +2248,7 @@ async def test_get_model_name_with_none(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) # Pass None @@ -2212,6 +2270,7 @@ async def test_set_model_data_with_system(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2243,6 +2302,7 @@ async def test_set_model_data_from_agent_scope(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2276,6 +2336,7 @@ async def test_set_model_data_with_none_settings_values(sentry_init, capture_ite sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2308,6 +2369,7 @@ async def test_should_send_prompts_without_pii(sentry_init, capture_items): integrations=[PydanticAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=False, # PII disabled + _experiments={"gen_ai_as_v2_spans": True}, ) # 
Should return False @@ -2326,6 +2388,7 @@ async def test_set_agent_data_without_agent(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2352,6 +2415,7 @@ async def test_set_agent_data_from_scope(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2384,6 +2448,7 @@ async def test_set_agent_data_without_name(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2414,6 +2479,7 @@ async def test_set_available_tools_without_toolset(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2444,6 +2510,7 @@ async def test_set_available_tools_with_schema(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2483,6 +2550,7 @@ async def test_execute_tool_span_creation(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2509,6 +2577,7 @@ async def test_execute_tool_span_with_mcp_type(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + 
_experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2536,6 +2605,7 @@ async def test_execute_tool_span_without_prompts(sentry_init, capture_items): integrations=[PydanticAIIntegration(include_prompts=False)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2560,6 +2630,7 @@ async def test_execute_tool_span_with_none_args(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2583,6 +2654,7 @@ async def test_update_execute_tool_span_with_none_span(sentry_init, capture_item sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) # Update with None span - should not raise @@ -2607,6 +2679,7 @@ async def test_update_execute_tool_span_with_none_result(sentry_init, capture_it integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2630,6 +2703,7 @@ async def test_tool_execution_without_span_context(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) # Create a simple agent with no tools (won't have function_toolset) @@ -2661,6 +2735,7 @@ async def test_invoke_agent_span_with_callable_instruction(sentry_init, capture_ integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2694,6 +2769,7 @@ async def 
test_invoke_agent_span_with_string_instructions(sentry_init, capture_i integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2722,6 +2798,7 @@ async def test_ai_client_span_with_streaming_flag(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2749,6 +2826,7 @@ async def test_ai_client_span_gets_agent_from_scope(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2797,6 +2875,7 @@ async def test_binary_content_encoding_image(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2827,6 +2906,7 @@ async def test_binary_content_encoding_mixed_content(sentry_init, capture_items) integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2870,6 +2950,7 @@ async def test_binary_content_in_agent_run(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2894,6 +2975,7 @@ async def test_binary_content_in_agent_run(sentry_init, capture_items): async def test_set_usage_data_with_cache_tokens(sentry_init, capture_items): """Test that cache_read_tokens and cache_write_tokens are tracked.""" 
sentry_init(integrations=[PydanticAIIntegration()], traces_sample_rate=1.0) + _experiments = ({"gen_ai_as_v2_spans": True},) items = capture_items("transaction", "span") @@ -2964,6 +3046,7 @@ def test_image_url_base64_content_in_span( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -3034,6 +3117,7 @@ async def test_invoke_agent_image_url( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) agent = Agent("test", name="test_image_url_agent") @@ -3081,6 +3165,7 @@ def multiply_numbers(a: int, b: int) -> int: integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") From 51a07fff893c5c552de1950239b4a064dc48b828 Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Fri, 17 Apr 2026 14:47:07 +0200 Subject: [PATCH 30/36] push experiment --- sentry_sdk/consts.py | 1 + 1 file changed, 1 insertion(+) diff --git a/sentry_sdk/consts.py b/sentry_sdk/consts.py index 73e5a6d9cb..82107b49ee 100644 --- a/sentry_sdk/consts.py +++ b/sentry_sdk/consts.py @@ -86,6 +86,7 @@ class CompressionAlgo(Enum): "trace_lifecycle": Optional[Literal["static", "stream"]], "ignore_spans": Optional[IgnoreSpansConfig], "suppress_asgi_chained_exceptions": Optional[bool], + "gen_ai_as_v2_spans": Optional[bool], }, total=False, ) From bab75670df741b84c3b17b8b615786705abdbabc Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Fri, 17 Apr 2026 14:52:13 +0200 Subject: [PATCH 31/36] fix tests --- tests/tracing/test_decorator.py | 16 +++++++++++++--- tests/tracing/test_misc.py | 5 ++++- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/tests/tracing/test_decorator.py b/tests/tracing/test_decorator.py index 5f5adec2cb..d370b4bbc9 100644 --- 
a/tests/tracing/test_decorator.py +++ b/tests/tracing/test_decorator.py @@ -122,7 +122,10 @@ async def _some_function_traced(a, b, c): def test_span_templates_ai_dicts(sentry_init, capture_items): - sentry_init(traces_sample_rate=1.0) + sentry_init( + traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, + ) items = capture_items("span") @sentry_sdk.trace(template=SPANTEMPLATE.AI_TOOL) @@ -241,7 +244,10 @@ def my_agent(): def test_span_templates_ai_objects(sentry_init, capture_items): - sentry_init(traces_sample_rate=1.0) + sentry_init( + traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, + ) items = capture_items("span") @sentry_sdk.trace(template=SPANTEMPLATE.AI_TOOL) @@ -365,7 +371,11 @@ def my_agent(): @pytest.mark.parametrize("send_default_pii", [True, False]) def test_span_templates_ai_pii(sentry_init, capture_items, send_default_pii): - sentry_init(traces_sample_rate=1.0, send_default_pii=send_default_pii) + sentry_init( + traces_sample_rate=1.0, + send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, + ) items = capture_items("span") @sentry_sdk.trace(template=SPANTEMPLATE.AI_TOOL) diff --git a/tests/tracing/test_misc.py b/tests/tracing/test_misc.py index 0188b08a88..4209a02b4b 100644 --- a/tests/tracing/test_misc.py +++ b/tests/tracing/test_misc.py @@ -650,7 +650,10 @@ def test_conversation_id_propagates_to_span_with_gen_ai_op( self, sentry_init, capture_items ): """Span with gen_ai.* op should get conversation_id.""" - sentry_init(traces_sample_rate=1.0) + sentry_init( + traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, + ) items = capture_items("span") scope = sentry_sdk.get_current_scope() From 3e5579506264719625225e62271ab612c57afdc8 Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Fri, 17 Apr 2026 14:53:49 +0200 Subject: [PATCH 32/36] client changes --- sentry_sdk/client.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git 
a/sentry_sdk/client.py b/sentry_sdk/client.py index 0d13b6db03..f8bc071545 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -1113,10 +1113,14 @@ def capture_event( envelope = Envelope(headers=headers) - if is_transaction: + if is_transaction and not self.options["_experiments"].get( + "gen_ai_as_v2_spans", False + ): if isinstance(profile, Profile): envelope.add_profile(profile.to_json(event_opt, self.options)) + envelope.add_transaction(event_opt) + elif is_transaction: split_spans = _split_gen_ai_spans(event_opt) if split_spans is None or not split_spans[1]: envelope.add_transaction(event_opt) From 6d1d7edce94a5c20be9d32470ca1a385c0d199be Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Fri, 17 Apr 2026 14:55:22 +0200 Subject: [PATCH 33/36] simplify client logic --- sentry_sdk/client.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index f8bc071545..87504c94b1 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -1113,12 +1113,12 @@ def capture_event( envelope = Envelope(headers=headers) + if is_transaction and isinstance(profile, Profile): + envelope.add_profile(profile.to_json(event_opt, self.options)) + if is_transaction and not self.options["_experiments"].get( "gen_ai_as_v2_spans", False ): - if isinstance(profile, Profile): - envelope.add_profile(profile.to_json(event_opt, self.options)) - envelope.add_transaction(event_opt) elif is_transaction: split_spans = _split_gen_ai_spans(event_opt) From 6bf400680527c779dc13421df181daab2fb09e7e Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb Date: Fri, 17 Apr 2026 15:07:42 +0200 Subject: [PATCH 34/36] Revert "add experimental v2 option" This reverts commit 6c5c812faa8879523fb4f90c650327a7f70a1d81. 
--- .../integrations/anthropic/test_anthropic.py | 53 ------------ .../google_genai/test_google_genai.py | 37 -------- .../huggingface_hub/test_huggingface_hub.py | 8 -- .../integrations/langchain/test_langchain.py | 26 ------ tests/integrations/litellm/test_litellm.py | 28 ------ tests/integrations/openai/test_openai.py | 42 --------- .../openai_agents/test_openai_agents.py | 32 ------- .../pydantic_ai/test_pydantic_ai.py | 85 ------------------- 8 files changed, 311 deletions(-) diff --git a/tests/integrations/anthropic/test_anthropic.py b/tests/integrations/anthropic/test_anthropic.py index aedab1578b..c7fc280b6c 100644 --- a/tests/integrations/anthropic/test_anthropic.py +++ b/tests/integrations/anthropic/test_anthropic.py @@ -97,7 +97,6 @@ def test_nonstreaming_create_message( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -172,7 +171,6 @@ async def test_nonstreaming_create_message_async( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = AsyncAnthropic(api_key="z") @@ -289,7 +287,6 @@ def test_streaming_create_message( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -398,7 +395,6 @@ def test_streaming_create_message_close( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -502,7 +498,6 @@ def test_streaming_create_message_api_error( 
integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -619,7 +614,6 @@ def test_stream_messages( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -729,7 +723,6 @@ def test_stream_messages_close( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -838,7 +831,6 @@ def test_stream_messages_api_error( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -961,7 +953,6 @@ async def test_streaming_create_message_async( traces_sample_rate=1.0, default_integrations=False, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1073,7 +1064,6 @@ async def test_streaming_create_message_async_close( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1180,7 +1170,6 @@ async def test_streaming_create_message_async_api_error( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1301,7 +1290,6 @@ async def test_stream_message_async( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) 
items = capture_items("transaction", "span") @@ -1412,7 +1400,6 @@ async def test_stream_messages_async_api_error( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1523,7 +1510,6 @@ async def test_stream_messages_async_close( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1680,7 +1666,6 @@ def test_streaming_create_message_with_input_json_delta( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1830,7 +1815,6 @@ def test_stream_messages_with_input_json_delta( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1988,7 +1972,6 @@ async def test_streaming_create_message_with_input_json_delta_async( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2146,7 +2129,6 @@ async def test_stream_message_with_input_json_delta_async( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2206,7 +2188,6 @@ async def test_stream_message_with_input_json_delta_async( def test_exception_message_create(sentry_init, capture_items): sentry_init(integrations=[AnthropicIntegration()], 
traces_sample_rate=1.0) - _experiments = ({"gen_ai_as_v2_spans": True},) items = capture_items("event", "transaction") client = Anthropic(api_key="z") @@ -2229,7 +2210,6 @@ def test_exception_message_create(sentry_init, capture_items): def test_span_status_error(sentry_init, capture_items): sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - _experiments = ({"gen_ai_as_v2_spans": True},) items = capture_items("event", "span") with start_transaction(name="anthropic"): @@ -2256,7 +2236,6 @@ def test_span_status_error(sentry_init, capture_items): @pytest.mark.asyncio async def test_span_status_error_async(sentry_init, capture_items): sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - _experiments = ({"gen_ai_as_v2_spans": True},) items = capture_items("event", "span") with start_transaction(name="anthropic"): @@ -2283,7 +2262,6 @@ async def test_span_status_error_async(sentry_init, capture_items): @pytest.mark.asyncio async def test_exception_message_create_async(sentry_init, capture_items): sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - _experiments = ({"gen_ai_as_v2_spans": True},) items = capture_items("event", "transaction") client = AsyncAnthropic(api_key="z") @@ -2308,7 +2286,6 @@ def test_span_origin(sentry_init, capture_items): sentry_init( integrations=[AnthropicIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2339,7 +2316,6 @@ async def test_span_origin_async(sentry_init, capture_items): sentry_init( integrations=[AnthropicIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2403,7 +2379,6 @@ def test_set_output_data_with_input_json_delta(sentry_init): integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) with 
start_transaction(name="test"): @@ -2454,7 +2429,6 @@ def test_anthropic_message_role_mapping( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2501,7 +2475,6 @@ def test_anthropic_message_truncation(sentry_init, capture_items): integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2552,7 +2525,6 @@ async def test_anthropic_message_truncation_async(sentry_init, capture_items): integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2613,7 +2585,6 @@ def test_nonstreaming_create_message_with_system_prompt( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -2700,7 +2671,6 @@ async def test_nonstreaming_create_message_with_system_prompt_async( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = AsyncAnthropic(api_key="z") @@ -2830,7 +2800,6 @@ def test_streaming_create_message_with_system_prompt( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2961,7 +2930,6 @@ def test_stream_messages_with_system_prompt( integrations=[AnthropicIntegration(include_prompts=include_prompts)], 
traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -3094,7 +3062,6 @@ async def test_stream_message_with_system_prompt_async( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -3227,7 +3194,6 @@ async def test_streaming_create_message_with_system_prompt_async( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -3303,7 +3269,6 @@ def test_system_prompt_with_complex_structure(sentry_init, capture_items): integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -3557,7 +3522,6 @@ def test_message_with_base64_image(sentry_init, capture_items): integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -3608,7 +3572,6 @@ def test_message_with_url_image(sentry_init, capture_items): integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -3652,7 +3615,6 @@ def test_message_with_file_image(sentry_init, capture_items): integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") 
client = Anthropic(api_key="z") @@ -3697,7 +3659,6 @@ def test_message_with_base64_pdf(sentry_init, capture_items): integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -3742,7 +3703,6 @@ def test_message_with_url_pdf(sentry_init, capture_items): integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -3786,7 +3746,6 @@ def test_message_with_file_document(sentry_init, capture_items): integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -3831,7 +3790,6 @@ def test_message_with_mixed_content(sentry_init, capture_items): integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -3914,7 +3872,6 @@ def test_message_with_multiple_images_different_formats(sentry_init, capture_ite integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -3989,7 +3946,6 @@ def test_binary_content_not_stored_when_pii_disabled(sentry_init, capture_items) integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=False, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -4028,7 +3984,6 @@ def 
test_binary_content_not_stored_when_prompts_disabled(sentry_init, capture_it integrations=[AnthropicIntegration(include_prompts=False)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -4064,7 +4019,6 @@ def test_binary_content_not_stored_when_prompts_disabled(sentry_init, capture_it def test_cache_tokens_nonstreaming(sentry_init, capture_items): """Test cache read/write tokens are tracked for non-streaming responses.""" sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - _experiments = ({"gen_ai_as_v2_spans": True},) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -4113,7 +4067,6 @@ def test_input_tokens_include_cache_write_nonstreaming(sentry_init, capture_item cache_creation_input_tokens=2846, cache_read_input_tokens=0) """ sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - _experiments = ({"gen_ai_as_v2_spans": True},) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -4162,7 +4115,6 @@ def test_input_tokens_include_cache_read_nonstreaming(sentry_init, capture_items cache_creation_input_tokens=0, cache_read_input_tokens=2846) """ sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - _experiments = ({"gen_ai_as_v2_spans": True},) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -4240,7 +4192,6 @@ def test_input_tokens_include_cache_read_streaming( ) sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - _experiments = ({"gen_ai_as_v2_spans": True},) items = capture_items("transaction", "span") with mock.patch.object( @@ -4307,7 +4258,6 @@ def test_stream_messages_input_tokens_include_cache_read_streaming( ) sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - _experiments = ({"gen_ai_as_v2_spans": True},) items = 
capture_items("transaction", "span") with mock.patch.object( @@ -4341,7 +4291,6 @@ def test_input_tokens_unchanged_without_caching(sentry_init, capture_items): Usage(input_tokens=20, output_tokens=12) """ sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - _experiments = ({"gen_ai_as_v2_spans": True},) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -4410,7 +4359,6 @@ def test_cache_tokens_streaming( ) sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - _experiments = ({"gen_ai_as_v2_spans": True},) items = capture_items("transaction", "span") with mock.patch.object( @@ -4471,7 +4419,6 @@ def test_stream_messages_cache_tokens( ) sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - _experiments = ({"gen_ai_as_v2_spans": True},) items = capture_items("transaction", "span") with mock.patch.object( diff --git a/tests/integrations/google_genai/test_google_genai.py b/tests/integrations/google_genai/test_google_genai.py index ae31fe565b..e074b79c8c 100644 --- a/tests/integrations/google_genai/test_google_genai.py +++ b/tests/integrations/google_genai/test_google_genai.py @@ -130,7 +130,6 @@ def test_nonstreaming_generate_content( integrations=[GoogleGenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -220,7 +219,6 @@ def test_generate_content_with_system_instruction( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -264,7 +262,6 @@ def test_generate_content_with_tools(sentry_init, capture_items, mock_genai_clie sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -347,7 +344,6 
@@ def test_tool_execution(sentry_init, capture_items): integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -384,7 +380,6 @@ def test_error_handling(sentry_init, capture_items, mock_genai_client): sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event", "transaction") @@ -416,7 +411,6 @@ def test_streaming_generate_content(sentry_init, capture_items, mock_genai_clien integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -533,7 +527,6 @@ def test_span_origin(sentry_init, capture_items, mock_genai_client): sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -561,7 +554,6 @@ def test_response_without_usage_metadata(sentry_init, capture_items, mock_genai_ sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -603,7 +595,6 @@ def test_multiple_candidates(sentry_init, capture_items, mock_genai_client): integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -668,7 +659,6 @@ def test_all_configuration_parameters(sentry_init, capture_items, mock_genai_cli sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -708,7 +698,6 @@ def test_empty_response(sentry_init, capture_items, mock_genai_client): sentry_init( integrations=[GoogleGenAIIntegration()], 
traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -740,7 +729,6 @@ def test_response_with_different_id_fields( sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -783,7 +771,6 @@ def test_tool_with_async_function(sentry_init): sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) # Create an async tool function @@ -806,7 +793,6 @@ def test_contents_as_none(sentry_init, capture_items, mock_genai_client): integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -833,7 +819,6 @@ def test_tool_calls_extraction(sentry_init, capture_items, mock_genai_client): sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -920,7 +905,6 @@ def test_google_genai_message_truncation(sentry_init, capture_items, mock_genai_ integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -996,7 +980,6 @@ def test_embed_content( integrations=[GoogleGenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1058,7 +1041,6 @@ def test_embed_content_string_input(sentry_init, capture_items, mock_genai_clien integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1105,7 +1087,6 @@ def test_embed_content_error_handling(sentry_init, 
capture_items, mock_genai_cli sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "event") @@ -1139,7 +1120,6 @@ def test_embed_content_without_statistics( sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1179,7 +1159,6 @@ def test_embed_content_span_origin(sentry_init, capture_items, mock_genai_client sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1220,7 +1199,6 @@ async def test_async_embed_content( integrations=[GoogleGenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1285,7 +1263,6 @@ async def test_async_embed_content_string_input( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1335,7 +1312,6 @@ async def test_async_embed_content_error_handling( sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "event") @@ -1370,7 +1346,6 @@ async def test_async_embed_content_without_statistics( sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1413,7 +1388,6 @@ async def test_async_embed_content_span_origin( sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1445,7 +1419,6 @@ def 
test_generate_content_with_content_object( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1482,7 +1455,6 @@ def test_generate_content_with_dict_format( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1515,7 +1487,6 @@ def test_generate_content_with_file_data(sentry_init, capture_items, mock_genai_ integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1565,7 +1536,6 @@ def test_generate_content_with_inline_data( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1611,7 +1581,6 @@ def test_generate_content_with_function_response( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1666,7 +1635,6 @@ def test_generate_content_with_mixed_string_and_content( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1710,7 +1678,6 @@ def test_generate_content_with_part_object_directly( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1749,7 +1716,6 @@ def test_generate_content_with_list_of_dicts( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - 
_experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1786,7 +1752,6 @@ def test_generate_content_with_dict_inline_data( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1831,7 +1796,6 @@ def test_generate_content_without_parts_property_inline_data( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1875,7 +1839,6 @@ def test_generate_content_without_parts_property_inline_data_and_binary_data_wit integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") diff --git a/tests/integrations/huggingface_hub/test_huggingface_hub.py b/tests/integrations/huggingface_hub/test_huggingface_hub.py index 16c27b678d..98abbb00fa 100644 --- a/tests/integrations/huggingface_hub/test_huggingface_hub.py +++ b/tests/integrations/huggingface_hub/test_huggingface_hub.py @@ -480,7 +480,6 @@ def test_text_generation( traces_sample_rate=1.0, send_default_pii=send_default_pii, integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -556,7 +555,6 @@ def test_text_generation_streaming( traces_sample_rate=1.0, send_default_pii=send_default_pii, integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -633,7 +631,6 @@ def test_chat_completion( traces_sample_rate=1.0, send_default_pii=send_default_pii, integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], - _experiments={"gen_ai_as_v2_spans": True}, ) items = 
capture_items("transaction", "span") @@ -712,7 +709,6 @@ def test_chat_completion_streaming( traces_sample_rate=1.0, send_default_pii=send_default_pii, integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -784,7 +780,6 @@ def test_chat_completion_api_error( sentry_init: "Any", capture_items: "Any", mock_hf_api_with_errors: "Any" ) -> None: sentry_init(traces_sample_rate=1.0) - _experiments = ({"gen_ai_as_v2_spans": True},) items = capture_items("event", "transaction", "span") client = get_hf_provider_inference_client() @@ -844,7 +839,6 @@ def test_span_status_error( sentry_init: "Any", capture_items: "Any", mock_hf_api_with_errors: "Any" ) -> None: sentry_init(traces_sample_rate=1.0) - _experiments = ({"gen_ai_as_v2_spans": True},) items = capture_items("event", "transaction", "span") client = get_hf_provider_inference_client() @@ -887,7 +881,6 @@ def test_chat_completion_with_tools( traces_sample_rate=1.0, send_default_pii=send_default_pii, integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -983,7 +976,6 @@ def test_chat_completion_streaming_with_tools( traces_sample_rate=1.0, send_default_pii=send_default_pii, integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") diff --git a/tests/integrations/langchain/test_langchain.py b/tests/integrations/langchain/test_langchain.py index 5002d050b9..f709d12129 100644 --- a/tests/integrations/langchain/test_langchain.py +++ b/tests/integrations/langchain/test_langchain.py @@ -108,7 +108,6 @@ def test_langchain_text_completion( ], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -217,7 +216,6 
@@ def test_langchain_create_agent( ], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -338,7 +336,6 @@ def test_tool_execution_span( ], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -531,7 +528,6 @@ def test_langchain_openai_tools_agent( ], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -869,7 +865,6 @@ def test_langchain_error(sentry_init, capture_items): integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event", "transaction", "span") @@ -908,7 +903,6 @@ def test_span_status_error(sentry_init, capture_items): sentry_init( integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event", "transaction", "span") @@ -994,7 +988,6 @@ def _identifying_params(self): return {} sentry_init(integrations=[LangchainIntegration()]) - _experiments = ({"gen_ai_as_v2_spans": True},) # Create a manual SentryLangchainCallback manual_callback = SentryLangchainCallback( @@ -1035,7 +1028,6 @@ def test_langchain_callback_manager(sentry_init): sentry_init( integrations=[LangchainIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) local_manager = BaseCallbackManager(handlers=[]) @@ -1068,7 +1060,6 @@ def test_langchain_callback_manager_with_sentry_callback(sentry_init): sentry_init( integrations=[LangchainIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) sentry_callback = SentryLangchainCallback(0, False) local_manager = BaseCallbackManager(handlers=[sentry_callback]) @@ -1101,7 
+1092,6 @@ def test_langchain_callback_list(sentry_init): sentry_init( integrations=[LangchainIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) local_callbacks = [] @@ -1134,7 +1124,6 @@ def test_langchain_callback_list_existing_callback(sentry_init): sentry_init( integrations=[LangchainIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) sentry_callback = SentryLangchainCallback(0, False) local_callbacks = [sentry_callback] @@ -1172,7 +1161,6 @@ def test_langchain_message_role_mapping(sentry_init, capture_items): integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1310,7 +1298,6 @@ def test_langchain_message_truncation(sentry_init, capture_items): integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1403,7 +1390,6 @@ def test_langchain_embeddings_sync( integrations=[LangchainIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1482,7 +1468,6 @@ def test_langchain_embeddings_embed_query( integrations=[LangchainIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1557,7 +1542,6 @@ async def test_langchain_embeddings_async( integrations=[LangchainIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1630,7 +1614,6 @@ async def 
test_langchain_embeddings_aembed_query(sentry_init, capture_items): integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1692,7 +1675,6 @@ def test_langchain_embeddings_no_model_name(sentry_init, capture_items): sentry_init( integrations=[LangchainIntegration(include_prompts=False)], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1742,7 +1724,6 @@ def test_langchain_embeddings_integration_disabled(sentry_init, capture_items): # Initialize without LangchainIntegration sentry_init(traces_sample_rate=1.0) - _experiments = ({"gen_ai_as_v2_spans": True},) items = capture_items("transaction", "span") with mock.patch.object( @@ -1779,7 +1760,6 @@ def test_langchain_embeddings_multiple_providers(sentry_init, capture_items): integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1837,7 +1817,6 @@ def test_langchain_embeddings_error_handling(sentry_init, capture_items): integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1878,7 +1857,6 @@ def test_langchain_embeddings_multiple_calls(sentry_init, capture_items): integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1942,7 +1920,6 @@ def test_langchain_embeddings_span_hierarchy(sentry_init, capture_items): integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = 
capture_items("transaction", "span") @@ -1996,7 +1973,6 @@ def test_langchain_embeddings_with_list_and_string_inputs(sentry_init, capture_i integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2061,7 +2037,6 @@ def test_langchain_response_model_extraction( integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2367,7 +2342,6 @@ def test_langchain_ai_system_detection( sentry_init( integrations=[LangchainIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") diff --git a/tests/integrations/litellm/test_litellm.py b/tests/integrations/litellm/test_litellm.py index b9365e7008..90807744e7 100644 --- a/tests/integrations/litellm/test_litellm.py +++ b/tests/integrations/litellm/test_litellm.py @@ -152,7 +152,6 @@ def test_nonstreaming_chat_completion( integrations=[LiteLLMIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -234,7 +233,6 @@ async def test_async_nonstreaming_chat_completion( integrations=[LiteLLMIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -318,7 +316,6 @@ def test_streaming_chat_completion( integrations=[LiteLLMIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -389,7 +386,6 @@ async def test_async_streaming_chat_completion( 
integrations=[LiteLLMIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -456,7 +452,6 @@ def test_embeddings_create( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -526,7 +521,6 @@ async def test_async_embeddings_create( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -591,7 +585,6 @@ def test_embeddings_create_with_list_input( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -654,7 +647,6 @@ async def test_async_embeddings_create_with_list_input( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -717,7 +709,6 @@ def test_embeddings_no_pii( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=False, # PII disabled - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -774,7 +765,6 @@ async def test_async_embeddings_no_pii( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=False, # PII disabled - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -825,7 +815,6 @@ def test_exception_handling( sentry_init( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event") @@ -864,7 
+853,6 @@ async def test_async_exception_handling( sentry_init( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event") @@ -906,7 +894,6 @@ def test_span_origin( sentry_init( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -954,7 +941,6 @@ def test_multiple_providers( sentry_init( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction") @@ -1050,7 +1036,6 @@ async def test_async_multiple_providers( sentry_init( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1147,7 +1132,6 @@ def test_additional_parameters( sentry_init( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1207,7 +1191,6 @@ async def test_async_additional_parameters( sentry_init( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1267,7 +1250,6 @@ def test_no_integration( """Test that when integration is not enabled, callbacks don't break.""" sentry_init( traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1314,7 +1296,6 @@ async def test_async_no_integration( """Test that when integration is not enabled, callbacks don't break.""" sentry_init( traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1357,7 +1338,6 @@ def test_response_without_usage(sentry_init, capture_items): sentry_init( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, - 
_experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1399,7 +1379,6 @@ def test_integration_setup(sentry_init): sentry_init( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) # Check that callbacks are registered @@ -1414,7 +1393,6 @@ def test_litellm_message_truncation(sentry_init, capture_items): integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1481,7 +1459,6 @@ def test_binary_content_encoding_image_url( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1561,7 +1538,6 @@ async def test_async_binary_content_encoding_image_url( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1642,7 +1618,6 @@ def test_binary_content_encoding_mixed_content( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1711,7 +1686,6 @@ async def test_async_binary_content_encoding_mixed_content( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1781,7 +1755,6 @@ def test_binary_content_encoding_uri_type( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1855,7 +1828,6 @@ async def 
test_async_binary_content_encoding_uri_type( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") diff --git a/tests/integrations/openai/test_openai.py b/tests/integrations/openai/test_openai.py index 4c7df84b8b..e53f8e4f55 100644 --- a/tests/integrations/openai/test_openai.py +++ b/tests/integrations/openai/test_openai.py @@ -138,7 +138,6 @@ def test_nonstreaming_chat_completion_no_prompts( integrations=[OpenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -234,7 +233,6 @@ def test_nonstreaming_chat_completion(sentry_init, capture_items, messages, requ integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -314,7 +312,6 @@ async def test_nonstreaming_chat_completion_async_no_prompts( integrations=[OpenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -410,7 +407,6 @@ async def test_nonstreaming_chat_completion_async( integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -506,7 +502,6 @@ def test_streaming_chat_completion_no_prompts( ], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -626,7 +621,6 @@ def test_streaming_chat_completion_with_usage_in_stream( integrations=[OpenAIIntegration(include_prompts=False)], traces_sample_rate=1.0, send_default_pii=False, - _experiments={"gen_ai_as_v2_spans": True}, ) items 
= capture_items("span") @@ -707,7 +701,6 @@ def test_streaming_chat_completion_empty_content_preserves_token_usage( integrations=[OpenAIIntegration(include_prompts=False)], traces_sample_rate=1.0, send_default_pii=False, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -771,7 +764,6 @@ async def test_streaming_chat_completion_empty_content_preserves_token_usage_asy integrations=[OpenAIIntegration(include_prompts=False)], traces_sample_rate=1.0, send_default_pii=False, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -837,7 +829,6 @@ async def test_streaming_chat_completion_async_with_usage_in_stream( integrations=[OpenAIIntegration(include_prompts=False)], traces_sample_rate=1.0, send_default_pii=False, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -966,7 +957,6 @@ def test_streaming_chat_completion( ], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1119,7 +1109,6 @@ async def test_streaming_chat_completion_async_no_prompts( ], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1291,7 +1280,6 @@ async def test_streaming_chat_completion_async( ], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1424,7 +1412,6 @@ async def test_streaming_chat_completion_async( def test_bad_chat_completion(sentry_init, capture_items): sentry_init(integrations=[OpenAIIntegration()], traces_sample_rate=1.0) - _experiments = ({"gen_ai_as_v2_spans": True},) items = capture_items("event") client = OpenAI(api_key="z") @@ -1443,7 +1430,6 @@ def test_bad_chat_completion(sentry_init, capture_items): def test_span_status_error(sentry_init, capture_items): sentry_init(integrations=[OpenAIIntegration()], traces_sample_rate=1.0) - _experiments = 
({"gen_ai_as_v2_spans": True},) items = capture_items("event", "transaction", "span") with start_transaction(name="test"): @@ -1469,7 +1455,6 @@ def test_span_status_error(sentry_init, capture_items): @pytest.mark.asyncio async def test_bad_chat_completion_async(sentry_init, capture_items): sentry_init(integrations=[OpenAIIntegration()], traces_sample_rate=1.0) - _experiments = ({"gen_ai_as_v2_spans": True},) items = capture_items("event") client = AsyncOpenAI(api_key="z") @@ -1500,7 +1485,6 @@ def test_embeddings_create_no_pii( integrations=[OpenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1583,7 +1567,6 @@ def test_embeddings_create(sentry_init, capture_items, input, request): integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1655,7 +1638,6 @@ async def test_embeddings_create_async_no_pii( integrations=[OpenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1739,7 +1721,6 @@ async def test_embeddings_create_async(sentry_init, capture_items, input, reques integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1808,7 +1789,6 @@ def test_embeddings_create_raises_error( integrations=[OpenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event") @@ -1837,7 +1817,6 @@ async def test_embeddings_create_raises_error_async( integrations=[OpenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, 
send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event") @@ -1858,7 +1837,6 @@ def test_span_origin_nonstreaming_chat(sentry_init, capture_items): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1882,7 +1860,6 @@ async def test_span_origin_nonstreaming_chat_async(sentry_init, capture_items): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1905,7 +1882,6 @@ def test_span_origin_streaming_chat(sentry_init, capture_items): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1969,7 +1945,6 @@ async def test_span_origin_streaming_chat_async( sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2036,7 +2011,6 @@ def test_span_origin_embeddings(sentry_init, capture_items): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2068,7 +2042,6 @@ async def test_span_origin_embeddings_async(sentry_init, capture_items): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2462,7 +2435,6 @@ def test_ai_client_span_responses_api_no_pii(sentry_init, capture_items): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -2585,7 +2557,6 @@ def test_ai_client_span_responses_api( integrations=[OpenAIIntegration(include_prompts=True)], 
traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -2796,7 +2767,6 @@ def test_error_in_responses_api(sentry_init, capture_items): integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event", "transaction", "span") @@ -2903,7 +2873,6 @@ async def test_ai_client_span_responses_async_api( integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -3189,7 +3158,6 @@ async def test_ai_client_span_streaming_responses_async_api( integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -3415,7 +3383,6 @@ async def test_error_in_responses_async_api(sentry_init, capture_items): integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event", "transaction", "span") @@ -3543,7 +3510,6 @@ def test_streaming_responses_api( ], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -3620,7 +3586,6 @@ async def test_streaming_responses_api_async( ], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -3684,7 +3649,6 @@ def test_empty_tools_in_chat_completion(sentry_init, capture_items, tools): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -3727,7 +3691,6 @@ def test_openai_message_role_mapping( integrations=[OpenAIIntegration(include_prompts=True)], 
traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -3758,7 +3721,6 @@ def test_openai_message_truncation(sentry_init, capture_items): integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -3808,7 +3770,6 @@ def test_streaming_chat_completion_ttft( sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -3887,7 +3848,6 @@ async def test_streaming_chat_completion_ttft_async( sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -3964,7 +3924,6 @@ def test_streaming_responses_api_ttft( sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -4014,7 +3973,6 @@ async def test_streaming_responses_api_ttft_async( sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") diff --git a/tests/integrations/openai_agents/test_openai_agents.py b/tests/integrations/openai_agents/test_openai_agents.py index 9e74848a04..294812b0ca 100644 --- a/tests/integrations/openai_agents/test_openai_agents.py +++ b/tests/integrations/openai_agents/test_openai_agents.py @@ -182,7 +182,6 @@ async def test_agent_invocation_span_no_pii( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=False, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -340,7 +339,6 @@ async def test_agent_invocation_span( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, 
) items = capture_items("span", "transaction") @@ -525,7 +523,6 @@ async def test_client_span_custom_model( sentry_init( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -573,7 +570,6 @@ def test_agent_invocation_span_sync_no_pii( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=False, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -725,7 +721,6 @@ def test_agent_invocation_span_sync( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -968,7 +963,6 @@ async def test_handoff_span(sentry_init, capture_items, get_model_response): sentry_init( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1098,7 +1092,6 @@ async def test_max_turns_before_handoff_span( sentry_init( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1197,7 +1190,6 @@ def simple_test_tool(message: str) -> str: integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1426,7 +1418,6 @@ async def test_hosted_mcp_tool_propagation_header_streamed( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, release="d08ebdb9309e1b004c6f52202de58a09c2268e42", - _experiments={"gen_ai_as_v2_spans": True}, ) request_headers = {} @@ -1589,7 +1580,6 @@ async def test_hosted_mcp_tool_propagation_headers( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, release="d08ebdb9309e1b004c6f52202de58a09c2268e42", - _experiments={"gen_ai_as_v2_spans": 
True}, ) response = get_model_response(EXAMPLE_RESPONSE, serialize_pydantic=True) @@ -1688,7 +1678,6 @@ def simple_test_tool(message: str) -> str: integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -1737,7 +1726,6 @@ async def test_error_handling(sentry_init, capture_items, test_agent): LoggingIntegration(event_level=logging.CRITICAL), ], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event", "span", "transaction") @@ -1803,7 +1791,6 @@ async def test_error_captures_input_data(sentry_init, capture_items, test_agent) ], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event", "span") @@ -1848,7 +1835,6 @@ async def test_span_status_error(sentry_init, capture_items, test_agent): LoggingIntegration(event_level=logging.CRITICAL), ], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event", "transaction", "span") @@ -1962,7 +1948,6 @@ async def test_mcp_tool_execution_spans( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -2093,7 +2078,6 @@ async def test_mcp_tool_execution_with_error( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -2218,7 +2202,6 @@ async def test_mcp_tool_execution_without_pii( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=False, # PII disabled - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -2276,7 +2259,6 @@ async def test_multiple_agents_asyncio( sentry_init( integrations=[OpenAIAgentsIntegration()], 
traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -2320,7 +2302,6 @@ def test_openai_agents_message_role_mapping( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) get_response_kwargs = {"input": [test_message]} @@ -2420,7 +2401,6 @@ def failing_tool(message: str) -> str: integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -2518,7 +2498,6 @@ async def test_invoke_agent_span_includes_usage_data( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -2612,7 +2591,6 @@ async def test_ai_client_span_includes_response_model( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -2701,7 +2679,6 @@ async def test_ai_client_span_response_model_with_chat_completions( sentry_init( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -2824,7 +2801,6 @@ def calculator(a: int, b: int) -> int: integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -2913,7 +2889,6 @@ async def test_invoke_agent_span_includes_response_model( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -3048,7 +3023,6 @@ def calculator(a: int, b: int) -> int: integrations=[OpenAIAgentsIntegration()], 
traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -3091,7 +3065,6 @@ def test_openai_agents_message_truncation(sentry_init, capture_items): integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) test_messages = [ @@ -3138,7 +3111,6 @@ async def test_streaming_span_update_captures_response_data( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) # Create a mock streaming response object (similar to what we'd get from ResponseCompletedEvent) @@ -3204,7 +3176,6 @@ async def test_streaming_ttft_on_chat_span( sentry_init( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) request_headers = {} @@ -3359,7 +3330,6 @@ async def test_conversation_id_on_all_spans( sentry_init( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -3498,7 +3468,6 @@ def simple_tool(message: str) -> str: sentry_init( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -3562,7 +3531,6 @@ async def test_no_conversation_id_when_not_provided( sentry_init( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") diff --git a/tests/integrations/pydantic_ai/test_pydantic_ai.py b/tests/integrations/pydantic_ai/test_pydantic_ai.py index bab2f6208d..fe34dd0f5d 100644 --- a/tests/integrations/pydantic_ai/test_pydantic_ai.py +++ b/tests/integrations/pydantic_ai/test_pydantic_ai.py @@ -61,7 +61,6 @@ async def test_agent_run_async(sentry_init, capture_items, get_test_agent): 
integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -103,7 +102,6 @@ async def test_agent_run_async_model_error(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event", "transaction", "span") @@ -137,7 +135,6 @@ async def test_agent_run_async_usage_data(sentry_init, capture_items, get_test_a integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -182,7 +179,6 @@ def test_agent_run_sync(sentry_init, capture_items, get_test_agent): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -215,7 +211,6 @@ def test_agent_run_sync_model_error(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event", "transaction", "span") @@ -249,7 +244,6 @@ async def test_agent_run_stream(sentry_init, capture_items, get_test_agent): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -294,7 +288,6 @@ async def test_agent_run_stream_events(sentry_init, capture_items, get_test_agen integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -329,7 +322,6 @@ async def test_agent_with_tools(sentry_init, capture_items, get_test_agent): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, 
- _experiments={"gen_ai_as_v2_spans": True}, ) test_agent = get_test_agent() @@ -395,7 +387,6 @@ async def test_agent_with_tool_model_retry( ], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) retries = 0 @@ -479,7 +470,6 @@ async def test_agent_with_tool_validation_error( ], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) test_agent = get_test_agent() @@ -544,7 +534,6 @@ async def test_agent_with_tools_streaming(sentry_init, capture_items, get_test_a integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) test_agent = get_test_agent() @@ -594,7 +583,6 @@ async def test_model_settings(sentry_init, capture_items, get_test_agent_with_se sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -643,7 +631,6 @@ async def test_system_prompt_attribute( integrations=[PydanticAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -689,7 +676,6 @@ async def test_error_handling(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -714,7 +700,6 @@ async def test_without_pii(sentry_init, capture_items, get_test_agent): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=False, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -744,7 +729,6 @@ async def test_without_pii_tools(sentry_init, capture_items, get_test_agent): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=False, - 
_experiments={"gen_ai_as_v2_spans": True}, ) test_agent = get_test_agent() @@ -781,7 +765,6 @@ async def test_multiple_agents_concurrent(sentry_init, capture_items, get_test_a sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -816,7 +799,6 @@ async def test_message_history(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -866,7 +848,6 @@ async def test_gen_ai_system(sentry_init, capture_items, get_test_agent): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -897,7 +878,6 @@ async def test_include_prompts_false(sentry_init, capture_items, get_test_agent) integrations=[PydanticAIIntegration(include_prompts=False)], traces_sample_rate=1.0, send_default_pii=True, # Even with PII enabled, prompts should not be captured - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -927,7 +907,6 @@ async def test_include_prompts_true(sentry_init, capture_items, get_test_agent): integrations=[PydanticAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -959,7 +938,6 @@ async def test_include_prompts_false_with_tools( integrations=[PydanticAIIntegration(include_prompts=False)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) test_agent = get_test_agent() @@ -997,7 +975,6 @@ async def test_include_prompts_requires_pii(sentry_init, capture_items, get_test integrations=[PydanticAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=False, # PII 
disabled - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1089,7 +1066,6 @@ async def mock_map_tool_result_part(part): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1159,7 +1135,6 @@ async def test_context_cleanup_after_run(sentry_init, get_test_agent): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) # Verify context is not set before run @@ -1183,7 +1158,6 @@ def test_context_cleanup_after_run_sync(sentry_init, get_test_agent): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) # Verify context is not set before run @@ -1208,7 +1182,6 @@ async def test_context_cleanup_after_streaming(sentry_init, get_test_agent): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) # Verify context is not set before run @@ -1235,7 +1208,6 @@ async def test_context_cleanup_on_error(sentry_init, get_test_agent): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) test_agent = get_test_agent() @@ -1270,7 +1242,6 @@ async def test_context_isolation_concurrent_agents(sentry_init, get_test_agent): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) # Create a second agent @@ -1326,7 +1297,6 @@ async def test_invoke_agent_with_list_user_prompt(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1377,7 +1347,6 @@ async def test_invoke_agent_with_instructions( 
integrations=[PydanticAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1417,7 +1386,6 @@ async def test_model_name_extraction_with_callable(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) # Test the utility function directly @@ -1444,7 +1412,6 @@ async def test_model_name_extraction_fallback_to_str(sentry_init, capture_items) sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) # Test the utility function directly @@ -1473,7 +1440,6 @@ async def test_model_settings_object_style(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -1509,7 +1475,6 @@ async def test_usage_data_partial(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1543,7 +1508,6 @@ async def test_agent_data_from_scope(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1566,7 +1530,6 @@ async def test_available_tools_without_description( sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) test_agent = get_test_agent() @@ -1601,7 +1564,6 @@ async def test_output_with_tool_calls(sentry_init, capture_items, get_test_agent integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, - 
_experiments={"gen_ai_as_v2_spans": True}, ) test_agent = get_test_agent() @@ -1647,7 +1609,6 @@ async def test_message_formatting_with_different_parts(sentry_init, capture_item integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1696,7 +1657,6 @@ async def test_update_invoke_agent_span_with_none_output(sentry_init, capture_it integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -1724,7 +1684,6 @@ async def test_update_ai_client_span_with_none_response(sentry_init, capture_ite sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -1750,7 +1709,6 @@ async def test_agent_without_name(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1776,7 +1734,6 @@ async def test_model_response_without_parts(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -1807,7 +1764,6 @@ async def test_input_messages_error_handling(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -1837,7 +1793,6 @@ async def test_available_tools_error_handling(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], 
traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -1867,7 +1822,6 @@ async def test_set_usage_data_with_none_usage(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -1894,7 +1848,6 @@ async def test_set_usage_data_with_partial_fields(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -1936,7 +1889,6 @@ def test_tool(x: int) -> int: integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1966,7 +1918,6 @@ async def test_message_parts_with_list_content(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2003,7 +1954,6 @@ async def test_output_data_with_text_and_tool_calls(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2043,7 +1993,6 @@ async def test_output_data_error_handling(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2076,7 +2025,6 @@ async def test_message_with_system_prompt_part(sentry_init, capture_items): 
integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2112,7 +2060,6 @@ async def test_message_with_instructions(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2147,7 +2094,6 @@ async def test_set_input_messages_without_prompts(sentry_init, capture_items): integrations=[PydanticAIIntegration(include_prompts=False)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2176,7 +2122,6 @@ async def test_set_output_data_without_prompts(sentry_init, capture_items): integrations=[PydanticAIIntegration(include_prompts=False)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2204,7 +2149,6 @@ async def test_get_model_name_with_exception_in_callable(sentry_init, capture_it sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) # Create model with callable name that raises exception @@ -2228,7 +2172,6 @@ async def test_get_model_name_with_string_model(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) # Pass a string as model @@ -2248,7 +2191,6 @@ async def test_get_model_name_with_none(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) # Pass None @@ -2270,7 +2212,6 @@ async def test_set_model_data_with_system(sentry_init, 
capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2302,7 +2243,6 @@ async def test_set_model_data_from_agent_scope(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2336,7 +2276,6 @@ async def test_set_model_data_with_none_settings_values(sentry_init, capture_ite sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2369,7 +2308,6 @@ async def test_should_send_prompts_without_pii(sentry_init, capture_items): integrations=[PydanticAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=False, # PII disabled - _experiments={"gen_ai_as_v2_spans": True}, ) # Should return False @@ -2388,7 +2326,6 @@ async def test_set_agent_data_without_agent(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2415,7 +2352,6 @@ async def test_set_agent_data_from_scope(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2448,7 +2384,6 @@ async def test_set_agent_data_without_name(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2479,7 +2414,6 @@ 
async def test_set_available_tools_without_toolset(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2510,7 +2444,6 @@ async def test_set_available_tools_with_schema(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2550,7 +2483,6 @@ async def test_execute_tool_span_creation(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2577,7 +2509,6 @@ async def test_execute_tool_span_with_mcp_type(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2605,7 +2536,6 @@ async def test_execute_tool_span_without_prompts(sentry_init, capture_items): integrations=[PydanticAIIntegration(include_prompts=False)], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2630,7 +2560,6 @@ async def test_execute_tool_span_with_none_args(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2654,7 +2583,6 @@ async def test_update_execute_tool_span_with_none_span(sentry_init, capture_item sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - 
_experiments={"gen_ai_as_v2_spans": True}, ) # Update with None span - should not raise @@ -2679,7 +2607,6 @@ async def test_update_execute_tool_span_with_none_result(sentry_init, capture_it integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2703,7 +2630,6 @@ async def test_tool_execution_without_span_context(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) # Create a simple agent with no tools (won't have function_toolset) @@ -2735,7 +2661,6 @@ async def test_invoke_agent_span_with_callable_instruction(sentry_init, capture_ integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2769,7 +2694,6 @@ async def test_invoke_agent_span_with_string_instructions(sentry_init, capture_i integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2798,7 +2722,6 @@ async def test_ai_client_span_with_streaming_flag(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2826,7 +2749,6 @@ async def test_ai_client_span_gets_agent_from_scope(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, - _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2875,7 +2797,6 @@ async def test_binary_content_encoding_image(sentry_init, 
capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2906,7 +2827,6 @@ async def test_binary_content_encoding_mixed_content(sentry_init, capture_items) integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2950,7 +2870,6 @@ async def test_binary_content_in_agent_run(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2975,7 +2894,6 @@ async def test_binary_content_in_agent_run(sentry_init, capture_items): async def test_set_usage_data_with_cache_tokens(sentry_init, capture_items): """Test that cache_read_tokens and cache_write_tokens are tracked.""" sentry_init(integrations=[PydanticAIIntegration()], traces_sample_rate=1.0) - _experiments = ({"gen_ai_as_v2_spans": True},) items = capture_items("transaction", "span") @@ -3046,7 +2964,6 @@ def test_image_url_base64_content_in_span( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -3117,7 +3034,6 @@ async def test_invoke_agent_image_url( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) agent = Agent("test", name="test_image_url_agent") @@ -3165,7 +3081,6 @@ def multiply_numbers(a: int, b: int) -> int: integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, - _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") From 700e8a17934b20734797472a9270e054b8c1bb90 Mon Sep 17 00:00:00 2001 From: Alexander Alderman Webb 
Date: Fri, 17 Apr 2026 15:09:05 +0200 Subject: [PATCH 35/36] retry adding experimental option to tests --- .../integrations/anthropic/test_anthropic.py | 113 ++++++++++++++++-- .../google_genai/test_google_genai.py | 37 ++++++ .../huggingface_hub/test_huggingface_hub.py | 10 +- .../integrations/langchain/test_langchain.py | 30 ++++- tests/integrations/litellm/test_litellm.py | 28 +++++ tests/integrations/openai/test_openai.py | 57 ++++++++- .../openai_agents/test_openai_agents.py | 32 +++++ .../pydantic_ai/test_pydantic_ai.py | 90 +++++++++++++- 8 files changed, 377 insertions(+), 20 deletions(-) diff --git a/tests/integrations/anthropic/test_anthropic.py b/tests/integrations/anthropic/test_anthropic.py index c7fc280b6c..b19cca9347 100644 --- a/tests/integrations/anthropic/test_anthropic.py +++ b/tests/integrations/anthropic/test_anthropic.py @@ -97,6 +97,7 @@ def test_nonstreaming_create_message( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -171,6 +172,7 @@ async def test_nonstreaming_create_message_async( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = AsyncAnthropic(api_key="z") @@ -287,6 +289,7 @@ def test_streaming_create_message( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -395,6 +398,7 @@ def test_streaming_create_message_close( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = 
capture_items("transaction", "span") @@ -498,6 +502,7 @@ def test_streaming_create_message_api_error( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -614,6 +619,7 @@ def test_stream_messages( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -723,6 +729,7 @@ def test_stream_messages_close( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -831,6 +838,7 @@ def test_stream_messages_api_error( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -953,6 +961,7 @@ async def test_streaming_create_message_async( traces_sample_rate=1.0, default_integrations=False, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1064,6 +1073,7 @@ async def test_streaming_create_message_async_close( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1170,6 +1180,7 @@ async def test_streaming_create_message_async_api_error( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1290,6 +1301,7 @@ async def test_stream_message_async( integrations=[AnthropicIntegration(include_prompts=include_prompts)], 
traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1400,6 +1412,7 @@ async def test_stream_messages_async_api_error( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1510,6 +1523,7 @@ async def test_stream_messages_async_close( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1666,6 +1680,7 @@ def test_streaming_create_message_with_input_json_delta( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1815,6 +1830,7 @@ def test_stream_messages_with_input_json_delta( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1972,6 +1988,7 @@ async def test_streaming_create_message_with_input_json_delta_async( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2129,6 +2146,7 @@ async def test_stream_message_with_input_json_delta_async( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2187,7 +2205,11 @@ async def test_stream_message_with_input_json_delta_async( def 
test_exception_message_create(sentry_init, capture_items): - sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) + sentry_init( + integrations=[AnthropicIntegration()], + traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, + ) items = capture_items("event", "transaction") client = Anthropic(api_key="z") @@ -2209,7 +2231,11 @@ def test_exception_message_create(sentry_init, capture_items): def test_span_status_error(sentry_init, capture_items): - sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) + sentry_init( + integrations=[AnthropicIntegration()], + traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, + ) items = capture_items("event", "span") with start_transaction(name="anthropic"): @@ -2235,7 +2261,11 @@ def test_span_status_error(sentry_init, capture_items): @pytest.mark.asyncio async def test_span_status_error_async(sentry_init, capture_items): - sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) + sentry_init( + integrations=[AnthropicIntegration()], + traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, + ) items = capture_items("event", "span") with start_transaction(name="anthropic"): @@ -2261,7 +2291,11 @@ async def test_span_status_error_async(sentry_init, capture_items): @pytest.mark.asyncio async def test_exception_message_create_async(sentry_init, capture_items): - sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) + sentry_init( + integrations=[AnthropicIntegration()], + traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, + ) items = capture_items("event", "transaction") client = AsyncAnthropic(api_key="z") @@ -2286,6 +2320,7 @@ def test_span_origin(sentry_init, capture_items): sentry_init( integrations=[AnthropicIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2316,6 +2351,7 @@ async def 
test_span_origin_async(sentry_init, capture_items): sentry_init( integrations=[AnthropicIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2379,6 +2415,7 @@ def test_set_output_data_with_input_json_delta(sentry_init): integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) with start_transaction(name="test"): @@ -2429,6 +2466,7 @@ def test_anthropic_message_role_mapping( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2475,6 +2513,7 @@ def test_anthropic_message_truncation(sentry_init, capture_items): integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2525,6 +2564,7 @@ async def test_anthropic_message_truncation_async(sentry_init, capture_items): integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2585,6 +2625,7 @@ def test_nonstreaming_create_message_with_system_prompt( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -2671,6 +2712,7 @@ async def test_nonstreaming_create_message_with_system_prompt_async( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = 
AsyncAnthropic(api_key="z") @@ -2800,6 +2842,7 @@ def test_streaming_create_message_with_system_prompt( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2930,6 +2973,7 @@ def test_stream_messages_with_system_prompt( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -3062,6 +3106,7 @@ async def test_stream_message_with_system_prompt_async( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -3194,6 +3239,7 @@ async def test_streaming_create_message_with_system_prompt_async( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -3269,6 +3315,7 @@ def test_system_prompt_with_complex_structure(sentry_init, capture_items): integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -3522,6 +3569,7 @@ def test_message_with_base64_image(sentry_init, capture_items): integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -3572,6 +3620,7 @@ def test_message_with_url_image(sentry_init, capture_items): 
integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -3615,6 +3664,7 @@ def test_message_with_file_image(sentry_init, capture_items): integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -3659,6 +3709,7 @@ def test_message_with_base64_pdf(sentry_init, capture_items): integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -3703,6 +3754,7 @@ def test_message_with_url_pdf(sentry_init, capture_items): integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -3746,6 +3798,7 @@ def test_message_with_file_document(sentry_init, capture_items): integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -3790,6 +3843,7 @@ def test_message_with_mixed_content(sentry_init, capture_items): integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -3872,6 +3926,7 @@ def test_message_with_multiple_images_different_formats(sentry_init, capture_ite integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, 
send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -3946,6 +4001,7 @@ def test_binary_content_not_stored_when_pii_disabled(sentry_init, capture_items) integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=False, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -3984,6 +4040,7 @@ def test_binary_content_not_stored_when_prompts_disabled(sentry_init, capture_it integrations=[AnthropicIntegration(include_prompts=False)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -4018,7 +4075,11 @@ def test_binary_content_not_stored_when_prompts_disabled(sentry_init, capture_it def test_cache_tokens_nonstreaming(sentry_init, capture_items): """Test cache read/write tokens are tracked for non-streaming responses.""" - sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) + sentry_init( + integrations=[AnthropicIntegration()], + traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, + ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -4066,7 +4127,11 @@ def test_input_tokens_include_cache_write_nonstreaming(sentry_init, capture_item Usage(input_tokens=19, output_tokens=14, cache_creation_input_tokens=2846, cache_read_input_tokens=0) """ - sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) + sentry_init( + integrations=[AnthropicIntegration()], + traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, + ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -4114,7 +4179,11 @@ def test_input_tokens_include_cache_read_nonstreaming(sentry_init, capture_items Usage(input_tokens=19, output_tokens=14, 
cache_creation_input_tokens=0, cache_read_input_tokens=2846) """ - sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) + sentry_init( + integrations=[AnthropicIntegration()], + traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, + ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -4191,7 +4260,11 @@ def test_input_tokens_include_cache_read_streaming( ) ) - sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) + sentry_init( + integrations=[AnthropicIntegration()], + traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, + ) items = capture_items("transaction", "span") with mock.patch.object( @@ -4257,7 +4330,11 @@ def test_stream_messages_input_tokens_include_cache_read_streaming( ) ) - sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) + sentry_init( + integrations=[AnthropicIntegration()], + traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, + ) items = capture_items("transaction", "span") with mock.patch.object( @@ -4290,7 +4367,11 @@ def test_input_tokens_unchanged_without_caching(sentry_init, capture_items): Real Anthropic response (from E2E test, simple call without caching): Usage(input_tokens=20, output_tokens=12) """ - sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) + sentry_init( + integrations=[AnthropicIntegration()], + traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, + ) items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -4358,7 +4439,11 @@ def test_cache_tokens_streaming( ) ) - sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) + sentry_init( + integrations=[AnthropicIntegration()], + traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, + ) items = capture_items("transaction", "span") with mock.patch.object( @@ -4418,7 +4503,11 @@ def test_stream_messages_cache_tokens( ) ) - 
sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) + sentry_init( + integrations=[AnthropicIntegration()], + traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, + ) items = capture_items("transaction", "span") with mock.patch.object( diff --git a/tests/integrations/google_genai/test_google_genai.py b/tests/integrations/google_genai/test_google_genai.py index e074b79c8c..ae31fe565b 100644 --- a/tests/integrations/google_genai/test_google_genai.py +++ b/tests/integrations/google_genai/test_google_genai.py @@ -130,6 +130,7 @@ def test_nonstreaming_generate_content( integrations=[GoogleGenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -219,6 +220,7 @@ def test_generate_content_with_system_instruction( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -262,6 +264,7 @@ def test_generate_content_with_tools(sentry_init, capture_items, mock_genai_clie sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -344,6 +347,7 @@ def test_tool_execution(sentry_init, capture_items): integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -380,6 +384,7 @@ def test_error_handling(sentry_init, capture_items, mock_genai_client): sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event", "transaction") @@ -411,6 +416,7 @@ def test_streaming_generate_content(sentry_init, capture_items, mock_genai_clien 
integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -527,6 +533,7 @@ def test_span_origin(sentry_init, capture_items, mock_genai_client): sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -554,6 +561,7 @@ def test_response_without_usage_metadata(sentry_init, capture_items, mock_genai_ sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -595,6 +603,7 @@ def test_multiple_candidates(sentry_init, capture_items, mock_genai_client): integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -659,6 +668,7 @@ def test_all_configuration_parameters(sentry_init, capture_items, mock_genai_cli sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -698,6 +708,7 @@ def test_empty_response(sentry_init, capture_items, mock_genai_client): sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -729,6 +740,7 @@ def test_response_with_different_id_fields( sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -771,6 +783,7 @@ def test_tool_with_async_function(sentry_init): sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) # Create an async tool function @@ -793,6 +806,7 @@ def test_contents_as_none(sentry_init, 
capture_items, mock_genai_client): integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -819,6 +833,7 @@ def test_tool_calls_extraction(sentry_init, capture_items, mock_genai_client): sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -905,6 +920,7 @@ def test_google_genai_message_truncation(sentry_init, capture_items, mock_genai_ integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -980,6 +996,7 @@ def test_embed_content( integrations=[GoogleGenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1041,6 +1058,7 @@ def test_embed_content_string_input(sentry_init, capture_items, mock_genai_clien integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1087,6 +1105,7 @@ def test_embed_content_error_handling(sentry_init, capture_items, mock_genai_cli sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "event") @@ -1120,6 +1139,7 @@ def test_embed_content_without_statistics( sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1159,6 +1179,7 @@ def test_embed_content_span_origin(sentry_init, capture_items, mock_genai_client sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, + 
_experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1199,6 +1220,7 @@ async def test_async_embed_content( integrations=[GoogleGenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1263,6 +1285,7 @@ async def test_async_embed_content_string_input( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1312,6 +1335,7 @@ async def test_async_embed_content_error_handling( sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "event") @@ -1346,6 +1370,7 @@ async def test_async_embed_content_without_statistics( sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1388,6 +1413,7 @@ async def test_async_embed_content_span_origin( sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1419,6 +1445,7 @@ def test_generate_content_with_content_object( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1455,6 +1482,7 @@ def test_generate_content_with_dict_format( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1487,6 +1515,7 @@ def test_generate_content_with_file_data(sentry_init, capture_items, mock_genai_ 
integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1536,6 +1565,7 @@ def test_generate_content_with_inline_data( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1581,6 +1611,7 @@ def test_generate_content_with_function_response( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1635,6 +1666,7 @@ def test_generate_content_with_mixed_string_and_content( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1678,6 +1710,7 @@ def test_generate_content_with_part_object_directly( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1716,6 +1749,7 @@ def test_generate_content_with_list_of_dicts( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1752,6 +1786,7 @@ def test_generate_content_with_dict_inline_data( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1796,6 +1831,7 @@ def test_generate_content_without_parts_property_inline_data( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ 
-1839,6 +1875,7 @@ def test_generate_content_without_parts_property_inline_data_and_binary_data_wit integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") diff --git a/tests/integrations/huggingface_hub/test_huggingface_hub.py b/tests/integrations/huggingface_hub/test_huggingface_hub.py index 98abbb00fa..eaac8c1ab1 100644 --- a/tests/integrations/huggingface_hub/test_huggingface_hub.py +++ b/tests/integrations/huggingface_hub/test_huggingface_hub.py @@ -480,6 +480,7 @@ def test_text_generation( traces_sample_rate=1.0, send_default_pii=send_default_pii, integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -555,6 +556,7 @@ def test_text_generation_streaming( traces_sample_rate=1.0, send_default_pii=send_default_pii, integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -631,6 +633,7 @@ def test_chat_completion( traces_sample_rate=1.0, send_default_pii=send_default_pii, integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -709,6 +712,7 @@ def test_chat_completion_streaming( traces_sample_rate=1.0, send_default_pii=send_default_pii, integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -779,7 +783,7 @@ def test_chat_completion_streaming( def test_chat_completion_api_error( sentry_init: "Any", capture_items: "Any", mock_hf_api_with_errors: "Any" ) -> None: - sentry_init(traces_sample_rate=1.0) + sentry_init(traces_sample_rate=1.0, _experiments={"gen_ai_as_v2_spans": True}) items = 
capture_items("event", "transaction", "span") client = get_hf_provider_inference_client() @@ -838,7 +842,7 @@ def test_chat_completion_api_error( def test_span_status_error( sentry_init: "Any", capture_items: "Any", mock_hf_api_with_errors: "Any" ) -> None: - sentry_init(traces_sample_rate=1.0) + sentry_init(traces_sample_rate=1.0, _experiments={"gen_ai_as_v2_spans": True}) items = capture_items("event", "transaction", "span") client = get_hf_provider_inference_client() @@ -881,6 +885,7 @@ def test_chat_completion_with_tools( traces_sample_rate=1.0, send_default_pii=send_default_pii, integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -976,6 +981,7 @@ def test_chat_completion_streaming_with_tools( traces_sample_rate=1.0, send_default_pii=send_default_pii, integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") diff --git a/tests/integrations/langchain/test_langchain.py b/tests/integrations/langchain/test_langchain.py index f709d12129..ef27d45767 100644 --- a/tests/integrations/langchain/test_langchain.py +++ b/tests/integrations/langchain/test_langchain.py @@ -108,6 +108,7 @@ def test_langchain_text_completion( ], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -216,6 +217,7 @@ def test_langchain_create_agent( ], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -336,6 +338,7 @@ def test_tool_execution_span( ], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -528,6 +531,7 @@ def test_langchain_openai_tools_agent( ], traces_sample_rate=1.0, 
send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -865,6 +869,7 @@ def test_langchain_error(sentry_init, capture_items): integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event", "transaction", "span") @@ -903,6 +908,7 @@ def test_span_status_error(sentry_init, capture_items): sentry_init( integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event", "transaction", "span") @@ -987,7 +993,9 @@ def _llm_type(self): def _identifying_params(self): return {} - sentry_init(integrations=[LangchainIntegration()]) + sentry_init( + integrations=[LangchainIntegration()], _experiments={"gen_ai_as_v2_spans": True} + ) # Create a manual SentryLangchainCallback manual_callback = SentryLangchainCallback( @@ -1028,6 +1036,7 @@ def test_langchain_callback_manager(sentry_init): sentry_init( integrations=[LangchainIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) local_manager = BaseCallbackManager(handlers=[]) @@ -1060,6 +1069,7 @@ def test_langchain_callback_manager_with_sentry_callback(sentry_init): sentry_init( integrations=[LangchainIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) sentry_callback = SentryLangchainCallback(0, False) local_manager = BaseCallbackManager(handlers=[sentry_callback]) @@ -1092,6 +1102,7 @@ def test_langchain_callback_list(sentry_init): sentry_init( integrations=[LangchainIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) local_callbacks = [] @@ -1124,6 +1135,7 @@ def test_langchain_callback_list_existing_callback(sentry_init): sentry_init( integrations=[LangchainIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, 
) sentry_callback = SentryLangchainCallback(0, False) local_callbacks = [sentry_callback] @@ -1161,6 +1173,7 @@ def test_langchain_message_role_mapping(sentry_init, capture_items): integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1298,6 +1311,7 @@ def test_langchain_message_truncation(sentry_init, capture_items): integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1390,6 +1404,7 @@ def test_langchain_embeddings_sync( integrations=[LangchainIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1468,6 +1483,7 @@ def test_langchain_embeddings_embed_query( integrations=[LangchainIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1542,6 +1558,7 @@ async def test_langchain_embeddings_async( integrations=[LangchainIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1614,6 +1631,7 @@ async def test_langchain_embeddings_aembed_query(sentry_init, capture_items): integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1675,6 +1693,7 @@ def test_langchain_embeddings_no_model_name(sentry_init, capture_items): sentry_init( integrations=[LangchainIntegration(include_prompts=False)], 
traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1723,7 +1742,7 @@ def test_langchain_embeddings_integration_disabled(sentry_init, capture_items): pytest.skip("langchain_openai not installed") # Initialize without LangchainIntegration - sentry_init(traces_sample_rate=1.0) + sentry_init(traces_sample_rate=1.0, _experiments={"gen_ai_as_v2_spans": True}) items = capture_items("transaction", "span") with mock.patch.object( @@ -1760,6 +1779,7 @@ def test_langchain_embeddings_multiple_providers(sentry_init, capture_items): integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1817,6 +1837,7 @@ def test_langchain_embeddings_error_handling(sentry_init, capture_items): integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1857,6 +1878,7 @@ def test_langchain_embeddings_multiple_calls(sentry_init, capture_items): integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1920,6 +1942,7 @@ def test_langchain_embeddings_span_hierarchy(sentry_init, capture_items): integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1973,6 +1996,7 @@ def test_langchain_embeddings_with_list_and_string_inputs(sentry_init, capture_i integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2037,6 +2061,7 @@ 
def test_langchain_response_model_extraction( integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2342,6 +2367,7 @@ def test_langchain_ai_system_detection( sentry_init( integrations=[LangchainIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") diff --git a/tests/integrations/litellm/test_litellm.py b/tests/integrations/litellm/test_litellm.py index 90807744e7..b9365e7008 100644 --- a/tests/integrations/litellm/test_litellm.py +++ b/tests/integrations/litellm/test_litellm.py @@ -152,6 +152,7 @@ def test_nonstreaming_chat_completion( integrations=[LiteLLMIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -233,6 +234,7 @@ async def test_async_nonstreaming_chat_completion( integrations=[LiteLLMIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -316,6 +318,7 @@ def test_streaming_chat_completion( integrations=[LiteLLMIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -386,6 +389,7 @@ async def test_async_streaming_chat_completion( integrations=[LiteLLMIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -452,6 +456,7 @@ def test_embeddings_create( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + 
_experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -521,6 +526,7 @@ async def test_async_embeddings_create( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -585,6 +591,7 @@ def test_embeddings_create_with_list_input( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -647,6 +654,7 @@ async def test_async_embeddings_create_with_list_input( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -709,6 +717,7 @@ def test_embeddings_no_pii( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=False, # PII disabled + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -765,6 +774,7 @@ async def test_async_embeddings_no_pii( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=False, # PII disabled + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -815,6 +825,7 @@ def test_exception_handling( sentry_init( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event") @@ -853,6 +864,7 @@ async def test_async_exception_handling( sentry_init( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event") @@ -894,6 +906,7 @@ def test_span_origin( sentry_init( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = 
capture_items("transaction", "span") @@ -941,6 +954,7 @@ def test_multiple_providers( sentry_init( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction") @@ -1036,6 +1050,7 @@ async def test_async_multiple_providers( sentry_init( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1132,6 +1147,7 @@ def test_additional_parameters( sentry_init( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1191,6 +1207,7 @@ async def test_async_additional_parameters( sentry_init( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1250,6 +1267,7 @@ def test_no_integration( """Test that when integration is not enabled, callbacks don't break.""" sentry_init( traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1296,6 +1314,7 @@ async def test_async_no_integration( """Test that when integration is not enabled, callbacks don't break.""" sentry_init( traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1338,6 +1357,7 @@ def test_response_without_usage(sentry_init, capture_items): sentry_init( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1379,6 +1399,7 @@ def test_integration_setup(sentry_init): sentry_init( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) # Check that callbacks are registered @@ -1393,6 +1414,7 @@ def test_litellm_message_truncation(sentry_init, capture_items): 
integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1459,6 +1481,7 @@ def test_binary_content_encoding_image_url( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1538,6 +1561,7 @@ async def test_async_binary_content_encoding_image_url( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1618,6 +1642,7 @@ def test_binary_content_encoding_mixed_content( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1686,6 +1711,7 @@ async def test_async_binary_content_encoding_mixed_content( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1755,6 +1781,7 @@ def test_binary_content_encoding_uri_type( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1828,6 +1855,7 @@ async def test_async_binary_content_encoding_uri_type( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") diff --git a/tests/integrations/openai/test_openai.py b/tests/integrations/openai/test_openai.py index e53f8e4f55..c4d77db5c8 100644 --- a/tests/integrations/openai/test_openai.py +++ 
b/tests/integrations/openai/test_openai.py @@ -138,6 +138,7 @@ def test_nonstreaming_chat_completion_no_prompts( integrations=[OpenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -233,6 +234,7 @@ def test_nonstreaming_chat_completion(sentry_init, capture_items, messages, requ integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -312,6 +314,7 @@ async def test_nonstreaming_chat_completion_async_no_prompts( integrations=[OpenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -407,6 +410,7 @@ async def test_nonstreaming_chat_completion_async( integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -502,6 +506,7 @@ def test_streaming_chat_completion_no_prompts( ], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -621,6 +626,7 @@ def test_streaming_chat_completion_with_usage_in_stream( integrations=[OpenAIIntegration(include_prompts=False)], traces_sample_rate=1.0, send_default_pii=False, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -701,6 +707,7 @@ def test_streaming_chat_completion_empty_content_preserves_token_usage( integrations=[OpenAIIntegration(include_prompts=False)], traces_sample_rate=1.0, send_default_pii=False, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -764,6 +771,7 @@ async def test_streaming_chat_completion_empty_content_preserves_token_usage_asy 
integrations=[OpenAIIntegration(include_prompts=False)], traces_sample_rate=1.0, send_default_pii=False, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -829,6 +837,7 @@ async def test_streaming_chat_completion_async_with_usage_in_stream( integrations=[OpenAIIntegration(include_prompts=False)], traces_sample_rate=1.0, send_default_pii=False, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -957,6 +966,7 @@ def test_streaming_chat_completion( ], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1109,6 +1119,7 @@ async def test_streaming_chat_completion_async_no_prompts( ], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1280,6 +1291,7 @@ async def test_streaming_chat_completion_async( ], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1411,7 +1423,11 @@ async def test_streaming_chat_completion_async( def test_bad_chat_completion(sentry_init, capture_items): - sentry_init(integrations=[OpenAIIntegration()], traces_sample_rate=1.0) + sentry_init( + integrations=[OpenAIIntegration()], + traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, + ) items = capture_items("event") client = OpenAI(api_key="z") @@ -1429,7 +1445,11 @@ def test_bad_chat_completion(sentry_init, capture_items): def test_span_status_error(sentry_init, capture_items): - sentry_init(integrations=[OpenAIIntegration()], traces_sample_rate=1.0) + sentry_init( + integrations=[OpenAIIntegration()], + traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, + ) items = capture_items("event", "transaction", "span") with start_transaction(name="test"): @@ -1454,7 +1474,11 @@ def test_span_status_error(sentry_init, capture_items): @pytest.mark.asyncio async def 
test_bad_chat_completion_async(sentry_init, capture_items): - sentry_init(integrations=[OpenAIIntegration()], traces_sample_rate=1.0) + sentry_init( + integrations=[OpenAIIntegration()], + traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, + ) items = capture_items("event") client = AsyncOpenAI(api_key="z") @@ -1485,6 +1509,7 @@ def test_embeddings_create_no_pii( integrations=[OpenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1567,6 +1592,7 @@ def test_embeddings_create(sentry_init, capture_items, input, request): integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1638,6 +1664,7 @@ async def test_embeddings_create_async_no_pii( integrations=[OpenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1721,6 +1748,7 @@ async def test_embeddings_create_async(sentry_init, capture_items, input, reques integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -1789,6 +1817,7 @@ def test_embeddings_create_raises_error( integrations=[OpenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event") @@ -1817,6 +1846,7 @@ async def test_embeddings_create_raises_error_async( integrations=[OpenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event") @@ -1837,6 +1867,7 @@ def 
test_span_origin_nonstreaming_chat(sentry_init, capture_items): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1860,6 +1891,7 @@ async def test_span_origin_nonstreaming_chat_async(sentry_init, capture_items): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1882,6 +1914,7 @@ def test_span_origin_streaming_chat(sentry_init, capture_items): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1945,6 +1978,7 @@ async def test_span_origin_streaming_chat_async( sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2011,6 +2045,7 @@ def test_span_origin_embeddings(sentry_init, capture_items): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2042,6 +2077,7 @@ async def test_span_origin_embeddings_async(sentry_init, capture_items): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2435,6 +2471,7 @@ def test_ai_client_span_responses_api_no_pii(sentry_init, capture_items): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -2557,6 +2594,7 @@ def test_ai_client_span_responses_api( integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -2767,6 +2805,7 
@@ def test_error_in_responses_api(sentry_init, capture_items): integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event", "transaction", "span") @@ -2873,6 +2912,7 @@ async def test_ai_client_span_responses_async_api( integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -3158,6 +3198,7 @@ async def test_ai_client_span_streaming_responses_async_api( integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -3383,6 +3424,7 @@ async def test_error_in_responses_async_api(sentry_init, capture_items): integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event", "transaction", "span") @@ -3510,6 +3552,7 @@ def test_streaming_responses_api( ], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -3586,6 +3629,7 @@ async def test_streaming_responses_api_async( ], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -3649,6 +3693,7 @@ def test_empty_tools_in_chat_completion(sentry_init, capture_items, tools): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -3691,6 +3736,7 @@ def test_openai_message_role_mapping( integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -3721,6 +3767,7 @@ def 
test_openai_message_truncation(sentry_init, capture_items): integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -3770,6 +3817,7 @@ def test_streaming_chat_completion_ttft( sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -3848,6 +3896,7 @@ async def test_streaming_chat_completion_ttft_async( sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -3924,6 +3973,7 @@ def test_streaming_responses_api_ttft( sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -3973,6 +4023,7 @@ async def test_streaming_responses_api_ttft_async( sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") diff --git a/tests/integrations/openai_agents/test_openai_agents.py b/tests/integrations/openai_agents/test_openai_agents.py index 294812b0ca..9e74848a04 100644 --- a/tests/integrations/openai_agents/test_openai_agents.py +++ b/tests/integrations/openai_agents/test_openai_agents.py @@ -182,6 +182,7 @@ async def test_agent_invocation_span_no_pii( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=False, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -339,6 +340,7 @@ async def test_agent_invocation_span( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -523,6 +525,7 @@ async def test_client_span_custom_model( sentry_init( 
integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span") @@ -570,6 +573,7 @@ def test_agent_invocation_span_sync_no_pii( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=False, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -721,6 +725,7 @@ def test_agent_invocation_span_sync( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -963,6 +968,7 @@ async def test_handoff_span(sentry_init, capture_items, get_model_response): sentry_init( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1092,6 +1098,7 @@ async def test_max_turns_before_handoff_span( sentry_init( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1190,6 +1197,7 @@ def simple_test_tool(message: str) -> str: integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1418,6 +1426,7 @@ async def test_hosted_mcp_tool_propagation_header_streamed( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, release="d08ebdb9309e1b004c6f52202de58a09c2268e42", + _experiments={"gen_ai_as_v2_spans": True}, ) request_headers = {} @@ -1580,6 +1589,7 @@ async def test_hosted_mcp_tool_propagation_headers( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, release="d08ebdb9309e1b004c6f52202de58a09c2268e42", + _experiments={"gen_ai_as_v2_spans": True}, ) response = get_model_response(EXAMPLE_RESPONSE, serialize_pydantic=True) @@ -1678,6 +1688,7 @@ def 
simple_test_tool(message: str) -> str: integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -1726,6 +1737,7 @@ async def test_error_handling(sentry_init, capture_items, test_agent): LoggingIntegration(event_level=logging.CRITICAL), ], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event", "span", "transaction") @@ -1791,6 +1803,7 @@ async def test_error_captures_input_data(sentry_init, capture_items, test_agent) ], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event", "span") @@ -1835,6 +1848,7 @@ async def test_span_status_error(sentry_init, capture_items, test_agent): LoggingIntegration(event_level=logging.CRITICAL), ], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event", "transaction", "span") @@ -1948,6 +1962,7 @@ async def test_mcp_tool_execution_spans( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -2078,6 +2093,7 @@ async def test_mcp_tool_execution_with_error( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -2202,6 +2218,7 @@ async def test_mcp_tool_execution_without_pii( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=False, # PII disabled + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -2259,6 +2276,7 @@ async def test_multiple_agents_asyncio( sentry_init( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ 
-2302,6 +2320,7 @@ def test_openai_agents_message_role_mapping( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) get_response_kwargs = {"input": [test_message]} @@ -2401,6 +2420,7 @@ def failing_tool(message: str) -> str: integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -2498,6 +2518,7 @@ async def test_invoke_agent_span_includes_usage_data( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -2591,6 +2612,7 @@ async def test_ai_client_span_includes_response_model( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -2679,6 +2701,7 @@ async def test_ai_client_span_response_model_with_chat_completions( sentry_init( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -2801,6 +2824,7 @@ def calculator(a: int, b: int) -> int: integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -2889,6 +2913,7 @@ async def test_invoke_agent_span_includes_response_model( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -3023,6 +3048,7 @@ def calculator(a: int, b: int) -> int: integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", 
"transaction") @@ -3065,6 +3091,7 @@ def test_openai_agents_message_truncation(sentry_init, capture_items): integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) test_messages = [ @@ -3111,6 +3138,7 @@ async def test_streaming_span_update_captures_response_data( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) # Create a mock streaming response object (similar to what we'd get from ResponseCompletedEvent) @@ -3176,6 +3204,7 @@ async def test_streaming_ttft_on_chat_span( sentry_init( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) request_headers = {} @@ -3330,6 +3359,7 @@ async def test_conversation_id_on_all_spans( sentry_init( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -3468,6 +3498,7 @@ def simple_tool(message: str) -> str: sentry_init( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") @@ -3531,6 +3562,7 @@ async def test_no_conversation_id_when_not_provided( sentry_init( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("span", "transaction") diff --git a/tests/integrations/pydantic_ai/test_pydantic_ai.py b/tests/integrations/pydantic_ai/test_pydantic_ai.py index fe34dd0f5d..9faccb0a84 100644 --- a/tests/integrations/pydantic_ai/test_pydantic_ai.py +++ b/tests/integrations/pydantic_ai/test_pydantic_ai.py @@ -61,6 +61,7 @@ async def test_agent_run_async(sentry_init, capture_items, get_test_agent): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, 
) items = capture_items("transaction", "span") @@ -102,6 +103,7 @@ async def test_agent_run_async_model_error(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event", "transaction", "span") @@ -135,6 +137,7 @@ async def test_agent_run_async_usage_data(sentry_init, capture_items, get_test_a integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -179,6 +182,7 @@ def test_agent_run_sync(sentry_init, capture_items, get_test_agent): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -211,6 +215,7 @@ def test_agent_run_sync_model_error(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("event", "transaction", "span") @@ -244,6 +249,7 @@ async def test_agent_run_stream(sentry_init, capture_items, get_test_agent): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -288,6 +294,7 @@ async def test_agent_run_stream_events(sentry_init, capture_items, get_test_agen integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -322,6 +329,7 @@ async def test_agent_with_tools(sentry_init, capture_items, get_test_agent): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) test_agent = get_test_agent() @@ -387,6 +395,7 @@ async def 
test_agent_with_tool_model_retry( ], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) retries = 0 @@ -470,6 +479,7 @@ async def test_agent_with_tool_validation_error( ], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) test_agent = get_test_agent() @@ -534,6 +544,7 @@ async def test_agent_with_tools_streaming(sentry_init, capture_items, get_test_a integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) test_agent = get_test_agent() @@ -583,6 +594,7 @@ async def test_model_settings(sentry_init, capture_items, get_test_agent_with_se sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -631,6 +643,7 @@ async def test_system_prompt_attribute( integrations=[PydanticAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -676,6 +689,7 @@ async def test_error_handling(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -700,6 +714,7 @@ async def test_without_pii(sentry_init, capture_items, get_test_agent): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=False, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -729,6 +744,7 @@ async def test_without_pii_tools(sentry_init, capture_items, get_test_agent): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=False, + _experiments={"gen_ai_as_v2_spans": True}, ) test_agent = get_test_agent() @@ -765,6 +781,7 @@ async def 
test_multiple_agents_concurrent(sentry_init, capture_items, get_test_a sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -799,6 +816,7 @@ async def test_message_history(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -848,6 +866,7 @@ async def test_gen_ai_system(sentry_init, capture_items, get_test_agent): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -878,6 +897,7 @@ async def test_include_prompts_false(sentry_init, capture_items, get_test_agent) integrations=[PydanticAIIntegration(include_prompts=False)], traces_sample_rate=1.0, send_default_pii=True, # Even with PII enabled, prompts should not be captured + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -907,6 +927,7 @@ async def test_include_prompts_true(sentry_init, capture_items, get_test_agent): integrations=[PydanticAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -938,6 +959,7 @@ async def test_include_prompts_false_with_tools( integrations=[PydanticAIIntegration(include_prompts=False)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) test_agent = get_test_agent() @@ -975,6 +997,7 @@ async def test_include_prompts_requires_pii(sentry_init, capture_items, get_test integrations=[PydanticAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=False, # PII disabled + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1066,6 
+1089,7 @@ async def mock_map_tool_result_part(part): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1135,6 +1159,7 @@ async def test_context_cleanup_after_run(sentry_init, get_test_agent): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) # Verify context is not set before run @@ -1158,6 +1183,7 @@ def test_context_cleanup_after_run_sync(sentry_init, get_test_agent): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) # Verify context is not set before run @@ -1182,6 +1208,7 @@ async def test_context_cleanup_after_streaming(sentry_init, get_test_agent): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) # Verify context is not set before run @@ -1208,6 +1235,7 @@ async def test_context_cleanup_on_error(sentry_init, get_test_agent): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) test_agent = get_test_agent() @@ -1242,6 +1270,7 @@ async def test_context_isolation_concurrent_agents(sentry_init, get_test_agent): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) # Create a second agent @@ -1297,6 +1326,7 @@ async def test_invoke_agent_with_list_user_prompt(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1347,6 +1377,7 @@ async def test_invoke_agent_with_instructions( integrations=[PydanticAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + 
_experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1386,6 +1417,7 @@ async def test_model_name_extraction_with_callable(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) # Test the utility function directly @@ -1412,6 +1444,7 @@ async def test_model_name_extraction_fallback_to_str(sentry_init, capture_items) sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) # Test the utility function directly @@ -1440,6 +1473,7 @@ async def test_model_settings_object_style(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -1475,6 +1509,7 @@ async def test_usage_data_partial(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1508,6 +1543,7 @@ async def test_agent_data_from_scope(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1530,6 +1566,7 @@ async def test_available_tools_without_description( sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) test_agent = get_test_agent() @@ -1564,6 +1601,7 @@ async def test_output_with_tool_calls(sentry_init, capture_items, get_test_agent integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) test_agent = get_test_agent() @@ -1609,6 +1647,7 @@ async def 
test_message_formatting_with_different_parts(sentry_init, capture_item integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1657,6 +1696,7 @@ async def test_update_invoke_agent_span_with_none_output(sentry_init, capture_it integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -1684,6 +1724,7 @@ async def test_update_ai_client_span_with_none_response(sentry_init, capture_ite sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -1709,6 +1750,7 @@ async def test_agent_without_name(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1734,6 +1776,7 @@ async def test_model_response_without_parts(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -1764,6 +1807,7 @@ async def test_input_messages_error_handling(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -1793,6 +1837,7 @@ async def test_available_tools_error_handling(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", 
name="test") as transaction: @@ -1822,6 +1867,7 @@ async def test_set_usage_data_with_none_usage(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -1848,6 +1894,7 @@ async def test_set_usage_data_with_partial_fields(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -1889,6 +1936,7 @@ def test_tool(x: int) -> int: integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1918,6 +1966,7 @@ async def test_message_parts_with_list_content(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -1954,6 +2003,7 @@ async def test_output_data_with_text_and_tool_calls(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -1993,6 +2043,7 @@ async def test_output_data_error_handling(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2025,6 +2076,7 @@ async def test_message_with_system_prompt_part(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": 
True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2060,6 +2112,7 @@ async def test_message_with_instructions(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2094,6 +2147,7 @@ async def test_set_input_messages_without_prompts(sentry_init, capture_items): integrations=[PydanticAIIntegration(include_prompts=False)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2122,6 +2176,7 @@ async def test_set_output_data_without_prompts(sentry_init, capture_items): integrations=[PydanticAIIntegration(include_prompts=False)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2149,6 +2204,7 @@ async def test_get_model_name_with_exception_in_callable(sentry_init, capture_it sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) # Create model with callable name that raises exception @@ -2172,6 +2228,7 @@ async def test_get_model_name_with_string_model(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) # Pass a string as model @@ -2191,6 +2248,7 @@ async def test_get_model_name_with_none(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) # Pass None @@ -2212,6 +2270,7 @@ async def test_set_model_data_with_system(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + 
_experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2243,6 +2302,7 @@ async def test_set_model_data_from_agent_scope(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2276,6 +2336,7 @@ async def test_set_model_data_with_none_settings_values(sentry_init, capture_ite sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2308,6 +2369,7 @@ async def test_should_send_prompts_without_pii(sentry_init, capture_items): integrations=[PydanticAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=False, # PII disabled + _experiments={"gen_ai_as_v2_spans": True}, ) # Should return False @@ -2326,6 +2388,7 @@ async def test_set_agent_data_without_agent(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2352,6 +2415,7 @@ async def test_set_agent_data_from_scope(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2384,6 +2448,7 @@ async def test_set_agent_data_without_name(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2414,6 +2479,7 @@ async def test_set_available_tools_without_toolset(sentry_init, capture_items): sentry_init( 
integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2444,6 +2510,7 @@ async def test_set_available_tools_with_schema(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2483,6 +2550,7 @@ async def test_execute_tool_span_creation(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2509,6 +2577,7 @@ async def test_execute_tool_span_with_mcp_type(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2536,6 +2605,7 @@ async def test_execute_tool_span_without_prompts(sentry_init, capture_items): integrations=[PydanticAIIntegration(include_prompts=False)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2560,6 +2630,7 @@ async def test_execute_tool_span_with_none_args(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2583,6 +2654,7 @@ async def test_update_execute_tool_span_with_none_span(sentry_init, capture_item sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) # Update with None span - should not raise @@ 
-2607,6 +2679,7 @@ async def test_update_execute_tool_span_with_none_result(sentry_init, capture_it integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2630,6 +2703,7 @@ async def test_tool_execution_without_span_context(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) # Create a simple agent with no tools (won't have function_toolset) @@ -2661,6 +2735,7 @@ async def test_invoke_agent_span_with_callable_instruction(sentry_init, capture_ integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2694,6 +2769,7 @@ async def test_invoke_agent_span_with_string_instructions(sentry_init, capture_i integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2722,6 +2798,7 @@ async def test_ai_client_span_with_streaming_flag(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2749,6 +2826,7 @@ async def test_ai_client_span_gets_agent_from_scope(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) with sentry_sdk.start_transaction(op="test", name="test") as transaction: @@ -2797,6 +2875,7 @@ async def test_binary_content_encoding_image(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, 
send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2827,6 +2906,7 @@ async def test_binary_content_encoding_mixed_content(sentry_init, capture_items) integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2870,6 +2950,7 @@ async def test_binary_content_in_agent_run(sentry_init, capture_items): integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -2893,7 +2974,11 @@ async def test_binary_content_in_agent_run(sentry_init, capture_items): @pytest.mark.asyncio async def test_set_usage_data_with_cache_tokens(sentry_init, capture_items): """Test that cache_read_tokens and cache_write_tokens are tracked.""" - sentry_init(integrations=[PydanticAIIntegration()], traces_sample_rate=1.0) + sentry_init( + integrations=[PydanticAIIntegration()], + traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, + ) items = capture_items("transaction", "span") @@ -2964,6 +3049,7 @@ def test_image_url_base64_content_in_span( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -3034,6 +3120,7 @@ async def test_invoke_agent_image_url( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) agent = Agent("test", name="test_image_url_agent") @@ -3081,6 +3168,7 @@ def multiply_numbers(a: int, b: int) -> int: integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") From 9b20bd24b001af3953f6d4094d6461eea2c58231 Mon Sep 17 00:00:00 2001 
From: Alexander Alderman Webb Date: Fri, 17 Apr 2026 15:17:29 +0200 Subject: [PATCH 36/36] add experimental option to langgraph tests --- tests/integrations/langgraph/test_langgraph.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/tests/integrations/langgraph/test_langgraph.py b/tests/integrations/langgraph/test_langgraph.py index e1a3baa0a8..b70889548f 100644 --- a/tests/integrations/langgraph/test_langgraph.py +++ b/tests/integrations/langgraph/test_langgraph.py @@ -154,6 +154,7 @@ def test_state_graph_compile( integrations=[LanggraphIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") graph = MockStateGraph() @@ -209,6 +210,7 @@ def test_pregel_invoke(sentry_init, capture_items, send_default_pii, include_pro integrations=[LanggraphIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -311,6 +313,7 @@ def test_pregel_ainvoke(sentry_init, capture_items, send_default_pii, include_pr integrations=[LanggraphIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") test_state = {"messages": [MockMessage("What's the weather like?", name="user")]} @@ -391,6 +394,7 @@ def test_pregel_invoke_error(sentry_init, capture_items): integrations=[LanggraphIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") test_state = {"messages": [MockMessage("This will fail")]} @@ -421,6 +425,7 @@ def test_pregel_ainvoke_error(sentry_init, capture_items): integrations=[LanggraphIntegration(include_prompts=True)], 
traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") test_state = {"messages": [MockMessage("This will fail async")]} @@ -455,6 +460,7 @@ def test_span_origin(sentry_init, capture_items): sentry_init( integrations=[LanggraphIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -486,6 +492,7 @@ def test_pregel_invoke_with_different_graph_names( integrations=[LanggraphIntegration()], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -529,6 +536,7 @@ def test_pregel_invoke_span_includes_usage_data(sentry_init, capture_items): sentry_init( integrations=[LanggraphIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -605,6 +613,7 @@ def test_pregel_ainvoke_span_includes_usage_data(sentry_init, capture_items): sentry_init( integrations=[LanggraphIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -684,6 +693,7 @@ def test_pregel_invoke_multiple_llm_calls_aggregate_usage(sentry_init, capture_i sentry_init( integrations=[LanggraphIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -765,6 +775,7 @@ def test_pregel_ainvoke_multiple_llm_calls_aggregate_usage(sentry_init, capture_ sentry_init( integrations=[LanggraphIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -849,6 +860,7 @@ def test_pregel_invoke_span_includes_response_model(sentry_init, capture_items): sentry_init( integrations=[LanggraphIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = 
capture_items("transaction", "span") @@ -921,6 +933,7 @@ def test_pregel_ainvoke_span_includes_response_model(sentry_init, capture_items) sentry_init( integrations=[LanggraphIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -996,6 +1009,7 @@ def test_pregel_invoke_span_uses_last_response_model(sentry_init, capture_items) sentry_init( integrations=[LanggraphIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1080,6 +1094,7 @@ def test_pregel_ainvoke_span_uses_last_response_model(sentry_init, capture_items sentry_init( integrations=[LanggraphIntegration()], traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1212,6 +1227,7 @@ def test_extraction_functions_complex_scenario(sentry_init, capture_items): integrations=[LanggraphIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1287,6 +1303,7 @@ def test_langgraph_message_role_mapping(sentry_init, capture_items): integrations=[LanggraphIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span") @@ -1361,6 +1378,7 @@ def test_langgraph_message_truncation(sentry_init, capture_items): integrations=[LanggraphIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, + _experiments={"gen_ai_as_v2_spans": True}, ) items = capture_items("transaction", "span")