Skip to content

Commit dacb5b1

Browse files
Merge branch 'master' into szokeasaurusrex/keep-alive-env-var
2 parents f9cf4fb + cb82483 commit dacb5b1

20 files changed

+275
-94
lines changed

.github/workflows/release.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ jobs:
2020
steps:
2121
- name: Get auth token
2222
id: token
23-
uses: actions/create-github-app-token@3ff1caaa28b64c9cc276ce0a02e2ff584f3900c5 # v2.0.2
23+
uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6
2424
with:
2525
app-id: ${{ vars.SENTRY_RELEASE_BOT_CLIENT_ID }}
2626
private-key: ${{ secrets.SENTRY_RELEASE_BOT_PRIVATE_KEY }}

README.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@
66
_Bad software is everywhere, and we're tired of it. Sentry is on a mission to help developers write better software faster, so we can get back to enjoying technology. If you want to join us
77
[<kbd>**Check out our open positions**</kbd>](https://sentry.io/careers/)_.
88

9-
[![Discord](https://img.shields.io/discord/621778831602221064?logo=discord&labelColor=%20%235462eb&logoColor=%20%23f5f5f5&color=%20%235462eb)](https://discord.gg/wdNEHETs87)
9+
[![Discord](https://img.shields.io/discord/621778831602221064?logo=discord&labelColor=%20%235462eb&logoColor=%20%23f5f5f5&color=%20%235462eb)](https://discord.com/invite/Ww9hbqr)
1010
[![Twitter Follow](https://img.shields.io/twitter/follow/getsentry?label=@getsentry&style=social)](https://twitter.com/intent/follow?screen_name=getsentry)
1111
[![PyPi page link -- version](https://img.shields.io/pypi/v/sentry-sdk.svg)](https://pypi.python.org/pypi/sentry-sdk)
1212
<img src="https://img.shields.io/badge/python-3.7 | 3.8 | 3.9 | 3.10 | 3.11 | 3.12 | 3.13-blue.svg" alt="python">
@@ -106,7 +106,7 @@ If you encounter issues or need help setting up or configuring the SDK, don't he
106106
Here are all resources to help you make the most of Sentry:
107107

108108
- [Documentation](https://docs.sentry.io/platforms/python/) - Official documentation to get started.
109-
- [Discord](https://img.shields.io/discord/621778831602221064) - Join our Discord community.
109+
- [Discord](https://discord.com/invite/Ww9hbqr) - Join our Discord community.
110110
- [X/Twitter](https://twitter.com/intent/follow?screen_name=getsentry) - Follow us on X (Twitter) for updates.
111111
- [Stack Overflow](https://stackoverflow.com/questions/tagged/sentry) - Questions and answers related to Sentry.
112112

docs/api.rst

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@ Capturing Data
2525
Enriching Events
2626
================
2727

28+
.. autofunction:: sentry_sdk.api.add_attachment
2829
.. autofunction:: sentry_sdk.api.add_breadcrumb
2930
.. autofunction:: sentry_sdk.api.set_context
3031
.. autofunction:: sentry_sdk.api.set_extra
@@ -63,4 +64,3 @@ Managing Scope (advanced)
6364
.. autofunction:: sentry_sdk.api.push_scope
6465

6566
.. autofunction:: sentry_sdk.api.new_scope
66-

requirements-docs.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,3 +3,4 @@ shibuya
33
sphinx<8.2
44
sphinx-autodoc-typehints[type_comments]>=1.8.0
55
typing-extensions
6+
snowballstemmer<3.0

sentry_sdk/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@
1515
"integrations",
1616
# From sentry_sdk.api
1717
"init",
18+
"add_attachment",
1819
"add_breadcrumb",
1920
"capture_event",
2021
"capture_exception",

sentry_sdk/ai/monitoring.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
import inspect
22
from functools import wraps
33

4+
from sentry_sdk.consts import SPANDATA
45
import sentry_sdk.utils
56
from sentry_sdk import start_span
67
from sentry_sdk.tracing import Span
@@ -39,7 +40,7 @@ def sync_wrapped(*args, **kwargs):
3940
for k, v in kwargs.pop("sentry_data", {}).items():
4041
span.set_data(k, v)
4142
if curr_pipeline:
42-
span.set_data("ai.pipeline.name", curr_pipeline)
43+
span.set_data(SPANDATA.AI_PIPELINE_NAME, curr_pipeline)
4344
return f(*args, **kwargs)
4445
else:
4546
_ai_pipeline_name.set(description)
@@ -68,7 +69,7 @@ async def async_wrapped(*args, **kwargs):
6869
for k, v in kwargs.pop("sentry_data", {}).items():
6970
span.set_data(k, v)
7071
if curr_pipeline:
71-
span.set_data("ai.pipeline.name", curr_pipeline)
72+
span.set_data(SPANDATA.AI_PIPELINE_NAME, curr_pipeline)
7273
return await f(*args, **kwargs)
7374
else:
7475
_ai_pipeline_name.set(description)
@@ -100,7 +101,7 @@ def record_token_usage(
100101
# type: (Span, Optional[int], Optional[int], Optional[int]) -> None
101102
ai_pipeline_name = get_ai_pipeline_name()
102103
if ai_pipeline_name:
103-
span.set_data("ai.pipeline.name", ai_pipeline_name)
104+
span.set_data(SPANDATA.AI_PIPELINE_NAME, ai_pipeline_name)
104105
if prompt_tokens is not None:
105106
span.set_measurement("ai_prompt_tokens_used", value=prompt_tokens)
106107
if completion_tokens is not None:

sentry_sdk/api.py

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -51,6 +51,7 @@ def overload(x):
5151
# When changing this, update __all__ in __init__.py too
5252
__all__ = [
5353
"init",
54+
"add_attachment",
5455
"add_breadcrumb",
5556
"capture_event",
5657
"capture_exception",
@@ -184,6 +185,20 @@ def capture_exception(
184185
return get_current_scope().capture_exception(error, scope=scope, **scope_kwargs)
185186

186187

188+
@scopemethod
189+
def add_attachment(
190+
bytes=None, # type: Union[None, bytes, Callable[[], bytes]]
191+
filename=None, # type: Optional[str]
192+
path=None, # type: Optional[str]
193+
content_type=None, # type: Optional[str]
194+
add_to_transactions=False, # type: bool
195+
):
196+
# type: (...) -> None
197+
return get_isolation_scope().add_attachment(
198+
bytes, filename, path, content_type, add_to_transactions
199+
)
200+
201+
187202
@scopemethod
188203
def add_breadcrumb(
189204
crumb=None, # type: Optional[Breadcrumb]

sentry_sdk/consts.py

Lines changed: 61 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -187,7 +187,7 @@ class SPANDATA:
187187
For an AI model call, the format of the response
188188
"""
189189

190-
AI_LOGIT_BIAS = "ai.response_format"
190+
AI_LOGIT_BIAS = "ai.logit_bias"
191191
"""
192192
For an AI model call, the logit bias
193193
"""
@@ -204,7 +204,6 @@ class SPANDATA:
204204
Minimize pre-processing done to the prompt sent to the LLM.
205205
Example: true
206206
"""
207-
208207
AI_RESPONSES = "ai.responses"
209208
"""
210209
The responses to an AI model call. Always as a list.
@@ -217,6 +216,66 @@ class SPANDATA:
217216
Example: 123.45
218217
"""
219218

219+
AI_CITATIONS = "ai.citations"
220+
"""
221+
References or sources cited by the AI model in its response.
222+
Example: ["Smith et al. 2020", "Jones 2019"]
223+
"""
224+
225+
AI_DOCUMENTS = "ai.documents"
226+
"""
227+
Documents or content chunks used as context for the AI model.
228+
Example: ["doc1.txt", "doc2.pdf"]
229+
"""
230+
231+
AI_SEARCH_QUERIES = "ai.search_queries"
232+
"""
233+
Queries used to search for relevant context or documents.
234+
Example: ["climate change effects", "renewable energy"]
235+
"""
236+
237+
AI_SEARCH_RESULTS = "ai.search_results"
238+
"""
239+
Results returned from search queries for context.
240+
Example: ["Result 1", "Result 2"]
241+
"""
242+
243+
AI_GENERATION_ID = "ai.generation_id"
244+
"""
245+
Unique identifier for the completion.
246+
Example: "gen_123abc"
247+
"""
248+
249+
AI_SEARCH_REQUIRED = "ai.is_search_required"
250+
"""
251+
Boolean indicating if the model needs to perform a search.
252+
Example: true
253+
"""
254+
255+
AI_FINISH_REASON = "ai.finish_reason"
256+
"""
257+
The reason why the model stopped generating.
258+
Example: "length"
259+
"""
260+
261+
AI_PIPELINE_NAME = "ai.pipeline.name"
262+
"""
263+
Name of the AI pipeline or chain being executed.
264+
Example: "qa-pipeline"
265+
"""
266+
267+
AI_TEXTS = "ai.texts"
268+
"""
269+
Raw text inputs provided to the model.
270+
Example: ["What is machine learning?"]
271+
"""
272+
273+
AI_WARNINGS = "ai.warnings"
274+
"""
275+
Warning messages generated during model execution.
276+
Example: ["Token limit exceeded"]
277+
"""
278+
220279
DB_NAME = "db.name"
221280
"""
222281
The name of the database being accessed. For commands that switch the database, this should be set to the target database (even if the command fails).

sentry_sdk/feature_flags.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -64,7 +64,7 @@ def add_feature_flag(flag, result):
6464
Records a flag and its value to be sent on subsequent error events.
6565
We recommend you do this on flag evaluations. Flags are buffered per Sentry scope.
6666
"""
67-
flags = sentry_sdk.get_current_scope().flags
67+
flags = sentry_sdk.get_isolation_scope().flags
6868
flags.set(flag, result)
6969

7070
span = sentry_sdk.get_current_span()

sentry_sdk/integrations/cohere.py

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -52,17 +52,17 @@
5252
}
5353

5454
COLLECTED_CHAT_RESP_ATTRS = {
55-
"generation_id": "ai.generation_id",
56-
"is_search_required": "ai.is_search_required",
57-
"finish_reason": "ai.finish_reason",
55+
"generation_id": SPANDATA.AI_GENERATION_ID,
56+
"is_search_required": SPANDATA.AI_SEARCH_REQUIRED,
57+
"finish_reason": SPANDATA.AI_FINISH_REASON,
5858
}
5959

6060
COLLECTED_PII_CHAT_RESP_ATTRS = {
61-
"citations": "ai.citations",
62-
"documents": "ai.documents",
63-
"search_queries": "ai.search_queries",
64-
"search_results": "ai.search_results",
65-
"tool_calls": "ai.tool_calls",
61+
"citations": SPANDATA.AI_CITATIONS,
62+
"documents": SPANDATA.AI_DOCUMENTS,
63+
"search_queries": SPANDATA.AI_SEARCH_QUERIES,
64+
"search_results": SPANDATA.AI_SEARCH_RESULTS,
65+
"tool_calls": SPANDATA.AI_TOOL_CALLS,
6666
}
6767

6868

@@ -127,7 +127,7 @@ def collect_chat_response_fields(span, res, include_pii):
127127
)
128128

129129
if hasattr(res.meta, "warnings"):
130-
set_data_normalized(span, "ai.warnings", res.meta.warnings)
130+
set_data_normalized(span, SPANDATA.AI_WARNINGS, res.meta.warnings)
131131

132132
@wraps(f)
133133
def new_chat(*args, **kwargs):
@@ -238,7 +238,7 @@ def new_embed(*args, **kwargs):
238238
should_send_default_pii() and integration.include_prompts
239239
):
240240
if isinstance(kwargs["texts"], str):
241-
set_data_normalized(span, "ai.texts", [kwargs["texts"]])
241+
set_data_normalized(span, SPANDATA.AI_TEXTS, [kwargs["texts"]])
242242
elif (
243243
isinstance(kwargs["texts"], list)
244244
and len(kwargs["texts"]) > 0

sentry_sdk/integrations/huggingface_hub.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -97,7 +97,7 @@ def new_text_generation(*args, **kwargs):
9797
if should_send_default_pii() and integration.include_prompts:
9898
set_data_normalized(
9999
span,
100-
"ai.responses",
100+
SPANDATA.AI_RESPONSES,
101101
[res],
102102
)
103103
span.__exit__(None, None, None)
@@ -107,7 +107,7 @@ def new_text_generation(*args, **kwargs):
107107
if should_send_default_pii() and integration.include_prompts:
108108
set_data_normalized(
109109
span,
110-
"ai.responses",
110+
SPANDATA.AI_RESPONSES,
111111
[res.generated_text],
112112
)
113113
if res.details is not None and res.details.generated_tokens > 0:

sentry_sdk/integrations/openai.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -155,7 +155,7 @@ def _new_chat_completion_common(f, *args, **kwargs):
155155
if should_send_default_pii() and integration.include_prompts:
156156
set_data_normalized(
157157
span,
158-
"ai.responses",
158+
SPANDATA.AI_RESPONSES,
159159
list(map(lambda x: x.message, res.choices)),
160160
)
161161
_calculate_chat_completion_usage(
@@ -329,15 +329,15 @@ def _new_embeddings_create_common(f, *args, **kwargs):
329329
should_send_default_pii() and integration.include_prompts
330330
):
331331
if isinstance(kwargs["input"], str):
332-
set_data_normalized(span, "ai.input_messages", [kwargs["input"]])
332+
set_data_normalized(span, SPANDATA.AI_INPUT_MESSAGES, [kwargs["input"]])
333333
elif (
334334
isinstance(kwargs["input"], list)
335335
and len(kwargs["input"]) > 0
336336
and isinstance(kwargs["input"][0], str)
337337
):
338-
set_data_normalized(span, "ai.input_messages", kwargs["input"])
338+
set_data_normalized(span, SPANDATA.AI_INPUT_MESSAGES, kwargs["input"])
339339
if "model" in kwargs:
340-
set_data_normalized(span, "ai.model_id", kwargs["model"])
340+
set_data_normalized(span, SPANDATA.AI_MODEL_ID, kwargs["model"])
341341

342342
response = yield f, args, kwargs
343343

tests/integrations/anthropic/test_anthropic.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -128,7 +128,7 @@ def test_nonstreaming_create_message(
128128
assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 10
129129
assert span["measurements"]["ai_completion_tokens_used"]["value"] == 20
130130
assert span["measurements"]["ai_total_tokens_used"]["value"] == 30
131-
assert span["data"]["ai.streaming"] is False
131+
assert span["data"][SPANDATA.AI_STREAMING] is False
132132

133133

134134
@pytest.mark.asyncio
@@ -196,7 +196,7 @@ async def test_nonstreaming_create_message_async(
196196
assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 10
197197
assert span["measurements"]["ai_completion_tokens_used"]["value"] == 20
198198
assert span["measurements"]["ai_total_tokens_used"]["value"] == 30
199-
assert span["data"]["ai.streaming"] is False
199+
assert span["data"][SPANDATA.AI_STREAMING] is False
200200

201201

202202
@pytest.mark.parametrize(
@@ -296,7 +296,7 @@ def test_streaming_create_message(
296296
assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 10
297297
assert span["measurements"]["ai_completion_tokens_used"]["value"] == 30
298298
assert span["measurements"]["ai_total_tokens_used"]["value"] == 40
299-
assert span["data"]["ai.streaming"] is True
299+
assert span["data"][SPANDATA.AI_STREAMING] is True
300300

301301

302302
@pytest.mark.asyncio
@@ -399,7 +399,7 @@ async def test_streaming_create_message_async(
399399
assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 10
400400
assert span["measurements"]["ai_completion_tokens_used"]["value"] == 30
401401
assert span["measurements"]["ai_total_tokens_used"]["value"] == 40
402-
assert span["data"]["ai.streaming"] is True
402+
assert span["data"][SPANDATA.AI_STREAMING] is True
403403

404404

405405
@pytest.mark.skipif(
@@ -528,7 +528,7 @@ def test_streaming_create_message_with_input_json_delta(
528528
assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 366
529529
assert span["measurements"]["ai_completion_tokens_used"]["value"] == 51
530530
assert span["measurements"]["ai_total_tokens_used"]["value"] == 417
531-
assert span["data"]["ai.streaming"] is True
531+
assert span["data"][SPANDATA.AI_STREAMING] is True
532532

533533

534534
@pytest.mark.asyncio
@@ -665,7 +665,7 @@ async def test_streaming_create_message_with_input_json_delta_async(
665665
assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 366
666666
assert span["measurements"]["ai_completion_tokens_used"]["value"] == 51
667667
assert span["measurements"]["ai_total_tokens_used"]["value"] == 417
668-
assert span["data"]["ai.streaming"] is True
668+
assert span["data"][SPANDATA.AI_STREAMING] is True
669669

670670

671671
def test_exception_message_create(sentry_init, capture_events):
@@ -810,7 +810,7 @@ def test_add_ai_data_to_span_with_input_json_delta(sentry_init):
810810
assert span._data.get(SPANDATA.AI_RESPONSES) == [
811811
{"type": "text", "text": "{'test': 'data','more': 'json'}"}
812812
]
813-
assert span._data.get("ai.streaming") is True
813+
assert span._data.get(SPANDATA.AI_STREAMING) is True
814814
assert span._measurements.get("ai_prompt_tokens_used")["value"] == 10
815815
assert span._measurements.get("ai_completion_tokens_used")["value"] == 20
816816
assert span._measurements.get("ai_total_tokens_used")["value"] == 30

0 commit comments

Comments (0)