
Commit e1bca1a

Loosen openai version for instrumentation + linting

1 parent 6383978 commit e1bca1a

File tree

7 files changed: +116 -71 lines


Diff for: instrumentation/opentelemetry-instrumentation-openai/pyproject.toml

+2 -2

@@ -26,12 +26,12 @@ classifiers = [
 ]
 dependencies = [
   "opentelemetry-api ~= 1.12",
-  "opentelemetry-instrumentation == 0.48b0.dev",
+  "opentelemetry-instrumentation == 0.47b0",
 ]

 [project.optional-dependencies]
 instruments = [
-  "openai ~= 1.37.1",
+  "openai >= 0.27.0",
 ]

 [project.entry-points.opentelemetry_instrumentor]
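
The loosened specifier matters at runtime, not just at install time: the instrumentor compares the installed openai distribution against its declared spec before patching anything. Below is a minimal sketch of that gate, assuming the dependency helper shipped with opentelemetry-instrumentation (the exact import path and return type may vary between releases):

# Sketch: how an instrumentation's dependency spec gates patching.
# Assumes opentelemetry-instrumentation's dependency helper; names may
# differ across releases.
from opentelemetry.instrumentation.dependencies import get_dependency_conflicts

_instruments = ("openai >= 0.27.0",)

conflict = get_dependency_conflicts(_instruments)
if conflict:
    # e.g. openai 0.26.x installed: instrumentation is skipped
    print(f"openai does not satisfy the spec, skipping: {conflict}")
else:
    print("openai satisfies the spec; instrumentation can patch")

With the old pin "openai ~= 1.37.1", any openai 1.38+ install would have reported a conflict, and the auto-instrumentation machinery would typically skip the package entirely.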

Diff for: instrumentation/opentelemetry-instrumentation-openai/src/opentelemetry/instrumentation/openai/__init__.py

+5 -5

@@ -48,7 +48,7 @@
 from opentelemetry.trace import get_tracer
 from wrapt import wrap_function_wrapper
 from langtrace_python_sdk.instrumentation.openai.patch import (
-    chat_completions_create
+    chat_completions_create,
 )


@@ -58,16 +58,16 @@ def instrumentation_dependencies(self) -> Collection[str]:
         return _instruments

     def _instrument(self, **kwargs):
-        """Enable OpenAI instrumentation.
-        """
+        """Enable OpenAI instrumentation."""
         tracer_provider = kwargs.get("tracer_provider")
         tracer = get_tracer(__name__, "", tracer_provider)
         version = importlib.metadata.version("openai")
-
         wrap_function_wrapper(
             "openai.resources.chat.completions",
             "Completions.create",
-            chat_completions_create("openai.chat.completions.create", version, tracer),
+            chat_completions_create(
+                "openai.chat.completions.create", version, tracer
+            ),
         )

     def _uninstrument(self, **kwargs):
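
For reference, wrap_function_wrapper installs a wrapt-style wrapper, so chat_completions_create must be a factory that returns a callable with wrapt's (wrapped, instance, args, kwargs) signature. A stripped-down sketch of that shape, with illustrative span attributes rather than the module's exact ones:

# Sketch of the wrapt factory pattern used above. The attribute set in
# traced_method is illustrative.
def chat_completions_create(name, version, tracer):
    def traced_method(wrapped, instance, args, kwargs):
        with tracer.start_as_current_span(name) as span:
            span.set_attribute("gen_ai.request.model", kwargs.get("model", ""))
            # invoke the real Completions.create
            return wrapped(*args, **kwargs)
    return traced_method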

Diff for: instrumentation/opentelemetry-instrumentation-openai/src/opentelemetry/instrumentation/openai/package.py

+1 -1

@@ -13,4 +13,4 @@
 # limitations under the License.


-_instruments = ("openai ~= 1.37.1",)
+_instruments = ("openai >= 0.27.0",)
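
Note how much the range widens: under PEP 440, "~= 1.37.1" is equivalent to ">= 1.37.1, < 1.38.0", pinning the instrumentation to a single openai patch series, while ">= 0.27.0" accepts both the legacy 0.x SDK and all 1.x releases.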

Diff for: instrumentation/opentelemetry-instrumentation-openai/src/opentelemetry/instrumentation/openai/patch.py

+48 -11

@@ -19,7 +19,12 @@
 from opentelemetry.trace.propagation import set_span_in_context
 from openai._types import NOT_GIVEN
 from span_attributes import SpanAttributes, LLMSpanAttributes, Event
-from utils import estimate_tokens, silently_fail, extract_content, calculate_prompt_tokens
+from utils import (
+    estimate_tokens,
+    silently_fail,
+    extract_content,
+    calculate_prompt_tokens,
+)


 def chat_completions_create(original_method, version, tracer):
@@ -34,7 +39,11 @@ def traced_method(wrapped, instance, args, kwargs):
         for tool_call in tools:
             tool_call_dict = {
                 "id": tool_call.id if hasattr(tool_call, "id") else "",
-                "type": tool_call.type if hasattr(tool_call, "type") else "",
+                "type": (
+                    tool_call.type
+                    if hasattr(tool_call, "type")
+                    else ""
+                ),
             }
             if hasattr(tool_call, "function"):
                 tool_call_dict["function"] = {
@@ -125,9 +134,14 @@ def _set_input_attributes(span, kwargs, attributes):
     for field, value in attributes.model_dump(by_alias=True).items():
         set_span_attribute(span, field, value)

-    if kwargs.get("functions") is not None and kwargs.get("functions") != NOT_GIVEN:
+    if (
+        kwargs.get("functions") is not None
+        and kwargs.get("functions") != NOT_GIVEN
+    ):
         for function in kwargs.get("functions"):
-            tools.append(json.dumps({"type": "function", "function": function}))
+            tools.append(
+                json.dumps({"type": "function", "function": function})
+            )

     if kwargs.get("tools") is not None and kwargs.get("tools") != NOT_GIVEN:
         tools.append(json.dumps(kwargs.get("tools")))
@@ -149,7 +163,11 @@ def _set_response_attributes(span, kwargs, result):
             ),
             "content": extract_content(choice),
             **(
-                {"content_filter_results": choice["content_filter_results"]}
+                {
+                    "content_filter_results": choice[
+                        "content_filter_results"
+                    ]
+                }
                 if "content_filter_results" in choice
                 else {}
             ),
@@ -239,7 +257,9 @@ def is_streaming(kwargs):
     )


-def get_llm_request_attributes(kwargs, prompts=None, model=None, operation_name="chat"):
+def get_llm_request_attributes(
+    kwargs, prompts=None, model=None, operation_name="chat"
+):

     user = kwargs.get("user", None)
     if prompts is None:
@@ -267,7 +287,9 @@ def get_llm_request_attributes(kwargs, prompts=None, model=None, operation_name=
         SpanAttributes.LLM_USER: user,
         SpanAttributes.LLM_REQUEST_TOP_P: top_p,
         SpanAttributes.LLM_REQUEST_MAX_TOKENS: kwargs.get("max_tokens"),
-        SpanAttributes.LLM_SYSTEM_FINGERPRINT: kwargs.get("system_fingerprint"),
+        SpanAttributes.LLM_SYSTEM_FINGERPRINT: kwargs.get(
+            "system_fingerprint"
+        ),
         SpanAttributes.LLM_PRESENCE_PENALTY: kwargs.get("presence_penalty"),
         SpanAttributes.LLM_FREQUENCY_PENALTY: kwargs.get("frequency_penalty"),
         SpanAttributes.LLM_REQUEST_SEED: kwargs.get("seed"),
@@ -283,7 +305,12 @@ class StreamWrapper:
     span: Span

     def __init__(
-        self, stream, span, prompt_tokens, function_call=False, tool_calls=False
+        self,
+        stream,
+        span,
+        prompt_tokens,
+        function_call=False,
+        tool_calls=False,
     ):
         self.stream = stream
         self.span = span
@@ -416,7 +443,11 @@ def process_chunk(self, chunk):
                     content.append(tool_call.function.arguments)
                 set_event_completion_chunk(
                     self.span,
-                    "".join(content) if len(content) > 0 and content[0] is not None else "",
+                    (
+                        "".join(content)
+                        if len(content) > 0 and content[0] is not None
+                        else ""
+                    ),
                 )
                 if content:
                     self.result_content.append(content[0])
@@ -427,12 +458,18 @@ def process_chunk(self, chunk):
             content = [chunk.text]
             set_event_completion_chunk(
                 self.span,
-                "".join(content) if len(content) > 0 and content[0] is not None else "",
+                (
+                    "".join(content)
+                    if len(content) > 0 and content[0] is not None
+                    else ""
+                ),
             )

             if content:
                 self.result_content.append(content[0])

         if hasattr(chunk, "usage_metadata"):
-            self.completion_tokens = chunk.usage_metadata.candidates_token_count
+            self.completion_tokens = (
+                chunk.usage_metadata.candidates_token_count
+            )
             self.prompt_tokens = chunk.usage_metadata.prompt_token_count
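
Most of this diff is line-length reflow, but it also shows the shape of StreamWrapper: an iterator proxy that records each chunk on the span and tallies token counts as the stream drains. A minimal sketch of that pattern, with illustrative attribute names rather than the module's exact ones:

# Sketch of the stream-proxy pattern StreamWrapper follows: iterate the
# underlying stream, inspect each chunk, and close the span when the
# stream is exhausted. Attribute names are illustrative.
class StreamProxy:
    def __init__(self, stream, span):
        self.stream = stream
        self.span = span
        self.completion_tokens = 0

    def __iter__(self):
        return self

    def __next__(self):
        try:
            chunk = next(self.stream)
        except StopIteration:
            # stream drained: flush totals and end the span
            self.span.set_attribute(
                "gen_ai.usage.output_tokens", self.completion_tokens
            )
            self.span.end()
            raise
        self.process_chunk(chunk)
        return chunk

    def process_chunk(self, chunk):
        if hasattr(chunk, "usage_metadata"):
            self.completion_tokens = chunk.usage_metadata.candidates_token_count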

Diff for: instrumentation/opentelemetry-instrumentation-openai/src/opentelemetry/instrumentation/openai/span_attributes.py

+56 -50

@@ -80,133 +80,139 @@ class LLMSpanAttributes(BaseModel):
     model_config = ConfigDict(extra="allow")
     gen_ai_operation_name: str = Field(
         ...,
-        alias='gen_ai.operation.name',
-        description='The name of the operation being performed.',
+        alias="gen_ai.operation.name",
+        description="The name of the operation being performed.",
     )
     gen_ai_request_model: str = Field(
         ...,
-        alias='gen_ai.request.model',
-        description='Model name from the input request',
+        alias="gen_ai.request.model",
+        description="Model name from the input request",
     )
     gen_ai_response_model: Optional[str] = Field(
-        None, alias='gen_ai.response.model', description='Model name from the response'
+        None,
+        alias="gen_ai.response.model",
+        description="Model name from the response",
     )
     gen_ai_request_temperature: Optional[float] = Field(
         None,
-        alias='gen_ai.request.temperature',
-        description='Temperature value from the input request',
+        alias="gen_ai.request.temperature",
+        description="Temperature value from the input request",
     )
     gen_ai_request_logit_bias: Optional[str] = Field(
         None,
-        alias='gen_ai.request.logit_bias',
-        description='Likelihood bias of the specified tokens the input request.',
+        alias="gen_ai.request.logit_bias",
+        description="Likelihood bias of the specified tokens the input request.",
     )
     gen_ai_request_logprobs: Optional[bool] = Field(
         None,
-        alias='gen_ai.request.logprobs',
-        description='Logprobs flag returns log probabilities.',
+        alias="gen_ai.request.logprobs",
+        description="Logprobs flag returns log probabilities.",
     )
     gen_ai_request_top_logprobs: Optional[float] = Field(
         None,
-        alias='gen_ai.request.top_logprobs',
-        description='Integer between 0 and 5 specifying the number of most likely tokens to return.',
+        alias="gen_ai.request.top_logprobs",
+        description="Integer between 0 and 5 specifying the number of most likely tokens to return.",
     )
     gen_ai_request_top_p: Optional[float] = Field(
         None,
-        alias='gen_ai.request.top_p',
-        description='Top P value from the input request',
+        alias="gen_ai.request.top_p",
+        description="Top P value from the input request",
     )
     gen_ai_request_top_k: Optional[float] = Field(
         None,
-        alias='gen_ai.request.top_k',
-        description='Top K results to return from the input request',
+        alias="gen_ai.request.top_k",
+        description="Top K results to return from the input request",
     )
     gen_ai_user: Optional[str] = Field(
-        None, alias='gen_ai.user', description='User ID from the input request'
+        None, alias="gen_ai.user", description="User ID from the input request"
     )
     gen_ai_prompt: Optional[str] = Field(
-        None, alias='gen_ai.prompt', description='Prompt text from the input request'
+        None,
+        alias="gen_ai.prompt",
+        description="Prompt text from the input request",
     )
     gen_ai_completion: Optional[str] = Field(
         None,
-        alias='gen_ai.completion',
+        alias="gen_ai.completion",
         description='Completion text from the response. This will be an array of json objects with the following format {"role": "", "content": ""}. Role can be one of the following values: [system, user, assistant, tool]',
     )
     gen_ai_request_stream: Optional[bool] = Field(
         None,
-        alias='gen_ai.request.stream',
-        description='Stream flag from the input request',
+        alias="gen_ai.request.stream",
+        description="Stream flag from the input request",
     )
     gen_ai_request_encoding_formats: Optional[List[str]] = Field(
         None,
-        alias='gen_ai.request.encoding_formats',
+        alias="gen_ai.request.encoding_formats",
         description="Encoding formats from the input request. Allowed values: ['float', 'int8','uint8', 'binary', 'ubinary', 'base64']",
     )
     gen_ai_completion_chunk: Optional[str] = Field(
         None,
-        alias='gen_ai.completion.chunk',
-        description='Chunk text from the response',
+        alias="gen_ai.completion.chunk",
+        description="Chunk text from the response",
     )
     gen_ai_response_finish_reasons: Optional[List[str]] = Field(
         None,
-        alias='gen_ai.response.finish_reasons',
-        description='Array of reasons the model stopped generating tokens, corresponding to each generation received',
+        alias="gen_ai.response.finish_reasons",
+        description="Array of reasons the model stopped generating tokens, corresponding to each generation received",
     )
     gen_ai_system_fingerprint: Optional[str] = Field(
         None,
-        alias='gen_ai.system_fingerprint',
-        description='System fingerprint of the system that generated the response',
+        alias="gen_ai.system_fingerprint",
+        description="System fingerprint of the system that generated the response",
     )
     gen_ai_request_tool_choice: Optional[str] = Field(
         None,
-        alias='gen_ai.request.tool_choice',
-        description='Tool choice from the input request',
+        alias="gen_ai.request.tool_choice",
+        description="Tool choice from the input request",
     )
     gen_ai_response_tool_calls: Optional[str] = Field(
         None,
-        alias='gen_ai.response.tool_calls',
-        description='Array of tool calls from the response json stringified',
+        alias="gen_ai.response.tool_calls",
+        description="Array of tool calls from the response json stringified",
    )
     gen_ai_request_max_tokens: Optional[float] = Field(
         None,
-        alias='gen_ai.request.max_tokens',
-        description='The maximum number of tokens the LLM generates for a request.',
+        alias="gen_ai.request.max_tokens",
+        description="The maximum number of tokens the LLM generates for a request.",
     )
     gen_ai_usage_input_tokens: Optional[float] = Field(
         None,
-        alias='gen_ai.usage.input_tokens',
-        description='The number of tokens used in the llm prompt.',
+        alias="gen_ai.usage.input_tokens",
+        description="The number of tokens used in the llm prompt.",
     )
     gen_ai_usage_total_tokens: Optional[float] = Field(
         None,
-        alias='gen_ai.usage.total_tokens',
-        description='The total number of tokens used in the llm request.',
+        alias="gen_ai.usage.total_tokens",
+        description="The total number of tokens used in the llm request.",
     )
     gen_ai_usage_output_tokens: Optional[float] = Field(
         None,
-        alias='gen_ai.usage.output_tokens',
-        description='The number of tokens in the llm response.',
+        alias="gen_ai.usage.output_tokens",
+        description="The number of tokens in the llm response.",
     )
     gen_ai_request_seed: Optional[str] = Field(
-        None, alias='gen_ai.request.seed', description='Seed from the input request'
+        None,
+        alias="gen_ai.request.seed",
+        description="Seed from the input request",
     )
     gen_ai_request_frequency_penalty: Optional[float] = Field(
         None,
-        alias='gen_ai.request.frequency_penalty',
-        description='Frequency penalty from the input request',
+        alias="gen_ai.request.frequency_penalty",
+        description="Frequency penalty from the input request",
     )
     gen_ai_request_presence_penalty: Optional[float] = Field(
         None,
-        alias='gen_ai.request.presence_penalty',
-        description='Presence penalty from the input request',
+        alias="gen_ai.request.presence_penalty",
+        description="Presence penalty from the input request",
     )
     gen_ai_request_tools: Optional[str] = Field(
         None,
-        alias='gen_ai.request.tools',
-        description='An array of tools from the input request json stringified',
+        alias="gen_ai.request.tools",
+        description="An array of tools from the input request json stringified",
     )
     gen_ai_request_tool_results: Optional[str] = Field(
         None,
-        alias='gen_ai.request.tool_results',
-        description='An array of tool results from the input request json stringified',
+        alias="gen_ai.request.tool_results",
+        description="An array of tool results from the input request json stringified",
     )
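
Every change in this file is a quote-style and line-length reflow; the aliases themselves are untouched, and they are what actually land on spans: patch.py builds an LLMSpanAttributes and flattens it with model_dump(by_alias=True). A small usage sketch, with illustrative field values:

# Sketch: the pydantic aliases map snake_case fields to dotted span
# keys. Values here are illustrative.
attributes = LLMSpanAttributes(
    **{
        "gen_ai.operation.name": "chat",
        "gen_ai.request.model": "gpt-4o",
        "gen_ai.request.temperature": 0.2,
    }
)

for field, value in attributes.model_dump(by_alias=True).items():
    if value is not None:
        print(field, value)  # e.g. gen_ai.request.model gpt-4o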

Diff for: instrumentation/opentelemetry-instrumentation-openai/src/opentelemetry/instrumentation/openai/utils.py

+3 -1

@@ -56,7 +56,9 @@ def wrapper(*args, **kwargs):
             return func(*args, **kwargs)
         except Exception as exception:
             logger.warning(
-                "Failed to execute %s, error: %s", func.__name__, str(exception)
+                "Failed to execute %s, error: %s",
+                func.__name__,
+                str(exception),
             )

     return wrapper
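
The hunk shows only the body of silently_fail's inner wrapper. The full decorator presumably looks like the sketch below, reconstructed around the lines above (the logger setup is an assumption):

# Sketch of the silently_fail decorator implied by the hunk above.
import logging
from functools import wraps

logger = logging.getLogger(__name__)

def silently_fail(func):
    """Swallow exceptions from func, logging a warning instead of raising."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as exception:
            logger.warning(
                "Failed to execute %s, error: %s",
                func.__name__,
                str(exception),
            )
    return wrapper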

Diff for: instrumentation/opentelemetry-instrumentation-openai/src/opentelemetry/instrumentation/openai/version.py

+1 -1

@@ -12,4 +12,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-__version__ = "0.0.1dev"
+__version__ = "0.47b0"
