@@ -38,28 +38,53 @@ def test_generate_content(
     assert dict(spans[0].attributes) == {
         "gen_ai.operation.name": "chat",
         "gen_ai.request.model": "gemini-1.5-flash-002",
+        "gen_ai.response.finish_reasons": ("stop",),
+        "gen_ai.response.model": "gemini-1.5-flash-002",
         "gen_ai.system": "vertex_ai",
+        "gen_ai.usage.input_tokens": 5,
+        "gen_ai.usage.output_tokens": 19,
         "server.address": "us-central1-aiplatform.googleapis.com",
         "server.port": 443,
     }
 
-    # Emits content event
+    # Emits user and choice events
     logs = log_exporter.get_finished_logs()
-    assert len(logs) == 1
-    log_record = logs[0].log_record
+    assert len(logs) == 2
+    user_log, choice_log = [log_data.log_record for log_data in logs]
+
     span_context = spans[0].get_span_context()
-    assert log_record.trace_id == span_context.trace_id
-    assert log_record.span_id == span_context.span_id
-    assert log_record.trace_flags == span_context.trace_flags
-    assert log_record.attributes == {
+    assert user_log.trace_id == span_context.trace_id
+    assert user_log.span_id == span_context.span_id
+    assert user_log.trace_flags == span_context.trace_flags
+    assert user_log.attributes == {
         "gen_ai.system": "vertex_ai",
         "event.name": "gen_ai.user.message",
     }
-    assert log_record.body == {
+    assert user_log.body == {
         "content": [{"text": "Say this is a test"}],
         "role": "user",
     }
 
+    assert choice_log.trace_id == span_context.trace_id
+    assert choice_log.span_id == span_context.span_id
+    assert choice_log.trace_flags == span_context.trace_flags
+    assert choice_log.attributes == {
+        "gen_ai.system": "vertex_ai",
+        "event.name": "gen_ai.choice",
+    }
+    assert choice_log.body == {
+        "finish_reason": "stop",
+        "index": 0,
+        "message": {
+            "content": [
+                {
+                    "text": "Okay, I understand. I'm ready for your test. Please proceed.\n"
+                }
+            ],
+            "role": "model",
+        },
+    }
+
 
 @pytest.mark.vcr
 def test_generate_content_without_events(
@@ -81,20 +106,34 @@ def test_generate_content_without_events(
     assert dict(spans[0].attributes) == {
         "gen_ai.operation.name": "chat",
         "gen_ai.request.model": "gemini-1.5-flash-002",
+        "gen_ai.response.finish_reasons": ("stop",),
+        "gen_ai.response.model": "gemini-1.5-flash-002",
         "gen_ai.system": "vertex_ai",
+        "gen_ai.usage.input_tokens": 5,
+        "gen_ai.usage.output_tokens": 19,
         "server.address": "us-central1-aiplatform.googleapis.com",
         "server.port": 443,
     }
 
-    # Emits event without body.content
+    # Emits user and choice event without body.content
     logs = log_exporter.get_finished_logs()
-    assert len(logs) == 1
-    log_record = logs[0].log_record
-    assert log_record.attributes == {
+    assert len(logs) == 2
+    user_log, choice_log = [log_data.log_record for log_data in logs]
+    assert user_log.attributes == {
         "gen_ai.system": "vertex_ai",
         "event.name": "gen_ai.user.message",
     }
-    assert log_record.body == {"role": "user"}
+    assert user_log.body == {"role": "user"}
+
+    assert choice_log.attributes == {
+        "gen_ai.system": "vertex_ai",
+        "event.name": "gen_ai.choice",
+    }
+    assert choice_log.body == {
+        "finish_reason": "stop",
+        "index": 0,
+        "message": {"role": "model"},
+    }
 
 
 @pytest.mark.vcr
@@ -255,7 +294,11 @@ def test_generate_content_extra_params(span_exporter, instrument_no_content):
         "gen_ai.request.stop_sequences": ("\n\n\n",),
         "gen_ai.request.temperature": 0.20000000298023224,
         "gen_ai.request.top_p": 0.949999988079071,
+        "gen_ai.response.finish_reasons": ("length",),
+        "gen_ai.response.model": "gemini-1.5-flash-002",
         "gen_ai.system": "vertex_ai",
+        "gen_ai.usage.input_tokens": 5,
+        "gen_ai.usage.output_tokens": 5,
         "server.address": "us-central1-aiplatform.googleapis.com",
         "server.port": 443,
     }
@@ -274,7 +317,7 @@ def assert_span_error(span: ReadableSpan) -> None:
 
 
 @pytest.mark.vcr
-def test_generate_content_all_input_events(
+def test_generate_content_all_events(
     log_exporter: InMemoryLogExporter,
     instrument_with_content: VertexAIInstrumentor,
 ):
@@ -299,10 +342,10 @@ def test_generate_content_all_input_events(
         ],
     )
 
-    # Emits a system event, 2 users events, and a assistant event
+    # Emits a system event, 2 users events, an assistant event, and the choice (response) event
     logs = log_exporter.get_finished_logs()
-    assert len(logs) == 4
-    system_log, user_log1, assistant_log, user_log2 = [
+    assert len(logs) == 5
+    system_log, user_log1, assistant_log, user_log2, choice_log = [
         log_data.log_record for log_data in logs
     ]
@@ -342,3 +385,16 @@ def test_generate_content_all_input_events(
         "content": [{"text": "Address me by name and say this is a test"}],
         "role": "user",
     }
+
+    assert choice_log.attributes == {
+        "gen_ai.system": "vertex_ai",
+        "event.name": "gen_ai.choice",
+    }
+    assert choice_log.body == {
+        "finish_reason": "stop",
+        "index": 0,
+        "message": {
+            "content": [{"text": "OpenTelemetry, this is a test.\n"}],
+            "role": "model",
+        },
+    }