
Commit eaf6c35

schuellc-nvidia authored and Pouyanpi committed

Add unique identifier to LLM prompt/response logging message
1 parent aa45e8e commit eaf6c35

File tree

3 files changed: +17 -4 lines changed


nemoguardrails/logging/callbacks.py (+14 -3)
@@ -27,6 +27,7 @@
 from nemoguardrails.logging.explain import LLMCallInfo
 from nemoguardrails.logging.processing_log import processing_log_var
 from nemoguardrails.logging.stats import LLMStats
+from nemoguardrails.utils import new_uuid

 log = logging.getLogger(__name__)

@@ -51,6 +52,8 @@ async def on_llm_start(
         llm_call_info = LLMCallInfo()
         llm_call_info_var.set(llm_call_info)

+        llm_call_info.id = new_uuid()
+
         # We also add it to the explain object
         explain_info = explain_info_var.get()
         if explain_info:
@@ -86,6 +89,8 @@ async def on_chat_model_start(
         llm_call_info = LLMCallInfo()
         llm_call_info_var.set(llm_call_info)

+        llm_call_info.id = new_uuid()
+
         # We also add it to the explain object
         explain_info = explain_info_var.get()
         if explain_info:
@@ -109,7 +114,7 @@ async def on_chat_model_start(
         )

         log.info("Invocation Params :: %s", kwargs.get("invocation_params", {}))
-        log.info("Prompt Messages :: %s", prompt)
+        log.info("Prompt Messages :: %s", prompt, extra={"id": llm_call_info.id})
         llm_call_info.prompt = prompt
         llm_call_info.started_at = time()

@@ -143,12 +148,16 @@ async def on_llm_end(
         **kwargs: Any,
     ) -> None:
         """Run when LLM ends running."""
-        log.info("Completion :: %s", response.generations[0][0].text)
         llm_call_info = llm_call_info_var.get()
         if llm_call_info is None:
             llm_call_info = LLMCallInfo()
         llm_call_info.completion = response.generations[0][0].text
         llm_call_info.finished_at = time()
+        log.info(
+            "Completion :: %s",
+            response.generations[0][0].text,
+            extra={"id": llm_call_info.id},
+        )

         llm_stats = llm_stats_var.get()
         if llm_stats is None:
@@ -159,7 +168,9 @@ async def on_llm_end(
         if len(response.generations[0]) > 1:
             for i, generation in enumerate(response.generations[0][1:]):
                 log.info("--- :: Completion %d", i + 2)
-                log.info("Completion :: %s", generation.text)
+                log.info(
+                    "Completion :: %s", generation.text, extra={"id": llm_call_info.id}
+                )

         log.info("Output Stats :: %s", response.llm_output)
         took = llm_call_info.finished_at - llm_call_info.started_at
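The change works because Python's standard `logging` module copies every key of the `extra` dict onto the emitted `LogRecord` as an attribute, which is what lets the verbose handler below read `record.id`. A minimal, self-contained sketch of that mechanism (the handler class and sample id are hypothetical, not part of this commit):

```python
import logging

class IdEchoHandler(logging.Handler):
    """Prints each record prefixed with the `id` supplied via `extra`."""

    def emit(self, record: logging.LogRecord) -> None:
        # Keys passed through `extra` become plain attributes on the record;
        # getattr() guards against records logged without an `extra` dict.
        call_id = getattr(record, "id", None)
        print(f"[{call_id}] {record.getMessage()}")

log = logging.getLogger("demo")
log.setLevel(logging.INFO)
log.addHandler(IdEchoHandler())

log.info("Prompt Messages :: %s", "Hello!", extra={"id": "9f1c2a7e"})
# -> [9f1c2a7e] Prompt Messages :: Hello!
```

One caveat: `logging` raises `KeyError` when an `extra` key collides with a built-in `LogRecord` attribute, but `id` is not one of those, so the pattern used in this commit is safe.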

nemoguardrails/logging/explain.py (+1 -0)
@@ -43,6 +43,7 @@ class LLMCallSummary(BaseModel):


 class LLMCallInfo(LLMCallSummary):
+    id: Optional[str] = Field(default=None, description="The unique prompt identifier.")
     prompt: Optional[str] = Field(
         default=None, description="The prompt that was used for the LLM call."
     )
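Since `LLMCallInfo` is a pydantic model, the new `id` field is optional, defaults to `None`, and serializes alongside the existing fields. A rough sketch of the resulting behavior (the `LLMCallSummary` body below is a hypothetical stand-in for the real one in `explain.py`):

```python
from typing import Optional

from pydantic import BaseModel, Field

class LLMCallSummary(BaseModel):
    # Hypothetical stand-in; the real model carries more summary fields.
    duration: Optional[float] = None

class LLMCallInfo(LLMCallSummary):
    id: Optional[str] = Field(default=None, description="The unique prompt identifier.")
    prompt: Optional[str] = Field(
        default=None, description="The prompt that was used for the LLM call."
    )

info = LLMCallInfo()
assert info.id is None    # stays None until a callback assigns it
info.id = "9f1c2a7e"      # in callbacks.py this value comes from new_uuid()
print(info.model_dump())  # pydantic v2; use info.dict() on pydantic v1
```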

nemoguardrails/logging/verbose.py (+2 -1)
@@ -54,7 +54,7 @@ def emit(self, record) -> None:
                 skip_print = True
                 if verbose_llm_calls:
                     console.print("")
-                    console.print(f"[cyan]LLM {title}[/]")
+                    console.print(f"[cyan]LLM {title} ({record.id[:5]}..)[/]")
                     for line in body.split("\n"):
                         text = Text(line, style="black on #006600", end="\n")
                         text.pad_right(console.width)
@@ -66,6 +66,7 @@ def emit(self, record) -> None:
             if verbose_llm_calls:
                 skip_print = True
                 console.print("")
+                console.print(f"[cyan]LLM Prompt ({record.id[:5]}..)[/]")

                 for line in body.split("\n"):
                     if line.strip() == "[/]":
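In verbose output, the truncated identifier makes it possible to pair a prompt header with its completion when several LLM calls interleave. A small sketch of how the shortened id renders with Rich (the runtime values are made up):

```python
from rich.console import Console

console = Console()

# Hypothetical runtime values: `title` comes from the log message and
# `record_id` is the per-call identifier assigned in callbacks.py.
title = "Completion"
record_id = "9f1c2a7e-4d0b-4c2e-8f6a-1b2c3d4e5f60"

# Only the first five characters are kept: enough to match a prompt with
# its completion without the full UUID dominating the line.
console.print(f"[cyan]LLM {title} ({record_id[:5]}..)[/]")
# Renders (in cyan): LLM Completion (9f1c2..)
```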

0 commit comments
