Commit 3e1833b

Merge pull request #811 from NVIDIA/feature/improve_llm_logs

Feature/improve llm logs

2 parents 1cd9c91 + 6b2fdcd

4 files changed: +56 -5 lines changed

nemoguardrails/actions/v2_x/generation.py (+26)

@@ -273,6 +273,10 @@ async def generate_user_intent(
         if is_embedding_only:
             return f"{potential_user_intents[0]}"

+        llm_call_info_var.set(
+            LLMCallInfo(task=Task.GENERATE_USER_INTENT_FROM_USER_ACTION.value)
+        )
+
         prompt = self.llm_task_manager.render_task_prompt(
             task=Task.GENERATE_USER_INTENT_FROM_USER_ACTION,
             events=events,
@@ -343,6 +347,12 @@ async def generate_user_intent_and_bot_action(
             state, user_action, max_example_flows
         )

+        llm_call_info_var.set(
+            LLMCallInfo(
+                task=Task.GENERATE_USER_INTENT_AND_BOT_ACTION_FROM_USER_ACTION.value
+            )
+        )
+
         prompt = self.llm_task_manager.render_task_prompt(
             task=Task.GENERATE_USER_INTENT_AND_BOT_ACTION_FROM_USER_ACTION,
             events=events,
@@ -504,6 +514,10 @@ async def generate_flow_from_instructions(
         flow_id = new_uuid()[0:4]
         flow_name = f"dynamic_{flow_id}"

+        llm_call_info_var.set(
+            LLMCallInfo(task=Task.GENERATE_FLOW_FROM_INSTRUCTIONS.value)
+        )
+
         prompt = self.llm_task_manager.render_task_prompt(
             task=Task.GENERATE_FLOW_FROM_INSTRUCTIONS,
             events=events,
@@ -567,6 +581,8 @@ async def generate_flow_from_name(
         for result in reversed(results):
             examples += f"{result.meta['flow']}\n"

+        llm_call_info_var.set(LLMCallInfo(task=Task.GENERATE_FLOW_FROM_NAME.value))
+
         prompt = self.llm_task_manager.render_task_prompt(
             task=Task.GENERATE_FLOW_FROM_NAME,
             events=events,
@@ -628,6 +644,8 @@ async def generate_flow_continuation(

         # TODO: add examples from the actual running flows

+        llm_call_info_var.set(LLMCallInfo(task=Task.GENERATE_FLOW_CONTINUATION.value))
+
         prompt = self.llm_task_manager.render_task_prompt(
             task=Task.GENERATE_FLOW_CONTINUATION,
             events=events,
@@ -743,6 +761,10 @@ async def generate_value(
             if "GenerateValueAction" not in result.text:
                 examples += f"{result.text}\n\n"

+        llm_call_info_var.set(
+            LLMCallInfo(task=Task.GENERATE_VALUE_FROM_INSTRUCTION.value)
+        )
+
         prompt = self.llm_task_manager.render_task_prompt(
             task=Task.GENERATE_VALUE_FROM_INSTRUCTION,
             events=events,
@@ -849,6 +871,10 @@ async def generate_flow(
             textwrap.dedent(docstring), context=render_context, events=events
         )

+        llm_call_info_var.set(
+            LLMCallInfo(task=Task.GENERATE_FLOW_CONTINUATION_FROM_NLD.value)
+        )
+
         prompt = self.llm_task_manager.render_task_prompt(
             task=Task.GENERATE_FLOW_CONTINUATION_FROM_NLD,
             events=events,

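Each v2.x generation action above now sets an LLMCallInfo labeled with the task into the llm_call_info_var context variable just before rendering the prompt, so that the logging callbacks below can attribute the upcoming LLM call. The following is a minimal sketch of the underlying contextvars pattern; the LLMCallInfo class and variable here are simplified stand-ins for the real objects in the package (the real LLMCallInfo is shown in nemoguardrails/logging/explain.py below):

    from contextvars import ContextVar
    from typing import Optional

    # Simplified stand-in for nemoguardrails.logging.explain.LLMCallInfo.
    class LLMCallInfo:
        def __init__(self, task: Optional[str] = None) -> None:
            self.task = task

    # Simplified stand-in for the package's llm_call_info_var context variable.
    llm_call_info_var: ContextVar[Optional[LLMCallInfo]] = ContextVar(
        "llm_call_info", default=None
    )

    # The action labels the upcoming LLM call with its task name ...
    llm_call_info_var.set(LLMCallInfo(task="generate_user_intent_from_user_action"))

    # ... and a callback running later in the same async context reads it back.
    info = llm_call_info_var.get()
    assert info is not None and info.task == "generate_user_intent_from_user_action"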
nemoguardrails/logging/callbacks.py (+25 -4)

@@ -27,6 +27,7 @@
 from nemoguardrails.logging.explain import LLMCallInfo
 from nemoguardrails.logging.processing_log import processing_log_var
 from nemoguardrails.logging.stats import LLMStats
+from nemoguardrails.utils import new_uuid

 log = logging.getLogger(__name__)

@@ -51,13 +52,19 @@ async def on_llm_start(
         llm_call_info = LLMCallInfo()
         llm_call_info_var.set(llm_call_info)

+        llm_call_info.id = new_uuid()
+
         # We also add it to the explain object
         explain_info = explain_info_var.get()
         if explain_info:
             explain_info.llm_calls.append(llm_call_info)

         log.info("Invocation Params :: %s", kwargs.get("invocation_params", {}))
-        log.info("Prompt :: %s", prompts[0])
+        log.info(
+            "Prompt :: %s",
+            prompts[0],
+            extra={"id": llm_call_info.id, "task": llm_call_info.task},
+        )
         llm_call_info.prompt = prompts[0]

         llm_call_info.started_at = time()
@@ -86,6 +93,8 @@ async def on_chat_model_start(
         llm_call_info = LLMCallInfo()
         llm_call_info_var.set(llm_call_info)

+        llm_call_info.id = new_uuid()
+
         # We also add it to the explain object
         explain_info = explain_info_var.get()
         if explain_info:
@@ -109,7 +118,11 @@ async def on_chat_model_start(
         )

         log.info("Invocation Params :: %s", kwargs.get("invocation_params", {}))
-        log.info("Prompt Messages :: %s", prompt)
+        log.info(
+            "Prompt Messages :: %s",
+            prompt,
+            extra={"id": llm_call_info.id, "task": llm_call_info.task},
+        )
         llm_call_info.prompt = prompt
         llm_call_info.started_at = time()

@@ -143,12 +156,16 @@ async def on_llm_end(
         **kwargs: Any,
     ) -> None:
         """Run when LLM ends running."""
-        log.info("Completion :: %s", response.generations[0][0].text)
         llm_call_info = llm_call_info_var.get()
         if llm_call_info is None:
             llm_call_info = LLMCallInfo()
         llm_call_info.completion = response.generations[0][0].text
         llm_call_info.finished_at = time()
+        log.info(
+            "Completion :: %s",
+            response.generations[0][0].text,
+            extra={"id": llm_call_info.id, "task": llm_call_info.task},
+        )

         llm_stats = llm_stats_var.get()
         if llm_stats is None:
@@ -159,7 +176,11 @@ async def on_llm_end(
         if len(response.generations[0]) > 1:
             for i, generation in enumerate(response.generations[0][1:]):
                 log.info("--- :: Completion %d", i + 2)
-                log.info("Completion :: %s", generation.text)
+                log.info(
+                    "Completion :: %s",
+                    generation.text,
+                    extra={"id": llm_call_info.id, "task": llm_call_info.task},
+                )

         log.info("Output Stats :: %s", response.llm_output)
         took = llm_call_info.finished_at - llm_call_info.started_at

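The extra= argument added to these log.info calls is standard library behavior, not something NeMo Guardrails defines: every key in the dict becomes an attribute on the emitted LogRecord, which is what lets the verbose handler further down read record.id and record.task. A minimal, self-contained sketch of that mechanism (the logger name and values are invented for illustration):

    import logging

    class IdTaskFormatter(logging.Formatter):
        """Formats records that carry "id" and "task" attributes via extra=."""

        def format(self, record: logging.LogRecord) -> str:
            # These attributes exist only because the caller passed extra=.
            return f"[{record.id[:5]}.. {record.task}] {record.getMessage()}"

    handler = logging.StreamHandler()
    handler.setFormatter(IdTaskFormatter())
    demo_log = logging.getLogger("demo")
    demo_log.addHandler(handler)
    demo_log.setLevel(logging.INFO)

    demo_log.info(
        "Prompt :: %s",
        "What can you do?",
        extra={"id": "a1b2c3d4", "task": "general"},
    )
    # Prints: [a1b2c.. general] Prompt :: What can you do?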
nemoguardrails/logging/explain.py (+1)

@@ -43,6 +43,7 @@ class LLMCallSummary(BaseModel):


 class LLMCallInfo(LLMCallSummary):
+    id: Optional[str] = Field(default=None, description="The unique prompt identifier.")
     prompt: Optional[str] = Field(
         default=None, description="The prompt that was used for the LLM call."
     )

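Because id is defined on LLMCallInfo, the identifier also appears on the records collected by the explain mechanism, so verbose console output can be correlated with programmatic inspection. A hedged usage sketch (the config path is a placeholder):

    from nemoguardrails import LLMRails, RailsConfig

    config = RailsConfig.from_path("./config")  # placeholder config directory
    rails = LLMRails(config, verbose=True)
    rails.generate(messages=[{"role": "user", "content": "Hello!"}])

    info = rails.explain()
    for llm_call in info.llm_calls:
        # llm_call.id matches the shortened id printed in the verbose output.
        print(llm_call.id, llm_call.task)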
nemoguardrails/logging/verbose.py (+4 -1)

@@ -54,7 +54,7 @@ def emit(self, record) -> None:
             skip_print = True
             if verbose_llm_calls:
                 console.print("")
-                console.print(f"[cyan]LLM {title}[/]")
+                console.print(f"[cyan]LLM {title} ({record.id[:5]}..)[/]")
                 for line in body.split("\n"):
                     text = Text(line, style="black on #006600", end="\n")
                     text.pad_right(console.width)
@@ -66,6 +66,9 @@ def emit(self, record) -> None:
             if verbose_llm_calls:
                 skip_print = True
                 console.print("")
+                console.print(
+                    f"[cyan]LLM Prompt ({record.id[:5]}..) - {record.task}[/]"
+                )

                 for line in body.split("\n"):
                     if line.strip() == "[/]":

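The net effect in verbose mode is that each prompt and completion block is prefixed with the first five characters of the call's new id, and prompts additionally carry the task name, so interleaved LLM calls can be matched up. Illustrative output (the id and task values are invented):

    LLM Prompt (a1b2c..) - generate_user_intent_from_user_action
    ...prompt body...

    LLM Completion (a1b2c..)
    ...completion body...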