
Commit 6fa0b4a

schuellc-nvidia authored and Pouyanpi committed
Add prompt task name to logging messages
1 parent eaf6c35 commit 6fa0b4a

3 files changed: +40 -4 lines changed

nemoguardrails/actions/v2_x/generation.py

+28
@@ -48,11 +48,13 @@
     get_element_from_head,
     get_event_from_element,
 )
+from nemoguardrails.context import llm_call_info_var
 from nemoguardrails.embeddings.index import EmbeddingsIndex, IndexItem
 from nemoguardrails.llm.filters import colang
 from nemoguardrails.llm.params import llm_params
 from nemoguardrails.llm.types import Task
 from nemoguardrails.logging import verbose
+from nemoguardrails.logging.explain import LLMCallInfo
 from nemoguardrails.utils import console, new_uuid

 log = logging.getLogger(__name__)
@@ -265,6 +267,10 @@ async def generate_user_intent(
         if is_embedding_only:
             return f"{potential_user_intents[0]}"

+        llm_call_info_var.set(
+            LLMCallInfo(task=Task.GENERATE_USER_INTENT_FROM_USER_ACTION.value)
+        )
+
         prompt = self.llm_task_manager.render_task_prompt(
             task=Task.GENERATE_USER_INTENT_FROM_USER_ACTION,
             events=events,
@@ -335,6 +341,12 @@ async def generate_user_intent_and_bot_action(
             state, user_action, max_example_flows
         )

+        llm_call_info_var.set(
+            LLMCallInfo(
+                task=Task.GENERATE_USER_INTENT_AND_BOT_ACTION_FROM_USER_ACTION.value
+            )
+        )
+
         prompt = self.llm_task_manager.render_task_prompt(
             task=Task.GENERATE_USER_INTENT_AND_BOT_ACTION_FROM_USER_ACTION,
             events=events,
@@ -448,6 +460,10 @@ async def generate_flow_from_instructions(
         flow_id = new_uuid()[0:4]
         flow_name = f"dynamic_{flow_id}"

+        llm_call_info_var.set(
+            LLMCallInfo(task=Task.GENERATE_FLOW_FROM_INSTRUCTIONS.value)
+        )
+
         prompt = self.llm_task_manager.render_task_prompt(
             task=Task.GENERATE_FLOW_FROM_INSTRUCTIONS,
             events=events,
@@ -511,6 +527,8 @@ async def generate_flow_from_name(
         for result in reversed(results):
             examples += f"{result.meta['flow']}\n"

+        llm_call_info_var.set(LLMCallInfo(task=Task.GENERATE_FLOW_FROM_NAME.value))
+
         prompt = self.llm_task_manager.render_task_prompt(
             task=Task.GENERATE_FLOW_FROM_NAME,
             events=events,
@@ -572,6 +590,8 @@ async def generate_flow_continuation(

         # TODO: add examples from the actual running flows

+        llm_call_info_var.set(LLMCallInfo(task=Task.GENERATE_FLOW_CONTINUATION.value))
+
         prompt = self.llm_task_manager.render_task_prompt(
             task=Task.GENERATE_FLOW_CONTINUATION,
             events=events,
@@ -687,6 +707,10 @@ async def generate_value(
             if "GenerateValueAction" not in result.text:
                 examples += f"{result.text}\n\n"

+        llm_call_info_var.set(
+            LLMCallInfo(task=Task.GENERATE_VALUE_FROM_INSTRUCTION.value)
+        )
+
         prompt = self.llm_task_manager.render_task_prompt(
             task=Task.GENERATE_VALUE_FROM_INSTRUCTION,
             events=events,
@@ -793,6 +817,10 @@ async def generate_flow(
             textwrap.dedent(docstring), context=render_context, events=events
         )

+        llm_call_info_var.set(
+            LLMCallInfo(task=Task.GENERATE_FLOW_CONTINUATION_FROM_NLD.value)
+        )
+
         prompt = self.llm_task_manager.render_task_prompt(
             task=Task.GENERATE_FLOW_CONTINUATION_FROM_NLD,
             events=events,
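Every generation action above follows the same pattern: set `llm_call_info_var` with the current `Task` just before rendering the task prompt, so downstream logging can tell which prompt task produced a given LLM call. A minimal, self-contained sketch of that mechanism, assuming `llm_call_info_var` is a `contextvars.ContextVar`, with a simplified stand-in for `LLMCallInfo` and an illustrative task-name string:

from contextvars import ContextVar
from dataclasses import dataclass
from typing import Optional


@dataclass
class LLMCallInfo:
    # Simplified stand-in for nemoguardrails.logging.explain.LLMCallInfo.
    task: Optional[str] = None


# Mirrors nemoguardrails.context.llm_call_info_var (assumed to be a ContextVar).
llm_call_info_var: ContextVar[Optional[LLMCallInfo]] = ContextVar(
    "llm_call_info", default=None
)


def action(task_name: str) -> None:
    # As in the diff: record the task before the prompt is rendered and sent.
    llm_call_info_var.set(LLMCallInfo(task=task_name))


def callback() -> None:
    # Later, the logging callback reads the same context variable.
    info = llm_call_info_var.get()
    print(f"task = {info.task if info else None}")


action("generate_user_intent_from_user_action")
callback()  # -> task = generate_user_intent_from_user_action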

nemoguardrails/logging/callbacks.py

+9 -3
@@ -114,7 +114,11 @@ async def on_chat_model_start(
         )

         log.info("Invocation Params :: %s", kwargs.get("invocation_params", {}))
-        log.info("Prompt Messages :: %s", prompt, extra={"id": llm_call_info.id})
+        log.info(
+            "Prompt Messages :: %s",
+            prompt,
+            extra={"id": llm_call_info.id, "task": llm_call_info.task},
+        )
         llm_call_info.prompt = prompt
         llm_call_info.started_at = time()

@@ -156,7 +160,7 @@ async def on_llm_end(
         log.info(
             "Completion :: %s",
             response.generations[0][0].text,
-            extra={"id": llm_call_info.id},
+            extra={"id": llm_call_info.id, "task": llm_call_info.task},
         )

         llm_stats = llm_stats_var.get()
@@ -169,7 +173,9 @@
         for i, generation in enumerate(response.generations[0][1:]):
             log.info("--- :: Completion %d", i + 2)
             log.info(
-                "Completion :: %s", generation.text, extra={"id": llm_call_info.id}
+                "Completion :: %s",
+                generation.text,
+                extra={"id": llm_call_info.id, "task": llm_call_info.task},
             )

         log.info("Output Stats :: %s", response.llm_output)

nemoguardrails/logging/verbose.py

+3 -1
@@ -66,7 +66,9 @@ def emit(self, record) -> None:
         if verbose_llm_calls:
             skip_print = True
             console.print("")
-            console.print(f"[cyan]LLM Prompt ({record.id[:5]}..)[/]")
+            console.print(
+                f"[cyan]LLM Prompt ({record.id[:5]}..) - {record.task}[/]"
+            )

         for line in body.split("\n"):
             if line.strip() == "[/]":
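One caveat worth noting: `record.task` is a plain attribute lookup, so this branch relies on every record that reaches it having been logged with `task` in its `extra` dict, which the callbacks.py changes above provide. A self-contained check of that lookup, plus a hypothetical `getattr` fallback that is not part of this commit:

import logging

# Build a record the way logging does when extra={"id": ..., "task": ...} is used.
record = logging.LogRecord(
    name="demo", level=logging.INFO, pathname="demo.py", lineno=1,
    msg="Prompt Messages :: %s", args=("Hi",), exc_info=None,
)
record.id = "a1b2c3d4"
record.task = "general"

print(f"LLM Prompt ({record.id[:5]}..) - {record.task}")
# -> LLM Prompt (a1b2c..) - general

# Hypothetical defensive variant, if a record might lack the attribute:
print(getattr(record, "task", "unknown"))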
