Commit 5ded719

Rename variables. Strip '_lst' suffix.

variables:
- num_prompt_tokens_lst -> num_prompt_tokens
- num_generation_tokens_lst -> num_generation_tokens

1 parent f9bc64e

2 files changed: +14 -14 lines changed


vllm/engine/llm_engine.py

Lines changed: 6 additions & 6 deletions
@@ -574,8 +574,8 @@ def _get_stats(self,
         num_waiting = len(self.scheduler.waiting)
 
         # Iteration stats if we have scheduler output.
-        num_prompt_tokens_lst = []
-        num_generation_tokens_lst = []
+        num_prompt_tokens = []
+        num_generation_tokens = []
         request_n = []
         request_best_of = []
         time_to_first_tokens = []
@@ -587,13 +587,13 @@ def _get_stats(self,
 
         # Number of Tokens
         if prompt_run:
-            num_prompt_tokens_lst = [
+            num_prompt_tokens = [
                 len(scheduled_seq_group.seq_group.prompt_token_ids)
                 for scheduled_seq_group in
                 scheduler_outputs.scheduled_seq_groups
             ]
         else:
-            num_generation_tokens_lst = [
+            num_generation_tokens = [
                 seq.get_output_len() for scheduled_seq_group in
                 scheduler_outputs.scheduled_seq_groups for seq in
                 scheduled_seq_group.seq_group.get_finished_seqs()
@@ -643,8 +643,8 @@ def _get_stats(self,
             gpu_cache_usage=gpu_cache_usage,
             cpu_cache_usage=cpu_cache_usage,
             finished_reason_lst=finished_reason_lst,
-            num_prompt_tokens_lst=num_prompt_tokens_lst,
-            num_generation_tokens_lst=num_generation_tokens_lst,
+            num_prompt_tokens=num_prompt_tokens,
+            num_generation_tokens=num_generation_tokens,
             request_n=request_n,
             request_best_of=request_best_of,
             time_to_first_tokens=time_to_first_tokens,
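Note: the renamed lists are filled once per engine iteration, with one entry per scheduled sequence group on a prompt run and one entry per finished sequence on a decode run. A minimal, self-contained sketch of that pattern follows; Seq and SeqGroup here are hypothetical stand-ins, not the real vLLM classes.

from dataclasses import dataclass, field
from typing import List, Tuple


@dataclass
class Seq:
    output_token_ids: List[int] = field(default_factory=list)

    def get_output_len(self) -> int:
        return len(self.output_token_ids)


@dataclass
class SeqGroup:
    prompt_token_ids: List[int] = field(default_factory=list)
    finished_seqs: List[Seq] = field(default_factory=list)

    def get_finished_seqs(self) -> List[Seq]:
        return self.finished_seqs


def count_tokens(scheduled_seq_groups: List[SeqGroup],
                 prompt_run: bool) -> Tuple[List[int], List[int]]:
    # Mirrors the diff: one list is populated per iteration,
    # the other stays empty.
    num_prompt_tokens: List[int] = []
    num_generation_tokens: List[int] = []
    if prompt_run:
        num_prompt_tokens = [
            len(g.prompt_token_ids) for g in scheduled_seq_groups
        ]
    else:
        num_generation_tokens = [
            seq.get_output_len()
            for g in scheduled_seq_groups
            for seq in g.get_finished_seqs()
        ]
    return num_prompt_tokens, num_generation_tokens


# e.g. count_tokens([SeqGroup(prompt_token_ids=[1, 2, 3])], prompt_run=True)
# -> ([3], [])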

vllm/engine/metrics.py

Lines changed: 8 additions & 8 deletions
@@ -168,8 +168,8 @@ class Stats:
 
     # Raw stats from last model iteration.
     finished_reason_lst: List[str]
-    num_prompt_tokens_lst: List[int]
-    num_generation_tokens_lst: List[int]
+    num_prompt_tokens: List[int]
+    num_generation_tokens: List[int]
     request_n: List[int]
     request_best_of: List[int]
     time_to_first_tokens: List[float]
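After this change the Stats container carries the renamed fields. A trimmed sketch of how they line up, assuming Stats is a plain dataclass and reducing the field set to the ones this diff touches:

from dataclasses import dataclass
from typing import List


@dataclass
class Stats:
    """Trimmed-down stand-in for the Stats container in metrics.py."""
    now: float
    finished_reason_lst: List[str]
    num_prompt_tokens: List[int]       # was num_prompt_tokens_lst
    num_generation_tokens: List[int]   # was num_generation_tokens_lst
    request_n: List[int]
    request_best_of: List[int]
    time_to_first_tokens: List[float]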
@@ -227,9 +227,9 @@ def _log_prometheus(self, stats: Stats) -> None:
 
         # Add to token counters.
         self.metrics.counter_prompt_tokens.labels(**self.labels).inc(
-            sum(stats.num_prompt_tokens_lst))
+            sum(stats.num_prompt_tokens))
         self.metrics.counter_generation_tokens.labels(**self.labels).inc(
-            sum(stats.num_generation_tokens_lst))
+            sum(stats.num_generation_tokens))
 
         # Add to request counters.
         finished_reason_counter = CollectionsCounter(stats.finished_reason_lst)
@@ -241,10 +241,10 @@ def _log_prometheus(self, stats: Stats) -> None:
             }).inc(count)
 
         # Observe number of tokens in histograms.
-        for val in stats.num_prompt_tokens_lst:
+        for val in stats.num_prompt_tokens:
             self.metrics.histogram_request_prompt_tokens.labels(
                 **self.labels).observe(val)
-        for val in stats.num_generation_tokens_lst:
+        for val in stats.num_generation_tokens:
             self.metrics.histogram_request_generation_tokens.labels(
                 **self.labels).observe(val)
 
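The two hunks above follow the usual prometheus_client pattern: the iteration total goes into a labelled Counter with a single inc(), while per-request values are fed to a labelled Histogram one observe() at a time so the distribution stays meaningful. A minimal sketch, with illustrative metric and label names rather than the exact ones vLLM registers:

from prometheus_client import Counter, Histogram

labels = {"model_name": "my-model"}  # hypothetical label set

counter_prompt_tokens = Counter(
    "prompt_tokens_total",             # illustrative name
    "Number of prefill tokens processed.",
    labelnames=list(labels))
histogram_request_prompt_tokens = Histogram(
    "request_prompt_tokens",           # illustrative name
    "Number of prefill tokens per request.",
    labelnames=list(labels))

num_prompt_tokens = [12, 48, 7]  # e.g. taken from a Stats instance

# Counter: one inc() with the per-iteration total.
counter_prompt_tokens.labels(**labels).inc(sum(num_prompt_tokens))

# Histogram: one observe() per request.
for val in num_prompt_tokens:
    histogram_request_prompt_tokens.labels(**labels).observe(val)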

@@ -289,8 +289,8 @@ def log(self, stats: Stats) -> None:
         self._log_prometheus(stats)
 
         # Save tracked stats for token counters.
-        self.num_prompt_tokens.append(sum(stats.num_prompt_tokens_lst))
-        self.num_generation_tokens.append(sum(stats.num_generation_tokens_lst))
+        self.num_prompt_tokens.append(sum(stats.num_prompt_tokens))
+        self.num_generation_tokens.append(sum(stats.num_generation_tokens))
 
         # Log locally every local_interval seconds.
         if self._local_interval_elapsed(stats.now):
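For context, _local_interval_elapsed is typically just a timestamp gate on the periodic local log. A hedged sketch of that logic; the attribute names below are assumptions implied by the method name, not copied from metrics.py:

import time


class LoggerSketch:
    def __init__(self, local_interval: float = 5.0) -> None:
        self.local_interval = local_interval
        self.last_local_log = time.time()

    def _local_interval_elapsed(self, now: float) -> bool:
        # True once at least local_interval seconds have passed
        # since the last local log.
        return (now - self.last_local_log) > self.local_interval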
