Commit a8b416f

cleanup
Signed-off-by: Alex <[email protected]>
1 parent aec2e94 commit a8b416f

File tree

1 file changed: 0 additions, 11 deletions


vllm/model_executor/models/utils.py

Lines changed: 0 additions & 11 deletions
@@ -186,7 +186,6 @@ def _load_module(
     ) -> Iterable[str]:
         if isinstance(module, PPMissingLayer):
             return
-
 
         # Avoid infinite recursion since this function is typically
         # called inside load_weights of the module itself
@@ -206,15 +205,6 @@ def _load_module(
 
         child_modules = dict(module.named_children())
         child_params = dict(module.named_parameters(recurse=False))
-        # print("===============================================")
-        # print(f"In base_prefix: {base_prefix}")
-        # if base_prefix == "model":
-        #     print(module)
-        # for module in child_modules.keys():
-        #     print(f"\tchild module: {module}")
-        # for param in child_params.keys():
-        #     print(f"\tchild param: {param}")
-        # print("===============================================")
 
         # Add missing tensors the weight loader needs to be able to load
         # that aren't registered as params, e.g., batchnorm statistics.
@@ -268,7 +258,6 @@ def load_weights(
         if mapper is not None:
            weights = mapper.apply(weights)
 
-        weights = list(weights)
         autoloaded_weights = set(self._load_module("", self.module, weights))
         return autoloaded_weights
 
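Note on the removed lines: the deletions in _load_module were commented-out debug prints that dumped a module's immediate children and directly-owned parameters, and the deletion in load_weights drops a weights = list(weights) call that materialized the weights iterable, presumably left over from the same debugging. Below is a minimal standalone sketch, not vLLM code, using only standard PyTorch APIs that appear in the diff (named_children() and named_parameters(recurse=False)); the module and helper names are made up for illustration.

import torch.nn as nn

# Toy stand-in for a model whose weights are being loaded; any nn.Module works.
model = nn.Sequential(nn.Linear(4, 8), nn.BatchNorm1d(8), nn.ReLU())

def dump_module(base_prefix: str, module: nn.Module) -> None:
    # Rough equivalent of the deleted prints: show the immediate children
    # and the parameters owned directly by this module (no recursion).
    child_modules = dict(module.named_children())
    child_params = dict(module.named_parameters(recurse=False))
    print(f"In base_prefix: {base_prefix}")
    for name in child_modules:
        print(f"\tchild module: {name}")
    for name in child_params:
        print(f"\tchild param: {name}")

dump_module("model", model)       # children "0", "1", "2"; no direct params
dump_module("model.0", model[0])  # no children; direct params "weight", "bias"

# The other removed line, weights = list(weights), forced an iterable of
# (name, tensor) pairs into a list. Without it the loader can consume the
# stream one entry at a time.
def weight_stream():
    # Hypothetical stand-in for weights read from a checkpoint.
    yield from model.state_dict().items()

for name, tensor in weight_stream():
    print(name, tuple(tensor.shape))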