
Commit d39e7f9 ("comments")
Parent: eb6ad2b

File tree: 2 files changed, +3 −0 lines


src/sparseml/modifiers/quantization/gptq/utils/gptq_wrapper.py (+1)
@@ -89,6 +89,7 @@ def fasterprune(
         Run pruning and quantization(if applicable) on the layer up to the target
         sparsity value.

+        :param actorder: Flag to apply activation reordering
         :param blocksize: Number of columns to compress in one pass
         :param percdamp: Amount of dampening to apply to H, as a fraction of the
             diagonal norm
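For context, the `actorder` flag being documented corresponds to GPTQ's activation-ordering trick: weight columns are quantized in order of decreasing Hessian diagonal, so the columns whose inputs carry the most energy are handled first. The sketch below illustrates that idea along with the `percdamp` dampening the docstring describes; the helper names and signatures are illustrative, not part of SparseML's API.

```python
import torch

def dampen_hessian(H: torch.Tensor, percdamp: float = 0.01) -> torch.Tensor:
    # Add a fraction of the mean diagonal to H's diagonal so the later
    # Cholesky/inverse steps of GPTQ stay numerically stable (this is
    # what the `percdamp` parameter controls).
    damp = percdamp * torch.mean(torch.diag(H))
    H = H.clone()
    H.diagonal().add_(damp)
    return H

def reorder_by_activation(W: torch.Tensor, H: torch.Tensor):
    # Activation reordering: sort columns by descending diag(H) so the
    # most activation-sensitive columns are quantized first; invperm
    # restores the original column order after compression.
    perm = torch.argsort(torch.diag(H), descending=True)
    invperm = torch.argsort(perm)
    return W[:, perm], H[perm][:, perm], invperm
```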

src/sparseml/modifiers/utils/layer_compressor.py (+2)
@@ -134,6 +134,8 @@ def revert_layer_wrappers(self):
     def compress(self, actorder: bool = False):
         """
         Apply compression to each wrapped submodule in the layer
+
+        :param actorder: flag to apply activation reordering
         """

     @torch.no_grad()
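As a rough picture of how the documented `compress(actorder=...)` signature fits together with the `fasterprune` docstring above, a minimal stand-in (not SparseML's actual class) might simply forward the flag to each wrapped submodule:

```python
import torch

class LayerCompressorSketch:
    """Illustrative stand-in for a layer compressor; only the actorder
    plumbing documented by this commit is shown."""

    def __init__(self, wrappers):
        self.wrappers = wrappers  # hypothetical per-module GPTQ wrappers

    @torch.no_grad()
    def compress(self, actorder: bool = False):
        # Apply compression to each wrapped submodule in the layer,
        # passing the activation-reordering flag through.
        for wrapper in self.wrappers:
            wrapper.fasterprune(actorder=actorder)
```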
