1 parent eb6ad2b commit d39e7f9
src/sparseml/modifiers/quantization/gptq/utils/gptq_wrapper.py
@@ -89,6 +89,7 @@ def fasterprune(
         Run pruning and quantization(if applicable) on the layer up to the target
         sparsity value.

+        :param actorder: Flag to apply activation reordering
         :param blocksize: Number of columns to compress in one pass
         :param percdamp: Amount of dampening to apply to H, as a fraction of the
             diagonal norm
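The new actorder parameter enables GPTQ's activation reordering. As a rough illustration of the idea only (not the sparseml implementation), the sketch below permutes weight columns by descending Hessian diagonal so that the most activation-sensitive columns are quantized first; all names here (W, H, perm, invperm) are hypothetical.

import torch

def reorder_by_activation(W: torch.Tensor, H: torch.Tensor):
    # H is a (columns x columns) Hessian approximation built from activations;
    # its diagonal estimates the importance of each weight column.
    perm = torch.argsort(torch.diag(H), descending=True)  # most important columns first
    W = W[:, perm]                                         # permute weight columns
    H = H[perm][:, perm]                                   # apply the same permutation to H
    invperm = torch.argsort(perm)                          # restores the original order afterwards
    return W, H, invperm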
src/sparseml/modifiers/utils/layer_compressor.py
@@ -134,6 +134,8 @@ def revert_layer_wrappers(self):
     def compress(self, actorder: bool = False):
         """
         Apply compression to each wrapped submodule in the layer
+
+        :param: actorder: flag to apply activation reordering
         """

     @torch.no_grad()
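Assuming the flag is simply threaded from LayerCompressor.compress through to each wrapped module's fasterprune call (an inference from the two hunks above, not something this diff shows end to end), a caller might enable it as follows.

# Hypothetical usage; `layer_compressor` is an already constructed
# LayerCompressor instance, not something defined in this diff.
layer_compressor.compress(actorder=True)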