1 parent efa81ab commit 78d52b5
pytorch_lightning/plugins/precision/fully_sharded_native_amp.py
@@ -20,17 +20,12 @@
 from pytorch_lightning.utilities import _FAIRSCALE_FULLY_SHARDED_AVAILABLE, GradClipAlgorithmType

 if _FAIRSCALE_FULLY_SHARDED_AVAILABLE:
-    from fairscale.experimental.optim import DynamicLossScaler
     from fairscale.nn.data_parallel import FullyShardedDataParallel


 class FullyShardedNativeMixedPrecisionPlugin(ShardedNativeMixedPrecisionPlugin):
     """Mixed Precision for Full Sharded Training"""

-    def __init__(self) -> None:
-        super().__init__()
-        self.scaler = DynamicLossScaler()
-
     def clip_gradients(
         self,
         optimizer: 'Optimizer',
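For context: with the overriding __init__ removed, FullyShardedNativeMixedPrecisionPlugin no longer replaces its scaler with fairscale's DynamicLossScaler and simply keeps whatever scaler its parent, ShardedNativeMixedPrecisionPlugin, sets up. The snippet below is a minimal illustrative sketch of that inheritance effect; the class names here (_BaseScaler, _DynamicLossScaler, ShardedPluginSketch, and the Before/After plugins) are stand-ins invented for the example, not the library's actual classes.

# Illustrative sketch only: simplified stand-ins, not the real
# pytorch_lightning classes. It shows why deleting the overriding
# __init__ changes which scaler the subclass ends up holding.


class _BaseScaler:
    """Stand-in for the scaler configured by the parent plugin."""


class _DynamicLossScaler:
    """Stand-in for fairscale.experimental.optim.DynamicLossScaler."""


class ShardedPluginSketch:
    """Stand-in for ShardedNativeMixedPrecisionPlugin."""

    def __init__(self) -> None:
        self.scaler = _BaseScaler()


class FullyShardedPluginBefore(ShardedPluginSketch):
    """Before this commit: __init__ swapped in a different scaler."""

    def __init__(self) -> None:
        super().__init__()
        self.scaler = _DynamicLossScaler()


class FullyShardedPluginAfter(ShardedPluginSketch):
    """After this commit: no __init__ override, the parent's scaler is kept."""


print(type(FullyShardedPluginBefore().scaler).__name__)  # _DynamicLossScaler
print(type(FullyShardedPluginAfter().scaler).__name__)   # _BaseScaler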