diff --git a/torchvision/models/video/mvit.py b/torchvision/models/video/mvit.py
index c686858ff08..94ed87b360c 100644
--- a/torchvision/models/video/mvit.py
+++ b/torchvision/models/video/mvit.py
@@ -667,6 +667,8 @@ def mvit_v1_b(*, weights: Optional[MViT_V1_B_Weights] = None, progress: bool = T
     Constructs a base MViTV1 architecture from
     `Multiscale Vision Transformers `__.
 
+    .. betastatus:: video module
+
     Args:
         weights (:class:`~torchvision.models.video.MViT_V1_B_Weights`, optional): The
             pretrained weights to use. See
@@ -763,6 +765,8 @@ def mvit_v2_s(*, weights: Optional[MViT_V2_S_Weights] = None, progress: bool = T
     Constructs a small MViTV2 architecture from
     `Multiscale Vision Transformers `__.
 
+    .. betastatus:: video module
+
     Args:
         weights (:class:`~torchvision.models.video.MViT_V2_S_Weights`, optional): The
             pretrained weights to use. See
diff --git a/torchvision/models/video/s3d.py b/torchvision/models/video/s3d.py
index bd698b03d6c..53e3e841a27 100644
--- a/torchvision/models/video/s3d.py
+++ b/torchvision/models/video/s3d.py
@@ -187,6 +187,8 @@ def s3d(*, weights: Optional[S3D_Weights] = None, progress: bool = True, **kwarg
     Reference: `Rethinking Spatiotemporal Feature Learning `__.
 
+    .. betastatus:: video module
+
     Args:
         weights (:class:`~torchvision.models.video.S3D_Weights`, optional): The
             pretrained weights to use. See
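
For context, the builders whose docstrings this diff touches each take an optional weights enum, as described in the Args sections above. Below is a minimal usage sketch; the KINETICS400_V1 enum values and the 16-frame 224x224 input shape are assumptions based on torchvision's published video weights, not part of this diff.

import torch
from torchvision.models.video import (
    MViT_V1_B_Weights, MViT_V2_S_Weights, S3D_Weights,
    mvit_v1_b, mvit_v2_s, s3d,
)

# Build the three models with (assumed) Kinetics-400 pretrained weights;
# passing weights=None (the default) returns randomly initialized models.
mvit_b = mvit_v1_b(weights=MViT_V1_B_Weights.KINETICS400_V1, progress=True)
mvit_s = mvit_v2_s(weights=MViT_V2_S_Weights.KINETICS400_V1)
s3d_net = s3d(weights=S3D_Weights.KINETICS400_V1)

# Run a dummy clip through one model: (batch, channels, frames, height, width).
mvit_b.eval()
clip = torch.rand(1, 3, 16, 224, 224)
with torch.no_grad():
    logits = mvit_b(clip)  # expected shape: (1, 400) for Kinetics-400 classes
print(logits.shape)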