Commit 253e543

Replace all _FP/_FT direct calls.
1 parent 04a3eab · commit 253e543

7 files changed (+19, -18 lines)

torchvision/prototype/transforms/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -8,7 +8,7 @@
 from ._auto_augment import RandAugment, TrivialAugmentWide, AutoAugment
 from ._container import Compose, RandomApply, RandomChoice, RandomOrder
 from ._geometry import HorizontalFlip, Resize, CenterCrop, RandomResizedCrop
-from ._meta_conversion import ConvertBoundingBoxFormat, ConvertImageDtype, ConvertImageColorSpace
+from ._meta import ConvertBoundingBoxFormat, ConvertImageDtype, ConvertImageColorSpace
 from ._misc import Identity, Normalize, ToDtype, Lambda
 from ._presets import CocoEval, ImageNetEval, VocEval, Kinect400Eval, RaftEval
 from ._type_conversion import DecodeImage, LabelToOneHot

torchvision/prototype/transforms/_utils.py

Lines changed: 3 additions & 3 deletions
@@ -4,7 +4,7 @@
 import torch
 from torchvision.prototype import features
 from torchvision.prototype.utils._internal import query_recursively
-from torchvision.transforms import functional_tensor as _FT, functional_pil as _FP
+from .functional._meta import get_dimensions_image_tensor, get_dimensions_image_pil


 def query_image(sample: Any) -> Union[PIL.Image.Image, torch.Tensor, features.Image]:
@@ -25,9 +25,9 @@ def get_image_dimensions(image: Union[PIL.Image.Image, torch.Tensor, features.Im
         channels = image.num_channels
         height, width = image.image_size
     elif isinstance(image, torch.Tensor):
-        channels, height, width = _FT.get_dimensions(image)
+        channels, height, width = get_dimensions_image_tensor(image)
     elif isinstance(image, PIL.Image.Image):
-        channels, height, width = _FP.get_dimensions(image)
+        channels, height, width = get_dimensions_image_pil(image)
     else:
         raise TypeError(f"unable to get image dimensions from object of type {type(image).__name__}")
     return channels, height, width
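
For context, the rewritten get_image_dimensions above dispatches on the input type. A minimal usage sketch, with illustrative shapes and sizes (this is a prototype module, so the import path may change):

import PIL.Image
import torch

from torchvision.prototype.transforms._utils import get_image_dimensions

# Tensor input dispatches to get_dimensions_image_tensor (CHW layout).
t = torch.rand(3, 32, 64)
print(get_image_dimensions(t))  # (3, 32, 64)

# PIL input dispatches to get_dimensions_image_pil; PIL sizes are (W, H),
# but the helper reports (channels, height, width) for both input types.
p = PIL.Image.new("RGB", (64, 32))
print(get_image_dimensions(p))  # (3, 32, 64)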

torchvision/prototype/transforms/functional/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -1,5 +1,5 @@
 from torchvision.transforms import InterpolationMode  # usort: skip
-from ._meta_conversion import (
+from ._meta import (
     convert_bounding_box_format,
     convert_image_color_space_tensor,
     convert_image_color_space_pil,

torchvision/prototype/transforms/functional/_geometry.py

Lines changed: 9 additions & 9 deletions
@@ -8,7 +8,7 @@
 from torchvision.transforms import functional_tensor as _FT, functional_pil as _FP
 from torchvision.transforms.functional import pil_modes_mapping, _get_inverse_affine_matrix

-from ._meta_conversion import convert_bounding_box_format
+from ._meta import convert_bounding_box_format, get_dimensions_image_tensor, get_dimensions_image_pil


 horizontal_flip_image_tensor = _FT.hflip
@@ -39,7 +39,7 @@ def resize_image_tensor(
     antialias: Optional[bool] = None,
 ) -> torch.Tensor:
     new_height, new_width = size
-    num_channels, old_height, old_width = _FT.get_dimensions(image)
+    num_channels, old_height, old_width = get_dimensions_image_tensor(image)
     batch_shape = image.shape[:-3]
     return _FT.resize(
         image.reshape((-1, num_channels, old_height, old_width)),
@@ -141,7 +141,7 @@ def affine_image_tensor(

     center_f = [0.0, 0.0]
     if center is not None:
-        _, height, width = _FT.get_dimensions(img)
+        _, height, width = get_dimensions_image_tensor(img)
         # Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center.
         center_f = [1.0 * (c - s * 0.5) for c, s in zip(center, (width, height))]

@@ -167,7 +167,7 @@ def affine_image_pil(
     # it is visually better to estimate the center without 0.5 offset
     # otherwise image rotated by 90 degrees is shifted vs output image of torch.rot90 or F_t.affine
     if center is None:
-        _, height, width = _FP.get_dimensions(img)
+        _, height, width = get_dimensions_image_pil(img)
         center = [width * 0.5, height * 0.5]
     matrix = _get_inverse_affine_matrix(center, angle, translate, scale, shear)

@@ -184,7 +184,7 @@ def rotate_image_tensor(
 ) -> torch.Tensor:
     center_f = [0.0, 0.0]
     if center is not None:
-        _, height, width = _FT.get_dimensions(img)
+        _, height, width = get_dimensions_image_tensor(img)
         # Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center.
         center_f = [1.0 * (c - s * 0.5) for c, s in zip(center, (width, height))]

@@ -260,13 +260,13 @@ def _center_crop_compute_crop_anchor(

 def center_crop_image_tensor(img: torch.Tensor, output_size: List[int]) -> torch.Tensor:
     crop_height, crop_width = _center_crop_parse_output_size(output_size)
-    _, image_height, image_width = _FT.get_dimensions(img)
+    _, image_height, image_width = get_dimensions_image_tensor(img)

     if crop_height > image_height or crop_width > image_width:
         padding_ltrb = _center_crop_compute_padding(crop_height, crop_width, image_height, image_width)
         img = pad_image_tensor(img, padding_ltrb, fill=0)

-    _, image_height, image_width = _FT.get_dimensions(img)
+    _, image_height, image_width = get_dimensions_image_tensor(img)
     if crop_width == image_width and crop_height == image_height:
         return img

@@ -276,13 +276,13 @@ def center_crop_image_tensor(img: torch.Tensor, output_size: List[int]) -> torch

 def center_crop_image_pil(img: PIL.Image.Image, output_size: List[int]) -> PIL.Image.Image:
     crop_height, crop_width = _center_crop_parse_output_size(output_size)
-    _, image_height, image_width = _FP.get_dimensions(img)
+    _, image_height, image_width = get_dimensions_image_pil(img)

     if crop_height > image_height or crop_width > image_width:
         padding_ltrb = _center_crop_compute_padding(crop_height, crop_width, image_height, image_width)
         img = pad_image_pil(img, padding_ltrb, fill=0)

-    _, image_height, image_width = _FP.get_dimensions(img)
+    _, image_height, image_width = get_dimensions_image_pil(img)
     if crop_width == image_width and crop_height == image_height:
         return img
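
As an aside on the affine/rotate hunks above: the center_f expression converts a user-supplied pixel-coordinate center into an offset from the image center, which is what the underlying affine kernels expect. A quick worked sketch with illustrative numbers (not taken from the commit):

width, height = 100, 50  # illustrative image size (W x H)
center = (70, 30)        # illustrative rotation center in pixel coordinates

# Same expression as in affine_image_tensor / rotate_image_tensor:
# (0, 0) in the result corresponds to the image center.
center_f = [1.0 * (c - s * 0.5) for c, s in zip(center, (width, height))]
print(center_f)  # [20.0, 5.0]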

torchvision/prototype/transforms/functional/_meta_conversion.py renamed to torchvision/prototype/transforms/functional/_meta.py

Lines changed: 4 additions & 0 deletions
@@ -4,6 +4,10 @@
 from torchvision.transforms import functional_tensor as _FT, functional_pil as _FP


+get_dimensions_image_tensor = _FT.get_dimensions
+get_dimensions_image_pil = _FP.get_dimensions
+
+
 def _xywh_to_xyxy(xywh: torch.Tensor) -> torch.Tensor:
     xyxy = xywh.clone()
     xyxy[..., 2:] += xyxy[..., :2]
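
The two added names are plain aliases for the private kernels, so the other prototype modules can call an explicit per-input-type function instead of importing functional_tensor/functional_pil directly. A minimal sketch of what a caller sees, assuming the module layout as of this commit (prototype code, so the path may move):

import torch

from torchvision.prototype.transforms.functional._meta import get_dimensions_image_tensor

# The alias is _FT.get_dimensions itself, which returns [channels, height, width].
img = torch.rand(3, 32, 64)
print(get_dimensions_image_tensor(img))  # [3, 32, 64]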

torchvision/prototype/transforms/functional/_misc.py

Lines changed: 1 addition & 4 deletions
@@ -2,13 +2,10 @@

 import PIL.Image
 import torch
-from torchvision.transforms import functional_tensor as _FT, functional_pil as _FP
+from torchvision.transforms import functional_tensor as _FT
 from torchvision.transforms.functional import to_tensor, to_pil_image


-get_dimensions_image_tensor = _FT.get_dimensions
-get_dimensions_image_pil = _FP.get_dimensions
-
 normalize_image_tensor = _FT.normalize
