Skip to content

Commit be73d6d

Browse files
committed
clean and add more comments
1 parent 0ff1cf7 commit be73d6d

13 files changed

+336
-225
lines changed

.github/workflows/no-response.yml

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,11 @@
11
name: No Response
22

3+
# TODO: it seems not to work
34
# Modified from: https://raw.githubusercontent.com/github/docs/main/.github/workflows/no-response.yaml
45

5-
# **What it does**: Closes issues that don't have enough information to be
6-
# actionable.
7-
# **Why we have it**: To remove the need for maintainers to remember to check
8-
# back on issues periodically to see if contributors have
9-
# responded.
6+
# **What it does**: Closes issues that don't have enough information to be actionable.
7+
# **Why we have it**: To remove the need for maintainers to remember to check back on issues periodically
8+
# to see if contributors have responded.
109
# **Who does it impact**: Everyone that works on docs or docs-internal.
1110

1211
on:

gfpgan/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,4 +3,4 @@
33
from .data import *
44
from .models import *
55
from .utils import *
6-
from .version import __gitsha__, __version__
6+
from .version import *

gfpgan/archs/arcface_arch.py

Lines changed: 57 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -2,13 +2,27 @@
22
from basicsr.utils.registry import ARCH_REGISTRY
33

44

5-
def conv3x3(in_planes, out_planes, stride=1):
6-
"""3x3 convolution with padding"""
7-
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
5+
def conv3x3(inplanes, outplanes, stride=1):
6+
"""A simple wrapper for 3x3 convolution with padding.
7+
8+
Args:
9+
inplanes (int): Channel number of inputs.
10+
outplanes (int): Channel number of outputs.
11+
stride (int): Stride in convolution. Default: 1.
12+
"""
13+
return nn.Conv2d(inplanes, outplanes, kernel_size=3, stride=stride, padding=1, bias=False)
814

915

1016
class BasicBlock(nn.Module):
11-
expansion = 1
17+
"""Basic residual block used in the ResNetArcFace architecture.
18+
19+
Args:
20+
inplanes (int): Channel number of inputs.
21+
planes (int): Channel number of outputs.
22+
stride (int): Stride in convolution. Default: 1.
23+
downsample (nn.Module): The downsample module. Default: None.
24+
"""
25+
expansion = 1 # output channel expansion ratio
1226

1327
def __init__(self, inplanes, planes, stride=1, downsample=None):
1428
super(BasicBlock, self).__init__()
@@ -40,7 +54,16 @@ def forward(self, x):
4054

4155

4256
class IRBlock(nn.Module):
43-
expansion = 1
57+
"""Improved residual block (IR Block) used in the ResNetArcFace architecture.
58+
59+
Args:
60+
inplanes (int): Channel number of inputs.
61+
planes (int): Channel number of outputs.
62+
stride (int): Stride in convolution. Default: 1.
63+
downsample (nn.Module): The downsample module. Default: None.
64+
        use_se (bool): Whether to use the SEBlock (squeeze and excitation block). Default: True.
65+
"""
66+
expansion = 1 # output channel expansion ratio
4467

4568
def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True):
4669
super(IRBlock, self).__init__()
@@ -78,7 +101,15 @@ def forward(self, x):
78101

79102

80103
class Bottleneck(nn.Module):
81-
expansion = 4
104+
"""Bottleneck block used in the ResNetArcFace architecture.
105+
106+
Args:
107+
inplanes (int): Channel number of inputs.
108+
planes (int): Channel number of outputs.
109+
stride (int): Stride in convolution. Default: 1.
110+
downsample (nn.Module): The downsample module. Default: None.
111+
"""
112+
expansion = 4 # output channel expansion ratio
82113

83114
def __init__(self, inplanes, planes, stride=1, downsample=None):
84115
super(Bottleneck, self).__init__()
@@ -116,10 +147,16 @@ def forward(self, x):
116147

117148

118149
class SEBlock(nn.Module):
150+
"""The squeeze-and-excitation block (SEBlock) used in the IRBlock.
151+
152+
Args:
153+
channel (int): Channel number of inputs.
154+
        reduction (int): Channel reduction ratio. Default: 16.
155+
"""
119156

120157
def __init__(self, channel, reduction=16):
121158
super(SEBlock, self).__init__()
122-
self.avg_pool = nn.AdaptiveAvgPool2d(1)
159+
self.avg_pool = nn.AdaptiveAvgPool2d(1) # pool to 1x1 without spatial information
123160
self.fc = nn.Sequential(
124161
nn.Linear(channel, channel // reduction), nn.PReLU(), nn.Linear(channel // reduction, channel),
125162
nn.Sigmoid())
@@ -133,13 +170,23 @@ def forward(self, x):
133170

134171
@ARCH_REGISTRY.register()
135172
class ResNetArcFace(nn.Module):
173+
"""ArcFace with ResNet architectures.
174+
175+
Ref: ArcFace: Additive Angular Margin Loss for Deep Face Recognition.
176+
177+
Args:
178+
block (str): Block used in the ArcFace architecture.
179+
layers (tuple(int)): Block numbers in each layer.
180+
        use_se (bool): Whether to use the SEBlock (squeeze and excitation block). Default: True.
181+
"""
136182

137183
def __init__(self, block, layers, use_se=True):
138184
if block == 'IRBlock':
139185
block = IRBlock
140186
self.inplanes = 64
141187
self.use_se = use_se
142188
super(ResNetArcFace, self).__init__()
189+
143190
self.conv1 = nn.Conv2d(1, 64, kernel_size=3, padding=1, bias=False)
144191
self.bn1 = nn.BatchNorm2d(64)
145192
self.prelu = nn.PReLU()
@@ -153,6 +200,7 @@ def __init__(self, block, layers, use_se=True):
153200
self.fc5 = nn.Linear(512 * 8 * 8, 512)
154201
self.bn5 = nn.BatchNorm1d(512)
155202

203+
# initialization
156204
for m in self.modules():
157205
if isinstance(m, nn.Conv2d):
158206
nn.init.xavier_normal_(m.weight)
@@ -163,7 +211,7 @@ def __init__(self, block, layers, use_se=True):
163211
nn.init.xavier_normal_(m.weight)
164212
nn.init.constant_(m.bias, 0)
165213

166-
def _make_layer(self, block, planes, blocks, stride=1):
214+
def _make_layer(self, block, planes, num_blocks, stride=1):
167215
downsample = None
168216
if stride != 1 or self.inplanes != planes * block.expansion:
169217
downsample = nn.Sequential(
@@ -173,7 +221,7 @@ def _make_layer(self, block, planes, blocks, stride=1):
173221
layers = []
174222
layers.append(block(self.inplanes, planes, stride, downsample, use_se=self.use_se))
175223
self.inplanes = planes
176-
for _ in range(1, blocks):
224+
for _ in range(1, num_blocks):
177225
layers.append(block(self.inplanes, planes, use_se=self.use_se))
178226

179227
return nn.Sequential(*layers)

0 commit comments

Comments
 (0)