forward() takes 1 positional argument but 2 were given #8091
Answered by awaelchli
morestart asked this question in code help: CV
When I try to write a model, I get the error in the title. I guess the problem is somewhere in UpSample, but I don't know why...

```python
class DownSample(nn.Module):
    def __init__(self, in_planes: int, out_planes: int, kernel_size: int):
        super(DownSample, self).__init__()
        self.down = nn.Sequential(
            nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=2, padding=1),
            nn.BatchNorm2d(out_planes),
            nn.LeakyReLU()
        )
        init_weight.initialize(self)

    def forward(self, x):
        return self.down(x)


class UpSample(nn.Module):
    def __init__(self, in_planes: int, out_planes: int,
                 kernel_size: int, padding: int, output_padding: int,
                 apply_dropout: bool = False):
        super(UpSample, self).__init__()
        self.up = nn.ModuleList()
        self.up.append(
            nn.ConvTranspose2d(in_planes, out_planes, kernel_size, stride=2,
                               padding=padding, output_padding=output_padding),
        )
        self.up.append(nn.BatchNorm2d(out_planes))
        if apply_dropout:
            self.up.append(nn.Dropout())
        self.up.append(nn.LeakyReLU())
        init_weight.initialize(self)

    def forward(self, inputs):
        return self.up(inputs)


class MyEncoder(nn.Module):
    def __init__(self):
        super(MyEncoder, self).__init__()
        down_stack = [
            pix2pix.DownSample(3, 64, 4),
            pix2pix.DownSample(64, 128, 4),
            pix2pix.DownSample(128, 256, 4),
            pix2pix.DownSample(256, 512, 4),
            pix2pix.DownSample(512, 512, 4),
            pix2pix.DownSample(512, 512, 4),
            pix2pix.DownSample(512, 512, 4),
            pix2pix.DownSample(512, 512, 4),
        ]
        self.encoder = nn.ModuleList()
        for item in down_stack:
            self.encoder.append(item)

    def forward(self, inputs):
        feat = inputs
        for i in range(len(self.encoder)):
            feat = self.encoder[i](feat)
        return feat


class MyDecoder(nn.Module):
    def __init__(self):
        super(MyDecoder, self).__init__()
        up_stack = [
            pix2pix.UpSample(512, 512, 4, 1, 1, True),
            pix2pix.UpSample(512, 512, 4, 1, 1, True),
            pix2pix.UpSample(512, 512, 4, 1, 1, True),
            pix2pix.UpSample(512, 512, 4, 1, 1, True),
            pix2pix.UpSample(512, 256, 4, 1, 1, True),
            pix2pix.UpSample(256, 128, 4, 1, 1, True),
            pix2pix.UpSample(256, 128, 4, 1, 1, True),
            pix2pix.UpSample(128, 64, 4, 1, 1, True),
        ]
        self.up = nn.ModuleList()
        for item in up_stack:
            self.up.append(item)

    def forward(self, inputs):
        return self.up(inputs)


class MyNet(pl.LightningModule):
    def __init__(self):
        super(MyNet, self).__init__()
        self.encoder = MyEncoder()
        self.decoder = MyDecoder()

    def forward(self, inputs):
        feat = self.encoder(inputs)
        feat = self.decoder(feat)
        return feat


if __name__ == '__main__':
    from torchsummaryX import summary
    import torch

    x = torch.ones((1, 3, 512, 512))
    u = UNet()
    summary(model=u, x=x)
```
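Running the composed model directly (rather than only through summary) makes the failing forward easier to localize from the traceback. A minimal sketch, assuming the classes above are importable exactly as written:

```python
import torch

# Build the model defined above and push a dummy batch through it; the last
# frame of the traceback names the module whose forward() rejects the input.
model = MyNet()
x = torch.ones(1, 3, 512, 512)
try:
    out = model(x)
    print(out.shape)
except (TypeError, NotImplementedError) as err:
    print(type(err).__name__, err)
```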
Answered by awaelchli on Jun 25, 2021
Replies: 2 comments, 1 reply
Hi, have a look at the full stack trace so you know which of the forward methods of these different nn.Modules is meant. Have you verified that u(x) works?
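For anyone hitting the same message: in the snippet above, both UpSample.forward and MyDecoder.forward do return self.up(inputs), where self.up is an nn.ModuleList. A ModuleList only registers its children, it does not chain them, and calling it on a tensor is what typically produces this TypeError. A minimal sketch of one possible fix for UpSample (the same change applies to MyDecoder; the init_weight call is omitted here):

```python
import torch.nn as nn

class UpSample(nn.Module):
    def __init__(self, in_planes: int, out_planes: int,
                 kernel_size: int, padding: int, output_padding: int,
                 apply_dropout: bool = False):
        super().__init__()
        self.up = nn.ModuleList([
            nn.ConvTranspose2d(in_planes, out_planes, kernel_size, stride=2,
                               padding=padding, output_padding=output_padding),
            nn.BatchNorm2d(out_planes),
        ])
        if apply_dropout:
            self.up.append(nn.Dropout())
        self.up.append(nn.LeakyReLU())

    def forward(self, x):
        # An nn.ModuleList cannot be called like a module; apply each layer in turn.
        for layer in self.up:
            x = layer(x)
        return x
```

Alternatively, collect the layers into an nn.Sequential (as DownSample already does) so that self.up(x) keeps working unchanged.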
Answer selected by morestart
Adrian Wälchli ***@***.***> wrote on Friday, June 25, 2021 at 08:48:

Hi, have a look at the full stack trace so you know which of the forward methods of these different nn.Modules is meant. Have you verified that u(x) works?
Hi, thanks for your reply, I resolved the problem. 😀