"""
这是根据UNet模型搭建出的一个基本网络结构
输入和输出大小是一样的,可以根据需求进行修改
"""
import torch
import torch.nn as nn
from torch.nn import functional as F
# Basic convolution block: two 3x3 conv layers
class Conv(nn.Module):
def __init__(self, C_in, C_out):
super(Conv, self).__init__()
self.layer = nn.Sequential(
nn.Conv2d(C_in, C_out, 3, 1, 1),
nn.BatchNorm2d(C_out),
            # Dropout to reduce overfitting
nn.Dropout(0.3),
nn.LeakyReLU(),
nn.Conv2d(C_out, C_out, 3, 1, 1),
nn.BatchNorm2d(C_out),
            # Dropout to reduce overfitting
nn.Dropout(0.4),
nn.LeakyReLU(),
)
def forward(self, x):
return self.layer(x)
# Downsampling module
class DownSampling(nn.Module):
def __init__(self, C):
super(DownSampling, self).__init__()
self.Down = nn.Sequential(
            # 2x downsampling with a strided convolution; the channel count stays the same
nn.Conv2d(C, C, 3, 2, 1),
nn.LeakyReLU()
)
def forward(self, x):
return self.Down(x)
# Upsampling module
class UpSampling(nn.Module):
def __init__(self, C):
super(UpSampling, self).__init__()
        # Double the feature-map size and halve the number of channels
self.Up = nn.Conv2d(C, C // 2, 1, 1)
def forward(self, x, r):
        # 2x upsampling with nearest-neighbor interpolation
up = F.interpolate(x, scale_factor=2, mode="nearest")
x = self.Up(up)
        # Concatenate the upsampled feature map with the one saved from the downsampling path (skip connection)
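        # e.g. x of shape (N, 1024, 16, 16) and r of shape (N, 512, 32, 32)
        # give a returned tensor of shape (N, 1024, 32, 32)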
return torch.cat((x, r), 1)
# Main network
class UNet(nn.Module):
def __init__(self):
super(UNet, self).__init__()
        # Four downsampling stages
self.C1 = Conv(3, 64)
self.D1 = DownSampling(64)
self.C2 = Conv(64, 128)
self.D2 = DownSampling(128)
self.C3 = Conv(128, 256)
self.D3 = DownSampling(256)
self.C4 = Conv(256, 512)
self.D4 = DownSampling(512)
self.C5 = Conv(512, 1024)
        # Four upsampling stages
self.U1 = UpSampling(1024)
self.C6 = Conv(1024, 512)
self.U2 = UpSampling(512)
self.C7 = Conv(512, 256)
self.U3 = UpSampling(256)
self.C8 = Conv(256, 128)
self.U4 = UpSampling(128)
self.C9 = Conv(128, 64)
self.Th = torch.nn.Sigmoid()
self.pred = torch.nn.Conv2d(64, 3, 3, 1, 1)
def forward(self, x):
        # Downsampling (encoder) path
R1 = self.C1(x)
R2 = self.C2(self.D1(R1))
R3 = self.C3(self.D2(R2))
R4 = self.C4(self.D3(R3))
Y1 = self.C5(self.D4(R4))
        # Upsampling (decoder) path
        # each stage concatenates the corresponding skip connection
O1 = self.C6(self.U1(Y1, R4))
O2 = self.C7(self.U2(O1, R3))
O3 = self.C8(self.U3(O2, R2))
O4 = self.C9(self.U4(O3, R1))
        # Output prediction; the spatial size matches the input
        # Cropping the encoder feature maps before concatenation (as in the original paper) would give a smaller output
return self.Th(self.pred(O4))
if __name__ == '__main__':
a = torch.randn(2, 3, 256, 256)
net = UNet()
print(net(a).shape)
Second implementation
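This version relies on a double_conv block, an addPadding helper, and a DEVICE constant that are not shown in the excerpt. A minimal sketch of what they might look like (the padded 3x3 convolutions are an assumption; the original U-Net paper uses unpadded convolutions and crops the skip connections instead):

import torch
import torch.nn as nn
from torch.nn import functional as F

DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def double_conv(in_c, out_c):
    # Two 3x3 convolutions, each followed by ReLU.
    # padding=1 keeps the spatial size unchanged (assumption; the original
    # paper uses unpadded convolutions).
    return nn.Sequential(
        nn.Conv2d(in_c, out_c, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        nn.Conv2d(out_c, out_c, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
    )

def addPadding(target, x):
    # Pad the upsampled tensor x so its spatial size matches the skip
    # connection `target` before the two are concatenated.
    diff_y = target.size(2) - x.size(2)
    diff_x = target.size(3) - x.size(3)
    return F.pad(x, [diff_x // 2, diff_x - diff_x // 2,
                     diff_y // 2, diff_y - diff_y // 2])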
# Define the UNet network
class UNet(nn.Module):
def __init__(self):
super(UNet, self).__init__()
self.max_pool_2x2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.down_conv_1 = double_conv(3, 64)
self.down_conv_2 = double_conv(64, 128)
self.down_conv_3 = double_conv(128, 256)
self.down_conv_4 = double_conv(256, 512)
self.down_conv_5 = double_conv(512, 1024)
self.up_trans_1 = nn.ConvTranspose2d(
in_channels=1024,
out_channels=512,
kernel_size=2,
stride=2
)
self.up_conv_1 = double_conv(1024, 512)
self.up_trans_2 = nn.ConvTranspose2d(
in_channels=512,
out_channels=256,
kernel_size=2,
stride=2
)
self.up_conv_2 = double_conv(512, 256)
self.up_trans_3 = nn.ConvTranspose2d(
in_channels=256,
out_channels=128,
kernel_size=2,
stride=2
)
self.up_conv_3 = double_conv(256, 128)
self.up_trans_4 = nn.ConvTranspose2d(
in_channels=128,
out_channels=64,
kernel_size=2,
stride=2
)
self.up_conv_4 = double_conv(128, 64)
self.out = nn.Conv2d(
in_channels=64,
out_channels=1,
kernel_size=1
)
def forward(self, image):
# expected size
# encoder (Normal convolutions decrease the size)
x1 = self.down_conv_1(image)
# print("x1 "+str(x1.shape))
x2 = self.max_pool_2x2(x1)
# print("x2 "+str(x2.shape))
x3 = self.down_conv_2(x2)
# print("x3 "+str(x3.shape))
x4 = self.max_pool_2x2(x3)
# print("x4 "+str(x4.shape))
x5 = self.down_conv_3(x4)
# print("x5 "+str(x5.shape))
x6 = self.max_pool_2x2(x5)
# print("x6 "+str(x6.shape))
x7 = self.down_conv_4(x6)
# print("x7 "+str(x7.shape))
x8 = self.max_pool_2x2(x7)
# print("x8 "+str(x8.shape))
x9 = self.down_conv_5(x8)
# print("x9 "+str(x9.shape))
# decoder (transposed convolutions increase the size)
x = self.up_trans_1(x9)
x = addPadding(x7, x)
x = self.up_conv_1(torch.cat([x7, x], 1))
x = self.up_trans_2(x)
x = addPadding(x5, x)
x = self.up_conv_2(torch.cat([x5, x], 1))
x = self.up_trans_3(x)
x = addPadding(x3, x)
x = self.up_conv_3(torch.cat([x3, x], 1))
x = self.up_trans_4(x)
x = addPadding(x1, x)
x = self.up_conv_4(torch.cat([x1, x], 1))
x = self.out(x)
# print(x.shape)
return x.to(DEVICE)
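
A quick shape check analogous to the first snippet, assuming the padded double_conv sketch above so that the output spatial size matches the input:

if __name__ == '__main__':
    a = torch.randn(2, 3, 256, 256)
    net = UNet()
    print(net(a).shape)  # torch.Size([2, 1, 256, 256]) with the padded double_conv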
Third implementation
https://github.com/milesial/Pytorch-UNet/blob/master/unet/unet_model.py
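That file builds the model out of the DoubleConv / Down / Up / OutConv blocks defined in unet_parts.py in the same repository; assuming that interface, it is instantiated roughly as net = UNet(n_channels=3, n_classes=1, bilinear=False).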