Inception V3--J9
Network Structure 1 -- InceptionA
The InceptionA module is organized into four parallel branches: a 1x1 convolution branch, a 5x5 convolution branch, a double-3x3 convolution branch, and a pooling branch. In the 1x1 branch, the input passes through a 1x1 convolution so that small-scale features are extracted. In the 5x5 branch, a 1x1 convolution first reduces the channel count to cut computation, and a 5x5 convolution then extracts large-scale features. In the double-3x3 branch, a 1x1 convolution again reduces the dimensionality, the first 3x3 convolution extracts features while raising the channel count, and a second 3x3 convolution extracts further features, achieving medium-scale feature extraction. Finally, the pooling branch enhances the features. Together these four branches make up the InceptionA module.
Taken as a whole, the InceptionA module extracts features with 1x1, 3x3, and 5x5 convolutions plus a pooling path, and then concatenates the extracted features into the final combined feature map.
# V3 version
import torch
import torch.nn as nn
import torch.nn.functional as F

class InceptionA(nn.Module):
    def __init__(self, in_channels, pool_features):
        # in_channels: channels of the input feature map; pool_features: output channels of the pooling branch
        super(InceptionA, self).__init__()
        # Small-scale branch: 1x1 convolution, channel projection
        self.branch1x1 = BasicConv2d(in_channels, 64, kernel_size=1)
        # Large-scale branch: 1x1 to reduce dimensionality first, then a 5x5 convolution
        self.branch5x5_1 = BasicConv2d(in_channels, 48, kernel_size=1)
        self.branch5x5_2 = BasicConv2d(48, 64, kernel_size=5, padding=2)
        # Double 3x3 convolution branch
        self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
        self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
        self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, padding=1)
        # Average-pooling branch
        self.branch_pool = BasicConv2d(in_channels, pool_features, kernel_size=1)

    def forward(self, x):
        # 1x1 convolution branch
        branch1x1 = self.branch1x1(x)
        # Large-scale branch: reduce dimensionality, then convolve
        branch5x5 = self.branch5x5_1(x)
        branch5x5 = self.branch5x5_2(branch5x5)
        # Double 3x3 convolution branch
        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
        # Pooling branch
        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)
        # Finally merge the features
        outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)
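As a quick sanity check (a hypothetical snippet, assuming BasicConv2d from the final section is already in scope), the output channel count should be 64 + 64 + 96 + pool_features:

# Hypothetical sanity check of InceptionA's output shape.
block = InceptionA(in_channels=192, pool_features=32)
x = torch.randn(1, 192, 35, 35)  # dummy feature map
print(block(x).shape)  # expected: torch.Size([1, 256, 35, 35]), since 64 + 64 + 96 + 32 = 256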
Network Structure 2 -- InceptionB
The structure of InceptionB is similar to InceptionA, except that its four branches are a 1x1 convolution branch, a branch with a 1x7 and a 7x1 convolution, a branch with four factorized 7x7 convolutions, and a final pooling branch. What distinguishes these six factorized convolution layers is that they use 1x7 and 7x1 kernels rather than ordinary 7x7 kernels. This change achieves the same receptive field as a 7x7 convolution while reducing the amount of computation and improving speed. Why not replace every kernel with this factorized form? The reason is that the features extracted by 1x7 and 7x1 kernels are directional, whereas a 7x7 kernel captures its whole neighborhood at once. In some application scenarios the 1x7 + 7x1 form can indeed outperform a 7x7 kernel while greatly speeding up computation, but clearly its applicable scenarios are limited, so it cannot be used everywhere.
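A back-of-the-envelope calculation makes the savings concrete: for C input and C output channels, a full 7x7 kernel costs 49*C^2 weights, while the 1x7 + 7x1 pair costs 14*C^2, a 3.5x reduction. A minimal sketch (the channel count 192 here is only an illustrative assumption):

# Compare weight counts of a full 7x7 kernel vs. the 1x7 + 7x1 factorization.
# C = 192 is an illustrative assumption, not a value fixed by the text.
C = 192
full_7x7 = 7 * 7 * C * C              # 49 * C^2 weights
factorized = (1 * 7 + 7 * 1) * C * C  # 14 * C^2 weights
print(full_7x7, factorized, full_7x7 / factorized)  # -> 1806336 516096 3.5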
class InceptionB(nn.Module):
    def __init__(self, in_channels, channels_7x7):
        super(InceptionB, self).__init__()
        # 1x1 convolution branch
        self.branch1x1 = BasicConv2d(in_channels, 192, kernel_size=1)
        # Medium-scale branch: replace the 7x7 kernel with 1x7 + 7x1 to reduce computation
        c7 = channels_7x7
        self.branch7x7_1 = BasicConv2d(in_channels, c7, kernel_size=1)
        self.branch7x7_2 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7_3 = BasicConv2d(c7, 192, kernel_size=(7, 1), padding=(3, 0))
        # Branch with multiple factorized 7x7 convolutions
        self.branch7x7dbl_1 = BasicConv2d(in_channels, c7, kernel_size=1)
        self.branch7x7dbl_2 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7dbl_3 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7dbl_4 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7dbl_5 = BasicConv2d(c7, 192, kernel_size=(1, 7), padding=(0, 3))
        # Average-pooling branch, projected to 192 channels
        self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch7x7 = self.branch7x7_1(x)
        branch7x7 = self.branch7x7_2(branch7x7)
        branch7x7 = self.branch7x7_3(branch7x7)

        branch7x7dbl = self.branch7x7dbl_1(x)
        branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)

        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)

        # Merge the four branches: 1x1, factorized 7x7, double factorized 7x7, pooling
        outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
        return torch.cat(outputs, 1)
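Since every branch ends in 192 channels, InceptionB always emits 192 * 4 = 768 channels regardless of channels_7x7. A quick check, under the same assumptions as the earlier snippet:

# Hypothetical check: InceptionB always outputs 192 * 4 = 768 channels.
block = InceptionB(in_channels=768, channels_7x7=128)
x = torch.randn(1, 768, 17, 17)
print(block(x).shape)  # expected: torch.Size([1, 768, 17, 17])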
Network Structure 3 -- InceptionC
InceptionC is similar to A and B, again with four branches: a 1x1 convolution branch, a factorized-3x3 branch, a double-3x3 branch, and a pooling branch. The factorized-3x3 branch realizes the 3x3 kernel with parallel 1x3 and 3x1 convolutions. The double-3x3 branch contains both a full 3x3 convolution and a factorized 3x3 stage. The overall emphasis of this block is therefore on medium-scale 3x3 feature extraction.
class InceptionC(nn.Module):
    def __init__(self, in_channels):
        super(InceptionC, self).__init__()
        # 1x1 convolution branch
        self.branch1x1 = BasicConv2d(in_channels, 320, kernel_size=1)
        # Factorized 3x3 branch: 1x1, then parallel 1x3 and 3x1
        self.branch3x3_1 = BasicConv2d(in_channels, 384, kernel_size=1)
        self.branch3x3_2a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
        self.branch3x3_2b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))
        # Double 3x3 branch: a full 3x3, then a factorized 1x3 + 3x1 stage
        self.branch3x3dbl_1 = BasicConv2d(in_channels, 448, kernel_size=1)
        self.branch3x3dbl_2 = BasicConv2d(448, 384, kernel_size=3, padding=1)
        self.branch3x3dbl_3a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
        self.branch3x3dbl_3b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))
        # Average-pooling branch
        self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch3x3 = self.branch3x3_1(x)
        branch3x3 = [self.branch3x3_2a(branch3x3), self.branch3x3_2b(branch3x3)]
        branch3x3 = torch.cat(branch3x3, 1)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = [self.branch3x3dbl_3a(branch3x3dbl), self.branch3x3dbl_3b(branch3x3dbl)]
        branch3x3dbl = torch.cat(branch3x3dbl, 1)

        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)

        # Merge features
        outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)
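Because the two factorized branches each concatenate a 1x3 and a 3x1 output, the block emits 320 + 768 + 768 + 192 = 2048 channels. A quick check, same assumptions as before:

# Hypothetical check: InceptionC outputs 320 + 2*384 + 2*384 + 192 = 2048 channels.
block = InceptionC(in_channels=1280)
x = torch.randn(1, 1280, 8, 8)
print(block(x).shape)  # expected: torch.Size([1, 2048, 8, 8])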
Network Structure 4 -- ReductionA
The ReductionA block uses three branches: a 3x3 strided convolution, a double-3x3 convolution path, and a pooling layer. Its role is to downsample the feature map while enriching the features; shrinking the feature map reduces the amount of computation in the subsequent layers and thereby speeds up training.
# Reduction (downsampling) module
class ReductionA(nn.Module):
    def __init__(self, in_channels):  # (-1,288,35,35)
        super(ReductionA, self).__init__()
        # Direct strided 3x3 convolution, mapping to 384 channels
        # (-1,288,35,35) -> (-1,384,17,17)
        self.branch3x3 = BasicConv2d(in_channels, 384, kernel_size=3, stride=2)
        # Branch 2: 1x1 + 3x3 + 3x3 in series
        # (-1,288,35,35) -> (-1,64,35,35)
        self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)  # channel projection first
        # (-1,64,35,35) -> (-1,96,35,35)
        self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)  # then widen channels
        # (-1,96,35,35) -> (-1,96,17,17)
        self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, stride=2)  # strided convolution

    def forward(self, x):
        branch3x3 = self.branch3x3(x)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)

        # (-1,288,35,35) -> (-1,288,17,17)
        branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)  # max-pool downsampling

        # Merge: (-1,384,17,17) + (-1,96,17,17) + (-1,288,17,17) -> (-1,768,17,17)
        outputs = [branch3x3, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)
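The stride-2 operations shrink 35x35 to 17x17 via floor((35 - 3) / 2) + 1 = 17, and the concatenated channels are 384 + 96 + 288 = 768. A quick check, same assumptions as before:

# Hypothetical check: ReductionA halves the spatial size and widens the channels.
block = ReductionA(in_channels=288)
x = torch.randn(1, 288, 35, 35)
print(block(x).shape)  # expected: torch.Size([1, 768, 17, 17]), since 384 + 96 + 288 = 768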
Network Structure 5 -- ReductionB
ReductionB is structured like ReductionA, except that its three branches are a 1x1 + 3x3 convolution path, a factorized-7x7 path ending in a 3x3 convolution, and a final pooling layer. Its purpose matches A's: strengthen the features while keeping the computational cost as low as possible.
class ReductionB(nn.Module):
    def __init__(self, in_channels):
        super(ReductionB, self).__init__()
        # Branch 1: 1x1 convolution + strided 3x3 downsampling
        self.branch3x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
        self.branch3x3_2 = BasicConv2d(192, 320, kernel_size=3, stride=2)
        # Branch 2: 1x1 + 1x7 + 7x1 + strided 3x3 in series
        self.branch7x7x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
        self.branch7x7x3_2 = BasicConv2d(192, 192, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7x3_3 = BasicConv2d(192, 192, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7x3_4 = BasicConv2d(192, 192, kernel_size=3, stride=2)

    def forward(self, x):
        branch3x3 = self.branch3x3_1(x)
        branch3x3 = self.branch3x3_2(branch3x3)

        branch7x7x3 = self.branch7x7x3_1(x)
        branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
        branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
        branch7x7x3 = self.branch7x7x3_4(branch7x7x3)

        branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)

        # Merge the features from the three branches
        outputs = [branch3x3, branch7x7x3, branch_pool]
        return torch.cat(outputs, 1)
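With a 768-channel input at 17x17, ReductionB yields 320 + 192 + 768 = 1280 channels at 8x8 (floor((17 - 3) / 2) + 1 = 8). A quick check, same assumptions as before:

# Hypothetical check: ReductionB downsamples 17x17 -> 8x8 and concatenates to 1280 channels.
block = ReductionB(in_channels=768)
x = torch.randn(1, 768, 17, 17)
print(block(x).shape)  # expected: torch.Size([1, 1280, 8, 8])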
Network Structure 6 -- InceptionAux
InceptionAux is the auxiliary branch of the Inception network. It first shrinks the feature map with a 5x5 average-pooling layer, then compresses the channel count to 128 with a 1x1 convolution, then raises the dimensionality with a 5x5 convolution, and finally attaches a fully connected layer for classification. The reason for adding such a branch is to stabilize the training of a very deep network. In a very deep network, the gradient signal weakens as it propagates backward through many layers (the vanishing-gradient problem). The auxiliary branch attaches an extra classification loss to an intermediate layer, so additional gradient is injected partway through the network, which mitigates that problem to some degree; the Inception V3 paper also observes that the auxiliary classifier acts as a regularizer.
# Auxiliary branch
class InceptionAux(nn.Module):
    def __init__(self, in_channels, num_classes):
        super(InceptionAux, self).__init__()
        # Compress the intermediate feature map of the main network to 128 channels
        self.conv0 = BasicConv2d(in_channels, 128, kernel_size=1)
        # 5x5 convolution for feature extraction
        self.conv1 = BasicConv2d(128, 768, kernel_size=5)
        self.conv1.stddev = 0.01  # marker for custom weight initialization
        self.fc = nn.Linear(768, num_classes)  # final fully connected classifier
        self.fc.stddev = 0.001  # standard deviation used to initialize the fc weights

    def forward(self, x):
        x = F.avg_pool2d(x, kernel_size=5, stride=3)
        x = self.conv0(x)
        x = self.conv1(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
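Fed the 768-channel, 17x17 feature map tapped from the main network, the average pooling shrinks it to 5x5 ((17 - 5) // 3 + 1 = 5), and the 5x5 convolution in conv1 then collapses it to 1x1 before the fully connected layer. A quick shape trace under the same assumptions as the earlier checks:

# Hypothetical check: trace the shapes through the auxiliary branch.
aux = InceptionAux(in_channels=768, num_classes=10)
x = torch.randn(2, 768, 17, 17)
print(aux(x).shape)  # expected: torch.Size([2, 10])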
Overall network structure:
In the overall network, several plain convolution layers first downsample the input and extract basic features; as the channel count grows, ever finer feature details are captured, while these layers also reduce the amount of computation and add nonlinear fitting capacity. The features then pass through the InceptionA, B, and C modules, with the two reduction blocks in between, for detailed multi-scale feature extraction; finally, a fully connected layer maps the features to the class scores to perform classification.
# Overall network structure
import torch
import torch.nn as nn
import torch.nn.functional as F

class BasicConv2d(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
                 dilation=1, groups=1, bias=False):
        super(BasicConv2d, self).__init__()
        # bias defaults to False because the BatchNorm that follows makes it redundant
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                              kernel_size=kernel_size, stride=stride, padding=padding,
                              dilation=dilation, groups=groups, bias=bias)
        self.bn = nn.BatchNorm2d(num_features=out_channels)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        return F.relu(x, inplace=True)

class InceptionV3(nn.Module):
    def __init__(self, num_classes=1000, aux_logits=False, transform_input=False):
        # num_classes: number of output classes; aux_logits: enable the auxiliary branch;
        # transform_input: re-normalize the input image
        super(InceptionV3, self).__init__()
        self.aux_logits = aux_logits
        self.transform_input = transform_input
        # Shallow convolutions with downsampling: 3 -> 192 channels
        self.conv2d_1a_33 = BasicConv2d(3, 32, kernel_size=3, stride=2)
        self.conv2d_2a_33 = BasicConv2d(32, 32, kernel_size=3)
        self.conv2d_2b_33 = BasicConv2d(32, 64, kernel_size=3, padding=1)
        self.conv2d_3b_11 = BasicConv2d(64, 80, kernel_size=1)
        # (-1,80,73,73) -> (-1,192,71,71)
        self.conv2d_4a_33 = BasicConv2d(80, 192, kernel_size=3)
        # InceptionA blocks: multi-scale feature extraction
        # (-1,192,35,35) -> (-1,256,35,35)
        self.Mixed_5b = InceptionA(192, pool_features=32)
        # (-1,256,35,35) -> (-1,288,35,35)
        self.Mixed_5c = InceptionA(256, pool_features=64)
        # (-1,288,35,35) -> (-1,288,35,35)
        self.Mixed_5d = InceptionA(288, pool_features=64)
        # (-1,288,35,35) 352800 -> (-1,768,17,17) 221952
        self.Mixed_6a = ReductionA(288)
        # InceptionB blocks: factorized 7x7 feature extraction
        self.Mixed_6b = InceptionB(768, channels_7x7=128)
        self.Mixed_6c = InceptionB(768, channels_7x7=160)
        self.Mixed_6d = InceptionB(768, channels_7x7=160)
        self.Mixed_6e = InceptionB(768, channels_7x7=192)
        if aux_logits:
            self.AuxLogits = InceptionAux(768, num_classes)
        # Downsampling followed by InceptionC blocks
        self.Mixed_7a = ReductionB(768)
        self.Mixed_7b = InceptionC(1280)
        self.Mixed_7c = InceptionC(2048)
        self.fc = nn.Linear(2048, num_classes)

    def forward(self, x):
        if self.transform_input:
            x = x.clone()
            x[:, 0] = x[:, 0] * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
            x[:, 1] = x[:, 1] * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
            x[:, 2] = x[:, 2] * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
        # (-1,3,299,299) -> (-1,32,149,149)
        x = self.conv2d_1a_33(x)
        # (-1,32,149,149) -> (-1,32,147,147)
        x = self.conv2d_2a_33(x)
        # (-1,32,147,147) -> (-1,64,147,147)
        x = self.conv2d_2b_33(x)
        # (-1,64,147,147) -> (-1,64,73,73)
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        # (-1,64,73,73) -> (-1,80,73,73)
        x = self.conv2d_3b_11(x)
        # (-1,80,73,73) -> (-1,192,71,71)
        x = self.conv2d_4a_33(x)
        # (-1,192,71,71) -> (-1,192,35,35)
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        # (-1,192,35,35) -> (-1,288,35,35)
        x = self.Mixed_5b(x)
        x = self.Mixed_5c(x)
        x = self.Mixed_5d(x)
        x = self.Mixed_6a(x)
        x = self.Mixed_6b(x)
        x = self.Mixed_6c(x)
        x = self.Mixed_6d(x)
        x = self.Mixed_6e(x)
        if self.training and self.aux_logits:
            aux = self.AuxLogits(x)
        x = self.Mixed_7a(x)
        x = self.Mixed_7b(x)
        x = self.Mixed_7c(x)
        x = F.avg_pool2d(x, kernel_size=8)
        x = F.dropout(x, training=self.training)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        if self.training and self.aux_logits:
            return x, aux
        return x
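To close, here is a minimal usage sketch, assuming all the blocks above live in one file. The 0.3 weight on the auxiliary loss follows the convention from the original GoogLeNet paper and is an assumption, not something this code mandates:

# Hypothetical usage: forward pass in eval mode, and a training step with the aux loss.
model = InceptionV3(num_classes=10, aux_logits=True)

model.eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 299, 299))
print(logits.shape)  # expected: torch.Size([1, 10])

model.train()
criterion = nn.CrossEntropyLoss()
images = torch.randn(2, 3, 299, 299)
labels = torch.randint(0, 10, (2,))
logits, aux_logits = model(images)  # the aux output is only returned in training mode
loss = criterion(logits, labels) + 0.3 * criterion(aux_logits, labels)  # 0.3: assumed weight
loss.backward()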