A PyTorch implementation of the DPN network
Without further ado, let's go straight to the code!
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict  # needed for the block dict built in DPN.__init__
class CatBnAct(nn.Module):
    def __init__(self, in_chs, activation_fn=nn.ReLU(inplace=True)):
        super(CatBnAct, self).__init__()
        self.bn = nn.BatchNorm2d(in_chs, eps=0.001)
        self.act = activation_fn

    def forward(self, x):
        # Accepts a single tensor or a (residual, dense) tuple; tuples are
        # concatenated along the channel dim before BN and activation.
        x = torch.cat(x, dim=1) if isinstance(x, tuple) else x
        return self.act(self.bn(x))
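This tuple convention is used throughout the file, so it is worth a quick sanity check. The snippet below is only an illustration; the channel counts are chosen arbitrarily and are not part of the original post:

    cat_bn_act = CatBnAct(in_chs=48)
    resid = torch.randn(2, 32, 8, 8)   # residual-path features
    dense = torch.randn(2, 16, 8, 8)   # dense-path features
    y = cat_bn_act((resid, dense))     # tuple -> concat on dim 1 -> BN -> ReLU
    print(y.shape)                     # torch.Size([2, 48, 8, 8])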
class BnActConv2d(nn.Module):
    def __init__(self, in_chs, out_chs, kernel_size, stride,
                 padding=0, groups=1, activation_fn=nn.ReLU(inplace=True)):
        super(BnActConv2d, self).__init__()
        self.bn = nn.BatchNorm2d(in_chs, eps=0.001)
        self.act = activation_fn
        self.conv = nn.Conv2d(in_chs, out_chs, kernel_size, stride, padding, groups=groups, bias=False)

    def forward(self, x):
        # Pre-activation ordering: BN -> ReLU -> Conv
        return self.conv(self.act(self.bn(x)))
class InputBlock(nn.Module):
    def __init__(self, num_init_features, kernel_size=7,
                 padding=3, activation_fn=nn.ReLU(inplace=True)):
        super(InputBlock, self).__init__()
        self.conv = nn.Conv2d(
            3, num_init_features, kernel_size=kernel_size, stride=2, padding=padding, bias=False)
        self.bn = nn.BatchNorm2d(num_init_features, eps=0.001)
        self.act = activation_fn
        self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        x = self.act(x)
        x = self.pool(x)
        return x
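The stem downsamples the input by a factor of 4: a stride-2 convolution followed by a stride-2 max pool. A quick shape check (input size chosen for illustration):

    stem = InputBlock(num_init_features=64)   # 7x7 conv, padding 3
    x = torch.randn(1, 3, 224, 224)
    print(stem(x).shape)                      # torch.Size([1, 64, 56, 56])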
class DualPathBlock(nn.Module):
    def __init__(
            self, in_chs, num_1x1_a, num_3x3_b, num_1x1_c, inc, groups, block_type='normal', b=False):
        super(DualPathBlock, self).__init__()
        self.num_1x1_c = num_1x1_c
        self.inc = inc
        self.b = b
        if block_type == 'proj':
            self.key_stride = 1
            self.has_proj = True
        elif block_type == 'down':
            self.key_stride = 2
            self.has_proj = True
        else:
            assert block_type == 'normal'
            self.key_stride = 1
            self.has_proj = False
        if self.has_proj:
            # Using different member names here to allow easier parameter key matching for conversion
            if self.key_stride == 2:
                self.c1x1_w_s2 = BnActConv2d(
                    in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=2)
            else:
                self.c1x1_w_s1 = BnActConv2d(
                    in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=1)
        self.c1x1_a = BnActConv2d(in_chs=in_chs, out_chs=num_1x1_a, kernel_size=1, stride=1)
        self.c3x3_b = BnActConv2d(
            in_chs=num_1x1_a, out_chs=num_3x3_b, kernel_size=3,
            stride=self.key_stride, padding=1, groups=groups)
        if b:
            self.c1x1_c = CatBnAct(in_chs=num_3x3_b)
            self.c1x1_c1 = nn.Conv2d(num_3x3_b, num_1x1_c, kernel_size=1, bias=False)
            self.c1x1_c2 = nn.Conv2d(num_3x3_b, inc, kernel_size=1, bias=False)
        else:
            self.c1x1_c = BnActConv2d(in_chs=num_3x3_b, out_chs=num_1x1_c + inc, kernel_size=1, stride=1)
    def forward(self, x):
        x_in = torch.cat(x, dim=1) if isinstance(x, tuple) else x
        if self.has_proj:
            # Projection shortcut: produces both the residual part (first
            # num_1x1_c channels) and the dense part (remaining 2*inc channels).
            if self.key_stride == 2:
                x_s = self.c1x1_w_s2(x_in)
            else:
                x_s = self.c1x1_w_s1(x_in)
            x_s1 = x_s[:, :self.num_1x1_c, :, :]
            x_s2 = x_s[:, self.num_1x1_c:, :, :]
        else:
            x_s1 = x[0]
            x_s2 = x[1]
        x_in = self.c1x1_a(x_in)
        x_in = self.c3x3_b(x_in)
        if self.b:
            x_in = self.c1x1_c(x_in)
            out1 = self.c1x1_c1(x_in)
            out2 = self.c1x1_c2(x_in)
        else:
            x_in = self.c1x1_c(x_in)
            out1 = x_in[:, :self.num_1x1_c, :, :]
            out2 = x_in[:, self.num_1x1_c:, :, :]
        resid = x_s1 + out1                     # residual path (element-wise sum)
        dense = torch.cat([x_s2, out2], dim=1)  # dense path (channel concat)
        return resid, dense
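Each block returns a (resid, dense) tuple: the residual path keeps a fixed width of num_1x1_c channels, while the dense path grows by inc channels per block. A sketch with small, arbitrary sizes (not taken from the original post):

    # 'proj' block: creates the two paths from a plain tensor input.
    block = DualPathBlock(in_chs=64, num_1x1_a=32, num_3x3_b=32,
                          num_1x1_c=64, inc=16, groups=8, block_type='proj')
    x = torch.randn(2, 64, 56, 56)
    resid, dense = block(x)
    print(resid.shape)  # torch.Size([2, 64, 56, 56]) - fixed width
    print(dense.shape)  # torch.Size([2, 48, 56, 56]) - 2*inc + inc channels
    # A following 'normal' block consumes the tuple; its in_chs is 64 + 48 = 112.
    nxt = DualPathBlock(in_chs=112, num_1x1_a=32, num_3x3_b=32,
                        num_1x1_c=64, inc=16, groups=8, block_type='normal')
    resid, dense = nxt((resid, dense))
    print(dense.shape)  # torch.Size([2, 64, 56, 56]) - dense grew by inc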
class DPN(nn.Module):
    def __init__(self, small=False, num_init_features=64, k_r=96, groups=32,
                 b=False, k_sec=(3, 4, 20, 3), inc_sec=(16, 32, 24, 128),
                 num_classes=1000, test_time_pool=False):
        super(DPN, self).__init__()
        self.test_time_pool = test_time_pool
        self.b = b
        bw_factor = 1 if small else 4
        blocks = OrderedDict()
        # conv1
        if small:
            blocks['conv1_1'] = InputBlock(num_init_features, kernel_size=3, padding=1)
        else:
            blocks['conv1_1'] = InputBlock(num_init_features, kernel_size=7, padding=3)
        # conv2
        bw = 64 * bw_factor
        inc = inc_sec[0]
        r = (k_r * bw) // (64 * bw_factor)
        blocks['conv2_1'] = DualPathBlock(num_init_features, r, r, bw, inc, groups, 'proj', b)
        in_chs = bw + 3 * inc
        for i in range(2, k_sec[0] + 1):
            blocks['conv2_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b)
            in_chs += inc
        # conv3
        bw = 128 * bw_factor
        inc = inc_sec[1]
        r = (k_r * bw) // (64 * bw_factor)
        blocks['conv3_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b)
        in_chs = bw + 3 * inc
        for i in range(2, k_sec[1] + 1):
            blocks['conv3_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b)
            in_chs += inc
        # conv4
        bw = 256 * bw_factor
        inc = inc_sec[2]
        r = (k_r * bw) // (64 * bw_factor)
        blocks['conv4_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b)
        in_chs = bw + 3 * inc
        for i in range(2, k_sec[2] + 1):
            blocks['conv4_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b)
            in_chs += inc
        # conv5
        bw = 512 * bw_factor
        inc = inc_sec[3]
        r = (k_r * bw) // (64 * bw_factor)
        blocks['conv5_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b)
        in_chs = bw + 3 * inc
        for i in range(2, k_sec[3] + 1):
            blocks['conv5_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b)
            in_chs += inc
        blocks['conv5_bn_ac'] = CatBnAct(in_chs)
        self.features = nn.Sequential(blocks)
        # Using 1x1 conv for the FC layer to allow the extra pooling scheme
        self.last_linear = nn.Conv2d(in_chs, num_classes, kernel_size=1, bias=True)
    def logits(self, features):
        if not self.training and self.test_time_pool:
            x = F.avg_pool2d(features, kernel_size=7, stride=1)
            out = self.last_linear(x)
            # The extra test-time pool should be pooling an img_size//32 - 6 sized patch
            out = adaptive_avgmax_pool2d(out, pool_type='avgmax')
        else:
            x = adaptive_avgmax_pool2d(features, pool_type='avg')
            out = self.last_linear(x)
        return out.view(out.size(0), -1)

    def forward(self, input):
        x = self.features(input)
        x = self.logits(x)
        return x
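The channel bookkeeping in __init__ follows directly from the block semantics above. As a worked example, with the defaults (small=False, so bw_factor = 4) the conv2 stage comes out as:

    bw  = 64 * 4 = 256            # residual-path width for conv2
    inc = 16                      # dense growth per block
    r   = (96 * 256) // 256 = 96  # bottleneck width
    # conv2_1 ('proj') emits 256 residual + (2*16 + 16) = 48 dense channels,
    # hence in_chs = bw + 3*inc = 304; each 'normal' block then adds inc = 16.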
"""PyTorchselectableadaptivepooling
Adaptivepoolingwiththeabilitytoselectthetypeofpoolingfrom:
*'avg'-Averagepooling
*'max'-Maxpooling
*'avgmax'-Sumofaverageandmaxpoolingre-scaledby0.5
*'avgmaxc'-Concatenationofaverageandmaxpoolingalongfeaturedim,doublesfeaturedim
Bothafunctionalandann.Moduleversionofthepoolingisprovided.
"""
def pooling_factor(pool_type='avg'):
    return 2 if pool_type == 'avgmaxc' else 1

def adaptive_avgmax_pool2d(x, pool_type='avg', padding=0, count_include_pad=False):
    """Selectable global pooling function with dynamic input kernel size
    """
    if pool_type == 'avgmaxc':
        x = torch.cat([
            F.avg_pool2d(
                x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad),
            F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
        ], dim=1)
    elif pool_type == 'avgmax':
        x_avg = F.avg_pool2d(
            x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad)
        x_max = F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
        x = 0.5 * (x_avg + x_max)
    elif pool_type == 'max':
        x = F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
    else:
        if pool_type != 'avg':
            print('Invalid pool type %s specified. Defaulting to average pooling.' % pool_type)
        x = F.avg_pool2d(
            x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad)
    return x
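A quick comparison of the pooling modes on a dummy feature map (sizes are arbitrary, chosen only for illustration):

    feats = torch.randn(2, 512, 7, 7)
    print(adaptive_avgmax_pool2d(feats, 'avg').shape)      # torch.Size([2, 512, 1, 1])
    print(adaptive_avgmax_pool2d(feats, 'avgmax').shape)   # torch.Size([2, 512, 1, 1])
    print(adaptive_avgmax_pool2d(feats, 'avgmaxc').shape)  # torch.Size([2, 1024, 1, 1])
    # 'avgmaxc' doubles the channel dim, which is what pooling_factor() reports.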
class AdaptiveAvgMaxPool2d(torch.nn.Module):
    """Selectable global pooling layer with dynamic input kernel size
    """
    def __init__(self, output_size=1, pool_type='avg'):
        super(AdaptiveAvgMaxPool2d, self).__init__()
        self.output_size = output_size
        self.pool_type = pool_type
        if pool_type == 'avgmaxc' or pool_type == 'avgmax':
            self.pool = nn.ModuleList([nn.AdaptiveAvgPool2d(output_size), nn.AdaptiveMaxPool2d(output_size)])
        elif pool_type == 'max':
            self.pool = nn.AdaptiveMaxPool2d(output_size)
        else:
            if pool_type != 'avg':
                print('Invalid pool type %s specified. Defaulting to average pooling.' % pool_type)
            self.pool = nn.AdaptiveAvgPool2d(output_size)

    def forward(self, x):
        if self.pool_type == 'avgmaxc':
            x = torch.cat([p(x) for p in self.pool], dim=1)
        elif self.pool_type == 'avgmax':
            x = 0.5 * torch.sum(torch.stack([p(x) for p in self.pool]), 0).squeeze(dim=0)
        else:
            x = self.pool(x)
        return x

    def factor(self):
        return pooling_factor(self.pool_type)

    def __repr__(self):
        return self.__class__.__name__ + '(' \
            + 'output_size=' + str(self.output_size) \
            + ', pool_type=' + self.pool_type + ')'
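Finally, a minimal end-to-end smoke test. The constructor defaults above appear to correspond to a DPN-92-style configuration; the snippet below is only a sketch, not part of the original post, and just checks that a forward pass yields the expected logits shape:

    if __name__ == '__main__':
        model = DPN(num_classes=1000)
        model.eval()
        with torch.no_grad():
            logits = model(torch.randn(1, 3, 224, 224))
        print(logits.shape)  # torch.Size([1, 1000])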
That's all for this post on a PyTorch implementation of the DPN network. I hope it serves as a useful reference, and thanks for supporting 毛票票.
