把vgg-face.mat权重迁移到pytorch模型示例
最近使用pytorch时,需要用到一个预训练好的人脸识别模型提取人脸ID特征,想到很多人都在用vgg-face,但是vgg-face没有pytorch的模型,于是写个vgg-face.mat转到pytorch模型的代码
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu May 10 10:41:40 2018

@author: hy

Port the pre-trained MatConvNet ``vgg-face.mat`` weights into an
equivalent PyTorch VGG-16 model, so the network can be used from
PyTorch for face-ID feature extraction.
"""
import math

import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
from scipy.io import loadmat
import scipy.misc as sm
import matplotlib.pyplot as plt


class vgg16_face(nn.Module):
    """VGG-16 network matching the layer layout of vgg-face.mat.

    ``forward`` returns the fc8 logits plus the activation after each
    of the five pooling stages (useful as intermediate features).
    """

    def __init__(self, num_classes=2622):
        # 2622 is the number of identities in the original VGG-Face
        # training set, so the pre-trained fc8 weights fit by default.
        super(vgg16_face, self).__init__()
        inplace = True
        # Stage 1: 3 -> 64 channels
        self.conv1_1 = nn.Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.relu1_1 = nn.ReLU(inplace)
        self.conv1_2 = nn.Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.relu1_2 = nn.ReLU(inplace)
        self.pool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1), ceil_mode=False)
        # Stage 2: 64 -> 128 channels
        self.conv2_1 = nn.Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.relu2_1 = nn.ReLU(inplace)
        self.conv2_2 = nn.Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.relu2_2 = nn.ReLU(inplace)
        self.pool2 = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1), ceil_mode=False)
        # Stage 3: 128 -> 256 channels
        self.conv3_1 = nn.Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.relu3_1 = nn.ReLU(inplace)
        self.conv3_2 = nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.relu3_2 = nn.ReLU(inplace)
        self.conv3_3 = nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.relu3_3 = nn.ReLU(inplace)
        self.pool3 = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1), ceil_mode=False)
        # Stage 4: 256 -> 512 channels
        self.conv4_1 = nn.Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.relu4_1 = nn.ReLU(inplace)
        self.conv4_2 = nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.relu4_2 = nn.ReLU(inplace)
        self.conv4_3 = nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.relu4_3 = nn.ReLU(inplace)
        self.pool4 = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1), ceil_mode=False)
        # Stage 5: 512 -> 512 channels
        self.conv5_1 = nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.relu5_1 = nn.ReLU(inplace)
        self.conv5_2 = nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.relu5_2 = nn.ReLU(inplace)
        self.conv5_3 = nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.relu5_3 = nn.ReLU(inplace)
        self.pool5 = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1), ceil_mode=False)
        # Classifier head: 512 * 7 * 7 = 25088 inputs for a 224x224 image
        self.fc6 = nn.Linear(in_features=25088, out_features=4096, bias=True)
        self.relu6 = nn.ReLU(inplace)
        self.drop6 = nn.Dropout(p=0.5)
        self.fc7 = nn.Linear(in_features=4096, out_features=4096, bias=True)
        self.relu7 = nn.ReLU(inplace)
        self.drop7 = nn.Dropout(p=0.5)
        self.fc8 = nn.Linear(in_features=4096, out_features=num_classes, bias=True)
        self._initialize_weights()

    def forward(self, x):
        """Run the network.

        Returns (logits, pool1, pool2, pool3, pool4, pool5) where the
        pool tensors are the activations after each max-pool stage.
        """
        out = self.conv1_1(x)
        out = self.relu1_1(out)
        out = self.conv1_2(out)
        out = self.relu1_2(out)
        out = self.pool1(out)
        x_pool1 = out
        out = self.conv2_1(out)
        out = self.relu2_1(out)
        out = self.conv2_2(out)
        out = self.relu2_2(out)
        out = self.pool2(out)
        x_pool2 = out
        out = self.conv3_1(out)
        out = self.relu3_1(out)
        out = self.conv3_2(out)
        out = self.relu3_2(out)
        out = self.conv3_3(out)
        out = self.relu3_3(out)
        out = self.pool3(out)
        x_pool3 = out
        out = self.conv4_1(out)
        out = self.relu4_1(out)
        out = self.conv4_2(out)
        out = self.relu4_2(out)
        out = self.conv4_3(out)
        out = self.relu4_3(out)
        out = self.pool4(out)
        x_pool4 = out
        out = self.conv5_1(out)
        out = self.relu5_1(out)
        out = self.conv5_2(out)
        out = self.relu5_2(out)
        out = self.conv5_3(out)
        out = self.relu5_3(out)
        out = self.pool5(out)
        x_pool5 = out
        out = out.view(out.size(0), -1)  # flatten to (batch, 25088)
        out = self.fc6(out)
        out = self.relu6(out)
        out = self.fc7(out)
        out = self.relu7(out)
        out = self.fc8(out)
        # NOTE: dropout layers are defined but intentionally not applied
        # here (inference-only use); kept to mirror the original script.
        return out, x_pool1, x_pool2, x_pool3, x_pool4, x_pool5

    def _initialize_weights(self):
        """He-style init for convs, N(0, 0.01) for linears.

        These values are placeholders; they are overwritten when the
        .mat weights are copied in by ``get_vggface``.
        """
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()


def copy(vgglayers, dstlayer, idx):
    """Copy one MatConvNet layer's weights into a PyTorch layer.

    vgglayers : the ``layers`` cell array from vgg-face.mat
    dstlayer  : the target nn.Conv2d / nn.Linear module
    idx       : index of the source layer inside ``layers``

    MatConvNet stores conv kernels as (h, w, in, out) and fc weights as
    (in, out), so axes must be transposed to PyTorch's (out, in, h, w)
    and (out, in) layouts.
    """
    layer = vgglayers[0][idx]
    kernel, bias = layer[0]['weights'][0][0]
    if idx in [33, 35]:  # fc7, fc8: stored as 1x1 "conv", squeeze to 2-D
        kernel = kernel.squeeze()
        dstlayer.weight.data.copy_(torch.from_numpy(kernel.transpose([1, 0])))  # a x b -> b x a
    elif idx == 31:  # fc6: (7, 7, 512, 4096) conv kernel flattened to a matrix
        kernel = kernel.reshape(-1, 4096)
        dstlayer.weight.data.copy_(torch.from_numpy(kernel.transpose([1, 0])))  # a x b -> b x a
    else:  # conv layers
        dstlayer.weight.data.copy_(torch.from_numpy(kernel.transpose([3, 2, 1, 0])))  # h x w x in x out -> out x in x w x h
    dstlayer.bias.data.copy_(torch.from_numpy(bias.reshape(-1)))


def get_vggface(vgg_path):
    """Build a vgg16_face model and load the vgg-face.mat weights.

    vgg_path : path to vgg-face.mat, downloadable from
               http://www.vlfeat.org/matconvnet/pretrained/

    Returns (model, class_names, average_image, image_size).
    """
    # 1. define the pytorch model
    model = vgg16_face()

    # 2. read the pre-trained weights and normalization metadata
    vgg_weights = loadmat(vgg_path)
    data = vgg_weights
    meta = data['meta']
    classes = meta['classes']
    class_names = classes[0][0]['description'][0][0]
    normalization = meta['normalization']
    average_image = np.squeeze(normalization[0][0]['averageImage'][0][0][0][0])
    image_size = np.squeeze(normalization[0][0]['imageSize'][0][0])
    layers = data['layers']
    # Layer index reference (from iterating layers[0] and printing names):
    #  0 conv1_1   1 relu1_1   2 conv1_2   3 relu1_2   4 pool1
    #  5 conv2_1   6 relu2_1   7 conv2_2   8 relu2_2   9 pool2
    # 10 conv3_1  11 relu3_1  12 conv3_2  13 relu3_2  14 conv3_3
    # 15 relu3_3  16 pool3    17 conv4_1  18 relu4_1  19 conv4_2
    # 20 relu4_2  21 conv4_3  22 relu4_3  23 pool4    24 conv5_1
    # 25 relu5_1  26 conv5_2  27 relu5_2  28 conv5_3  29 relu5_3
    # 30 pool5    31 fc6      32 relu6    33 fc7      34 relu7
    # 35 fc8      36 prob

    # 3. copy the weights into the pytorch model
    copy(layers, model.conv1_1, 0)
    copy(layers, model.conv1_2, 2)
    copy(layers, model.conv2_1, 5)
    copy(layers, model.conv2_2, 7)
    copy(layers, model.conv3_1, 10)
    copy(layers, model.conv3_2, 12)
    copy(layers, model.conv3_3, 14)
    copy(layers, model.conv4_1, 17)
    copy(layers, model.conv4_2, 19)
    copy(layers, model.conv4_3, 21)
    copy(layers, model.conv5_1, 24)
    copy(layers, model.conv5_2, 26)
    copy(layers, model.conv5_3, 28)
    copy(layers, model.fc6, 31)
    copy(layers, model.fc7, 33)
    copy(layers, model.fc8, 35)
    return model, class_names, average_image, image_size


if __name__ == '__main__':
    # Quick smoke test: load the weights and push one image through.
    vgg_path = "/home/hy/vgg-face.mat"  # download from http://www.vlfeat.org/matconvnet/pretrained/
    model, class_names, average_image, image_size = get_vggface(vgg_path)
    imgpath = "/home/hy/e/avg_face.jpg"
    img = sm.imread(imgpath)
    img = sm.imresize(img, [image_size[0], image_size[1]])
    input_arr = np.float32(img)  # h, w, c
    x = torch.from_numpy(input_arr.transpose((2, 0, 1)))  # c, h, w
    # Subtract the per-channel mean image used at training time.
    avg = torch.from_numpy(average_image)
    avg = avg.view(3, 1, 1).expand(3, 224, 224)
    x = x - avg
    x = x.contiguous()
    x = x.view(1, x.size(0), x.size(1), x.size(2))  # add batch dim
    x = Variable(x)
    out, x_pool1, x_pool2, x_pool3, x_pool4, x_pool5 = model(x)
    # plt.imshow(x_pool1.data.numpy()[0, 45])  # plot a feature map
以上这篇把vgg-face.mat权重迁移到pytorch模型示例就是小编分享给大家的全部内容了,希望能给大家一个参考,也希望大家多多支持毛票票。
声明:本文内容来源于网络,版权归原作者所有,内容由互联网用户自发贡献自行上传,本网站不拥有所有权,未作人工编辑处理,也不承担相关法律责任。如果您发现有涉嫌版权的内容,欢迎发送邮件至:czq8825#qq.com(发邮件时,请将#更换为@)进行举报,并提供相关证据,一经查实,本站将立刻删除涉嫌侵权内容。