使用Keras预训练模型ResNet50进行图像分类方式
Keras提供了一些用ImageNet训练过的模型:Xception,VGG16,VGG19,ResNet50,InceptionV3。在使用这些模型的时候,有一个参数include_top表示是否包含模型顶部的全连接层,如果包含,则可以将图像分为ImageNet中的1000类,如果不包含,则可以利用这些参数来做一些定制的事情。
在运行时自动下载有可能会失败,需要去网站中手动下载,放在“~/.keras/models/”中,使用WinPython则在“settings/.keras/models/”中。
修正:表示当前是训练模式还是测试模式的参数K.learning_phase()文中表述和使用有误,在该函数说明中可以看到:
The learning phase flag is a bool tensor (0 = test, 1 = train),所以0是测试模式,1是训练模式,部分网络结构下两者有差别。
这里使用ResNet50预训练模型,对Caltech101数据集进行图像分类。只有CPU,运行较慢,但是在训练集固定的情况下,较慢的过程只需要运行一次。
该预训练模型的中文文档介绍在http://keras-cn.readthedocs.io/en/latest/other/application/#resnet50。
我使用的版本:
1.Ubuntu16.04.3
2.Python2.7
3.Keras2.0.8
4. Tensorflow 1.3.0
5.Numpy1.13.1
6.python-opencv2.4.9.1+dfsg-1.5ubuntu1
7.h5py2.7.0
从文件夹中提取图像数据的方式:
函数:
def eachFile(filepath):
    """Return the names of the entries inside *filepath* as a list.

    The original decoded every name with 'gbk' to fix garbled Chinese
    file names under Python 2; on Python 3 os.listdir already returns
    str, so the decode is applied only to byte strings.
    """
    out = []
    for entry in os.listdir(filepath):
        # Decode only byte strings (Python 2); Python 3 names are already str.
        child = entry.decode('gbk') if isinstance(entry, bytes) else entry
        out.append(child)
    return out


def get_data(data_name, train_left=0.0, train_right=0.7, train_all=0.7,
             resize=True, data_format=None, t=''):
    """Load images from pic_dir_data and split them into train/test sets.

    The split is cached in an HDF5 file under pic_dir_out so the slow
    image-reading pass only runs once per configuration.  *t* selects
    which split to build/return: 'train' or 'test' (anything else
    returns None).  Relies on module globals: Width, Height,
    pic_dir_out, pic_dir_data.
    """
    file_name = os.path.join(
        pic_dir_out,
        data_name + t + '_' + str(train_left) + '_' + str(train_right) +
        '_' + str(Width) + "X" + str(Height) + ".h5")
    print(file_name)
    if os.path.exists(file_name):  # reuse a previously cached split
        f = h5py.File(file_name, 'r')
        if t == 'train':
            X_train = f['X_train'][:]
            y_train = f['y_train'][:]
            f.close()
            return (X_train, y_train)
        elif t == 'test':
            X_test = f['X_test'][:]
            y_test = f['y_test'][:]
            f.close()
            return (X_test, y_test)
        else:
            return
    # Only validates/normalizes the data_format string; result unused below.
    data_format = conv_utils.normalize_data_format(data_format)
    pic_dir_set = eachFile(pic_dir_data)
    X_train = []
    y_train = []
    X_test = []
    y_test = []
    label = 0
    for pic_dir in pic_dir_set:
        print(pic_dir_data + pic_dir)
        if not os.path.isdir(os.path.join(pic_dir_data, pic_dir)):
            continue
        pic_set = eachFile(os.path.join(pic_dir_data, pic_dir))
        pic_index = 0
        train_count = int(len(pic_set) * train_all)
        train_l = int(len(pic_set) * train_left)
        train_r = int(len(pic_set) * train_right)
        for pic_name in pic_set:
            if not os.path.isfile(os.path.join(pic_dir_data, pic_dir, pic_name)):
                continue
            img = cv2.imread(os.path.join(pic_dir_data, pic_dir, pic_name))
            if img is None:  # unreadable/corrupt image: skip
                continue
            if resize:
                img = cv2.resize(img, (Width, Height))
            img = img.reshape(-1, Width, Height, 3)  # add leading batch dim
            # NOTE(review): this condition was garbled in the scraped source
            # ("<"/">" eaten as HTML tags); reconstructed as: indices in
            # [train_l, train_r) go to the training set, indices >= train_count
            # to the test set — confirm against the original article.
            if pic_index >= train_l and pic_index < train_r:
                X_train.append(img)
                y_train.append(label)
            elif pic_index >= train_count:
                X_test.append(img)
                y_test.append(label)
            pic_index += 1
        if len(pic_set) > 0:  # only advance the label for non-empty classes
            label += 1
    f = h5py.File(file_name, 'w')
    if t == 'train':
        X_train = np.concatenate(X_train, axis=0)
        y_train = np.array(y_train)
        f.create_dataset('X_train', data=X_train)
        f.create_dataset('y_train', data=y_train)
        f.close()
        return (X_train, y_train)
    elif t == 'test':
        X_test = np.concatenate(X_test, axis=0)
        y_test = np.array(y_test)
        f.create_dataset('X_test', data=X_test)
        f.create_dataset('y_test', data=y_test)
        f.close()
        return (X_test, y_test)
    else:
        return
调用:
# Configure the global image size / directory layout and load the
# cached Caltech101 training split (70% of each class).
global Width, Height, pic_dir_out, pic_dir_data
Width = 224
Height = 224
num_classes = 102  # Caltech101 has 102 classes; cifar10 would be 10
pic_dir_out = '/home/ccuux3/pic_cnn/pic_out/'
pic_dir_data = '/home/ccuux3/pic_cnn/pic_dataset/Caltech101/'
sub_dir = '224_resnet50/'
if not os.path.isdir(os.path.join(pic_dir_out, sub_dir)):
    os.mkdir(os.path.join(pic_dir_out, sub_dir))
pic_dir_mine = os.path.join(pic_dir_out, sub_dir)
(X_train, y_train) = get_data("Caltech101_color_data_", 0.0, 0.7,
                              data_format='channels_last', t='train')
y_train = np_utils.to_categorical(y_train, num_classes)  # one-hot labels
载入预训练模型ResNet50,并将训练图像经过网络运算得到数据,不包含顶部的全连接层,得到的结果存成文件,以后可以直接调用(由于我内存不够,所以拆分了一下):
# Build ResNet50 without its top fully-connected layers and run every
# training image through it once, caching the features in an HDF5 file
# so this slow pass never has to be repeated for a fixed training set.
input_tensor = Input(shape=(224, 224, 3))
base_model = ResNet50(input_tensor=input_tensor, include_top=False, weights='imagenet')
# base_model = ResNet50(input_tensor=input_tensor, include_top=False, weights=None)
get_resnet50_output = K.function([base_model.layers[0].input, K.learning_phase()],
                                 [base_model.layers[-1].output])
file_name = os.path.join(pic_dir_mine, 'resnet50_train_output' + '.h5')
if os.path.exists(file_name):
    f = h5py.File(file_name, 'r')
    resnet50_train_output = f['resnet50_train_output'][:]
    f.close()
else:
    resnet50_train_output = []
    delta = 10  # small batches to keep memory usage down
    for i in range(0, len(X_train), delta):
        print(i)
        # learning phase 0 = test mode (inference behavior for BN/dropout)
        one_resnet50_train_output = get_resnet50_output([X_train[i:i + delta], 0])[0]
        resnet50_train_output.append(one_resnet50_train_output)
    resnet50_train_output = np.concatenate(resnet50_train_output, axis=0)
    f = h5py.File(file_name, 'w')
    f.create_dataset('resnet50_train_output', data=resnet50_train_output)
    f.close()
将ResNet50网络产生的结果用于图像分类:
# Small classifier head trained on the cached 1x1x2048 ResNet50 features:
# flatten -> 1024-unit ReLU layer -> softmax over the classes.
input_tensor = Input(shape=(1, 1, 2048))
x = Flatten()(input_tensor)
x = Dense(1024, activation='relu')(x)
predictions = Dense(num_classes, activation='softmax')(x)
model = Model(inputs=input_tensor, outputs=predictions)
model.compile(optimizer=Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
训练图像数据集:
print('\nTraining------------')  # optionally resume from saved weights, train, save new weights
cm = 0  # bump this counter to continue training across multiple runs
cm_str = '' if cm == 0 else str(cm)
cm2_str = '' if (cm + 1) == 0 else str(cm + 1)
if cm >= 1:  # resume from the weights saved by the previous run
    model.load_weights(os.path.join(pic_dir_mine, 'cnn_model_Caltech101_resnet50_' + cm_str + '.h5'))
model.fit(resnet50_train_output, y_train, epochs=10, batch_size=128)
model.save_weights(os.path.join(pic_dir_mine, 'cnn_model_Caltech101_resnet50_' + cm2_str + '.h5'))
测试图像数据集:
# Load the test split, compute (or reload) its cached ResNet50 features,
# then predict class probabilities with the trained head.
(X_test, y_test) = get_data("Caltech101_color_data_", 0.0, 0.7,
                            data_format='channels_last', t='test')
y_test = np_utils.to_categorical(y_test, num_classes)
file_name = os.path.join(pic_dir_mine, 'resnet50_test_output' + '.h5')
if os.path.exists(file_name):
    f = h5py.File(file_name, 'r')
    resnet50_test_output = f['resnet50_test_output'][:]
    f.close()
else:
    resnet50_test_output = []
    delta = 10  # small batches to keep memory usage down
    for i in range(0, len(X_test), delta):
        print(i)
        # learning phase 0 = test mode (inference behavior for BN/dropout)
        one_resnet50_test_output = get_resnet50_output([X_test[i:i + delta], 0])[0]
        resnet50_test_output.append(one_resnet50_test_output)
    resnet50_test_output = np.concatenate(resnet50_test_output, axis=0)
    f = h5py.File(file_name, 'w')
    f.create_dataset('resnet50_test_output', data=resnet50_test_output)
    f.close()
print('\nTesting------------')  # evaluate on the test set
class_name_list = get_name_list(pic_dir_data)  # class names for the per-class report
pred = model.predict(resnet50_test_output, batch_size=32)
输出测试集各类别top-5的准确率:
# Report overall and per-class top-5 accuracy: a prediction counts as
# correct when the true class is among the N highest-scoring classes.
N = 5
pred_list = []
for row in pred:
    pred_list.append(row.argsort()[-N:][::-1])  # indices of the N largest scores
pred_array = np.array(pred_list)
test_arg = np.argmax(y_test, axis=1)  # true class index per sample
class_count = [0 for _ in range(num_classes)]
class_acc = [0 for _ in range(num_classes)]
for i in range(len(test_arg)):
    class_count[test_arg[i]] += 1
    if test_arg[i] in pred_array[i]:
        class_acc[test_arg[i]] += 1
print('top-' + str(N) + ' all acc:', str(sum(class_acc)) + '/' + str(len(test_arg)),
      sum(class_acc) / float(len(test_arg)))
for i in range(num_classes):
    print(i, class_name_list[i], 'acc:' + str(class_acc[i]) + '/' + str(class_count[i]))
完整代码:
# -*- coding: utf-8 -*-
"""Image classification on Caltech101 using cached ResNet50 features.

ResNet50 (ImageNet weights, no top) is run once over the dataset and its
outputs are cached to HDF5; a small dense head is then trained on those
features and evaluated with top-1 / top-5 accuracy reports.
"""
import os

import cv2
import h5py
import numpy as np
from keras import backend as K
from keras.applications.resnet50 import ResNet50
from keras.layers import Dense, Flatten, Input
from keras.models import Model
from keras.optimizers import Adam
from keras.utils import conv_utils, np_utils


def get_name_list(filepath):
    """Return the names of the sub-directories (one per class) in *filepath*."""
    out = []
    for entry in os.listdir(filepath):
        if os.path.isdir(os.path.join(filepath, entry)):
            # Decode only byte strings (Python 2); Python 3 names are already str.
            child = entry.decode('gbk') if isinstance(entry, bytes) else entry
            out.append(child)
    return out


def eachFile(filepath):
    """Return the names of all entries inside *filepath* as a list."""
    out = []
    for entry in os.listdir(filepath):
        child = entry.decode('gbk') if isinstance(entry, bytes) else entry
        out.append(child)
    return out


def get_data(data_name, train_left=0.0, train_right=0.7, train_all=0.7,
             resize=True, data_format=None, t=''):
    """Load images from pic_dir_data and split them into train/test sets.

    The split is cached in an HDF5 file under pic_dir_out so the slow
    image-reading pass only runs once per configuration.  *t* selects
    which split to build/return: 'train' or 'test' (anything else
    returns None).  Relies on module globals: Width, Height,
    pic_dir_out, pic_dir_data.
    """
    file_name = os.path.join(
        pic_dir_out,
        data_name + t + '_' + str(train_left) + '_' + str(train_right) +
        '_' + str(Width) + "X" + str(Height) + ".h5")
    print(file_name)
    if os.path.exists(file_name):  # reuse a previously cached split
        f = h5py.File(file_name, 'r')
        if t == 'train':
            X_train = f['X_train'][:]
            y_train = f['y_train'][:]
            f.close()
            return (X_train, y_train)
        elif t == 'test':
            X_test = f['X_test'][:]
            y_test = f['y_test'][:]
            f.close()
            return (X_test, y_test)
        else:
            return
    # Only validates/normalizes the data_format string; result unused below.
    data_format = conv_utils.normalize_data_format(data_format)
    pic_dir_set = eachFile(pic_dir_data)
    X_train = []
    y_train = []
    X_test = []
    y_test = []
    label = 0
    for pic_dir in pic_dir_set:
        print(pic_dir_data + pic_dir)
        if not os.path.isdir(os.path.join(pic_dir_data, pic_dir)):
            continue
        pic_set = eachFile(os.path.join(pic_dir_data, pic_dir))
        pic_index = 0
        train_count = int(len(pic_set) * train_all)
        train_l = int(len(pic_set) * train_left)
        train_r = int(len(pic_set) * train_right)
        for pic_name in pic_set:
            if not os.path.isfile(os.path.join(pic_dir_data, pic_dir, pic_name)):
                continue
            img = cv2.imread(os.path.join(pic_dir_data, pic_dir, pic_name))
            if img is None:  # unreadable/corrupt image: skip
                continue
            if resize:
                img = cv2.resize(img, (Width, Height))
            img = img.reshape(-1, Width, Height, 3)  # add leading batch dim
            # NOTE(review): this condition was garbled in the scraped source
            # ("<"/">" eaten as HTML tags); reconstructed as: indices in
            # [train_l, train_r) go to the training set, indices >= train_count
            # to the test set — confirm against the original article.
            if pic_index >= train_l and pic_index < train_r:
                X_train.append(img)
                y_train.append(label)
            elif pic_index >= train_count:
                X_test.append(img)
                y_test.append(label)
            pic_index += 1
        if len(pic_set) > 0:  # only advance the label for non-empty classes
            label += 1
    f = h5py.File(file_name, 'w')
    if t == 'train':
        X_train = np.concatenate(X_train, axis=0)
        y_train = np.array(y_train)
        f.create_dataset('X_train', data=X_train)
        f.create_dataset('y_train', data=y_train)
        f.close()
        return (X_train, y_train)
    elif t == 'test':
        X_test = np.concatenate(X_test, axis=0)
        y_test = np.array(y_test)
        f.create_dataset('X_test', data=X_test)
        f.create_dataset('y_test', data=y_test)
        f.close()
        return (X_test, y_test)
    else:
        return


def _report_top_n(pred, y_test, class_name_list, num_classes, N):
    """Print overall and per-class top-N accuracy.

    A prediction counts as correct when the true class index is among
    the N highest-scoring classes.  (The original script had this block
    copy-pasted twice, for N=1 and N=5.)
    """
    pred_list = []
    for row in pred:
        pred_list.append(row.argsort()[-N:][::-1])  # indices of the N largest scores
    pred_array = np.array(pred_list)
    test_arg = np.argmax(y_test, axis=1)  # true class index per sample
    class_count = [0 for _ in range(num_classes)]
    class_acc = [0 for _ in range(num_classes)]
    for i in range(len(test_arg)):
        class_count[test_arg[i]] += 1
        if test_arg[i] in pred_array[i]:
            class_acc[test_arg[i]] += 1
    print('top-' + str(N) + ' all acc:', str(sum(class_acc)) + '/' + str(len(test_arg)),
          sum(class_acc) / float(len(test_arg)))
    for i in range(num_classes):
        print(i, class_name_list[i], 'acc:' + str(class_acc[i]) + '/' + str(class_count[i]))


def main():
    """Train and evaluate the Caltech101 classifier on cached ResNet50 features."""
    global Width, Height, pic_dir_out, pic_dir_data
    Width = 224
    Height = 224
    num_classes = 102  # Caltech101 has 102 classes; cifar10 would be 10
    pic_dir_out = '/home/ccuux3/pic_cnn/pic_out/'
    pic_dir_data = '/home/ccuux3/pic_cnn/pic_dataset/Caltech101/'
    sub_dir = '224_resnet50/'
    if not os.path.isdir(os.path.join(pic_dir_out, sub_dir)):
        os.mkdir(os.path.join(pic_dir_out, sub_dir))
    pic_dir_mine = os.path.join(pic_dir_out, sub_dir)
    (X_train, y_train) = get_data("Caltech101_color_data_", 0.0, 0.7,
                                  data_format='channels_last', t='train')
    y_train = np_utils.to_categorical(y_train, num_classes)  # one-hot labels

    # ResNet50 without its top FC layers; features cached to HDF5.
    input_tensor = Input(shape=(224, 224, 3))
    base_model = ResNet50(input_tensor=input_tensor, include_top=False, weights='imagenet')
    # base_model = ResNet50(input_tensor=input_tensor, include_top=False, weights=None)
    get_resnet50_output = K.function([base_model.layers[0].input, K.learning_phase()],
                                     [base_model.layers[-1].output])

    file_name = os.path.join(pic_dir_mine, 'resnet50_train_output' + '.h5')
    if os.path.exists(file_name):
        f = h5py.File(file_name, 'r')
        resnet50_train_output = f['resnet50_train_output'][:]
        f.close()
    else:
        resnet50_train_output = []
        delta = 10  # small batches to keep memory usage down
        for i in range(0, len(X_train), delta):
            print(i)
            # learning phase 0 = test mode (inference behavior for BN/dropout)
            one_resnet50_train_output = get_resnet50_output([X_train[i:i + delta], 0])[0]
            resnet50_train_output.append(one_resnet50_train_output)
        resnet50_train_output = np.concatenate(resnet50_train_output, axis=0)
        f = h5py.File(file_name, 'w')
        f.create_dataset('resnet50_train_output', data=resnet50_train_output)
        f.close()

    # Classifier head on the 1x1x2048 feature maps.
    input_tensor = Input(shape=(1, 1, 2048))
    x = Flatten()(input_tensor)
    x = Dense(1024, activation='relu')(x)
    predictions = Dense(num_classes, activation='softmax')(x)
    model = Model(inputs=input_tensor, outputs=predictions)
    model.compile(optimizer=Adam(), loss='categorical_crossentropy', metrics=['accuracy'])

    print('\nTraining------------')
    cm = 0  # bump this counter to continue training across multiple runs
    cm_str = '' if cm == 0 else str(cm)
    cm2_str = '' if (cm + 1) == 0 else str(cm + 1)
    if cm >= 1:  # resume from the weights saved by the previous run
        model.load_weights(os.path.join(pic_dir_mine,
                                        'cnn_model_Caltech101_resnet50_' + cm_str + '.h5'))
    model.fit(resnet50_train_output, y_train, epochs=10, batch_size=128)
    model.save_weights(os.path.join(pic_dir_mine,
                                    'cnn_model_Caltech101_resnet50_' + cm2_str + '.h5'))

    (X_test, y_test) = get_data("Caltech101_color_data_", 0.0, 0.7,
                                data_format='channels_last', t='test')
    y_test = np_utils.to_categorical(y_test, num_classes)
    file_name = os.path.join(pic_dir_mine, 'resnet50_test_output' + '.h5')
    if os.path.exists(file_name):
        f = h5py.File(file_name, 'r')
        resnet50_test_output = f['resnet50_test_output'][:]
        f.close()
    else:
        resnet50_test_output = []
        delta = 10
        for i in range(0, len(X_test), delta):
            print(i)
            one_resnet50_test_output = get_resnet50_output([X_test[i:i + delta], 0])[0]
            resnet50_test_output.append(one_resnet50_test_output)
        resnet50_test_output = np.concatenate(resnet50_test_output, axis=0)
        f = h5py.File(file_name, 'w')
        f.create_dataset('resnet50_test_output', data=resnet50_test_output)
        f.close()

    print('\nTesting------------')
    class_name_list = get_name_list(pic_dir_data)  # class names for the report
    pred = model.predict(resnet50_test_output, batch_size=32)
    f = h5py.File(os.path.join(pic_dir_mine, 'pred_' + cm2_str + '.h5'), 'w')
    f.create_dataset('pred', data=pred)
    f.close()

    _report_top_n(pred, y_test, class_name_list, num_classes, 1)
    print('----------------------------------------------------')
    _report_top_n(pred, y_test, class_name_list, num_classes, 5)


if __name__ == '__main__':
    main()
运行结果:
UsingTensorFlowbackend. /home/ccuux3/pic_cnn/pic_out/Caltech101_color_data_train_0.0_0.7_224X224.h5 Training------------ Epoch1/10 6353/6353[==============================]-5s-loss:1.1269-acc:0.7494 Epoch2/10 6353/6353[==============================]-4s-loss:0.1603-acc:0.9536 Epoch3/10 6353/6353[==============================]-4s-loss:0.0580-acc:0.9855 Epoch4/10 6353/6353[==============================]-4s-loss:0.0312-acc:0.9931 Epoch5/10 6353/6353[==============================]-4s-loss:0.0182-acc:0.9956 Epoch6/10 6353/6353[==============================]-4s-loss:0.0111-acc:0.9976 Epoch7/10 6353/6353[==============================]-4s-loss:0.0090-acc:0.9981 Epoch8/10 6353/6353[==============================]-4s-loss:0.0082-acc:0.9987 Epoch9/10 6353/6353[==============================]-4s-loss:0.0069-acc:0.9994 Epoch10/10 6353/6353[==============================]-4s-loss:0.0087-acc:0.9987 /home/ccuux3/pic_cnn/pic_out/Caltech101_color_data_test_0.0_0.7_224X224.h5 Testing------------ ('top-1allacc:','2597/2792',0.9301575931232091) (0,u'62.mayfly','acc:10/12') (1,u'66.Motorbikes','acc:240/240') (2,u'68.octopus','acc:7/11') (3,u'94.umbrella','acc:21/23') (4,u'90.strawberry','acc:10/11') (5,u'86.stapler','acc:13/14') (6,u'83.sea_horse','acc:15/18') (7,u'72.pigeon','acc:13/14') (8,u'89.stop_sign','acc:19/20') (9,u'4.BACKGROUND_Google','acc:125/141') (10,u'22.cougar_face','acc:18/21') (11,u'81.scissors','acc:9/12') (12,u'100.wrench','acc:8/12') (13,u'57.Leopards','acc:60/60') (14,u'46.hawksbill','acc:29/30') (15,u'30.dolphin','acc:19/20') (16,u'9.bonsai','acc:39/39') (17,u'35.euphonium','acc:18/20') (18,u'44.gramophone','acc:16/16') (19,u'74.platypus','acc:7/11') (20,u'14.camera','acc:15/15') (21,u'55.lamp','acc:15/19') (22,u'38.Faces_easy','acc:129/131') (23,u'54.ketch','acc:28/35') (24,u'33.elephant','acc:18/20') (25,u'3.ant','acc:8/13') (26,u'49.helicopter','acc:26/27') (27,u'36.ewer','acc:26/26') (28,u'78.rooster','acc:14/15') (29,u'70.pagoda','acc:15/15') 
(30,u'58.llama','acc:20/24') (31,u'5.barrel','acc:15/15') (32,u'101.yin_yang','acc:18/18') (33,u'18.cellphone','acc:18/18') (34,u'59.lobster','acc:7/13') (35,u'17.ceiling_fan','acc:14/15') (36,u'16.car_side','acc:37/37') (37,u'50.ibis','acc:24/24') (38,u'76.revolver','acc:23/25') (39,u'84.snoopy','acc:7/11') (40,u'87.starfish','acc:26/26') (41,u'12.buddha','acc:24/26') (42,u'52.joshua_tree','acc:20/20') (43,u'43.gerenuk','acc:10/11') (44,u'65.minaret','acc:23/23') (45,u'91.sunflower','acc:26/26') (46,u'56.laptop','acc:24/25') (47,u'77.rhino','acc:17/18') (48,u'1.airplanes','acc:239/240') (49,u'88.stegosaurus','acc:16/18') (50,u'23.crab','acc:17/22') (51,u'8.binocular','acc:8/10') (52,u'31.dragonfly','acc:18/21') (53,u'6.bass','acc:15/17') (54,u'95.watch','acc:72/72') (55,u'0.accordion','acc:17/17') (56,u'98.wild_cat','acc:9/11') (57,u'67.nautilus','acc:16/17') (58,u'40.flamingo','acc:20/21') (59,u'92.tick','acc:12/15') (60,u'47.headphone','acc:12/13') (61,u'24.crayfish','acc:15/21') (62,u'97.wheelchair','acc:17/18') (63,u'27.cup','acc:15/18') (64,u'25.crocodile','acc:14/15') (65,u'2.anchor','acc:7/13') (66,u'19.chair','acc:17/19') (67,u'39.ferry','acc:21/21') (68,u'60.lotus','acc:16/20') (69,u'13.butterfly','acc:26/28') (70,u'34.emu','acc:14/16') (71,u'64.metronome','acc:10/10') (72,u'82.scorpion','acc:24/26') (73,u'7.beaver','acc:12/14') (74,u'48.hedgehog','acc:16/17') (75,u'37.Faces','acc:131/131') (76,u'45.grand_piano','acc:30/30') (77,u'79.saxophone','acc:11/12') (78,u'26.crocodile_head','acc:9/16') (79,u'80.schooner','acc:15/19') (80,u'93.trilobite','acc:26/26') (81,u'28.dalmatian','acc:21/21') (82,u'10.brain','acc:28/30') (83,u'61.mandolin','acc:10/13') (84,u'11.brontosaurus','acc:11/13') (85,u'63.menorah','acc:25/27') (86,u'85.soccer_ball','acc:20/20') (87,u'51.inline_skate','acc:9/10') (88,u'71.panda','acc:11/12') (89,u'53.kangaroo','acc:24/26') (90,u'99.windsor_chair','acc:16/17') (91,u'42.garfield','acc:11/11') (92,u'29.dollar_bill','acc:16/16') 
(93,u'20.chandelier','acc:30/33') (94,u'96.water_lilly','acc:6/12') (95,u'41.flamingo_head','acc:13/14') (96,u'73.pizza','acc:13/16') (97,u'21.cougar_body','acc:15/15') (98,u'75.pyramid','acc:16/18') (99,u'69.okapi','acc:12/12') (100,u'15.cannon','acc:11/13') (101,u'32.electric_guitar','acc:19/23') ---------------------------------------------------- ('top-5allacc:','2759/2792',0.9881805157593123) (0,u'62.mayfly','acc:12/12') (1,u'66.Motorbikes','acc:240/240') (2,u'68.octopus','acc:11/11') (3,u'94.umbrella','acc:23/23') (4,u'90.strawberry','acc:11/11') (5,u'86.stapler','acc:14/14') (6,u'83.sea_horse','acc:16/18') (7,u'72.pigeon','acc:14/14') (8,u'89.stop_sign','acc:20/20') (9,u'4.BACKGROUND_Google','acc:141/141') (10,u'22.cougar_face','acc:19/21') (11,u'81.scissors','acc:11/12') (12,u'100.wrench','acc:10/12') (13,u'57.Leopards','acc:60/60') (14,u'46.hawksbill','acc:30/30') (15,u'30.dolphin','acc:20/20') (16,u'9.bonsai','acc:39/39') (17,u'35.euphonium','acc:20/20') (18,u'44.gramophone','acc:16/16') (19,u'74.platypus','acc:9/11') (20,u'14.camera','acc:15/15') (21,u'55.lamp','acc:18/19') (22,u'38.Faces_easy','acc:131/131') (23,u'54.ketch','acc:34/35') (24,u'33.elephant','acc:20/20') (25,u'3.ant','acc:10/13') (26,u'49.helicopter','acc:27/27') (27,u'36.ewer','acc:26/26') (28,u'78.rooster','acc:15/15') (29,u'70.pagoda','acc:15/15') (30,u'58.llama','acc:24/24') (31,u'5.barrel','acc:15/15') (32,u'101.yin_yang','acc:18/18') (33,u'18.cellphone','acc:18/18') (34,u'59.lobster','acc:13/13') (35,u'17.ceiling_fan','acc:14/15') (36,u'16.car_side','acc:37/37') (37,u'50.ibis','acc:24/24') (38,u'76.revolver','acc:25/25') (39,u'84.snoopy','acc:10/11') (40,u'87.starfish','acc:26/26') (41,u'12.buddha','acc:25/26') (42,u'52.joshua_tree','acc:20/20') (43,u'43.gerenuk','acc:11/11') (44,u'65.minaret','acc:23/23') (45,u'91.sunflower','acc:26/26') (46,u'56.laptop','acc:25/25') (47,u'77.rhino','acc:18/18') (48,u'1.airplanes','acc:240/240') (49,u'88.stegosaurus','acc:18/18') 
(50,u'23.crab','acc:22/22') (51,u'8.binocular','acc:10/10') (52,u'31.dragonfly','acc:20/21') (53,u'6.bass','acc:16/17') (54,u'95.watch','acc:72/72') (55,u'0.accordion','acc:17/17') (56,u'98.wild_cat','acc:11/11') (57,u'67.nautilus','acc:17/17') (58,u'40.flamingo','acc:21/21') (59,u'92.tick','acc:13/15') (60,u'47.headphone','acc:12/13') (61,u'24.crayfish','acc:21/21') (62,u'97.wheelchair','acc:18/18') (63,u'27.cup','acc:16/18') (64,u'25.crocodile','acc:15/15') (65,u'2.anchor','acc:12/13') (66,u'19.chair','acc:19/19') (67,u'39.ferry','acc:21/21') (68,u'60.lotus','acc:19/20') (69,u'13.butterfly','acc:27/28') (70,u'34.emu','acc:16/16') (71,u'64.metronome','acc:10/10') (72,u'82.scorpion','acc:26/26') (73,u'7.beaver','acc:14/14') (74,u'48.hedgehog','acc:17/17') (75,u'37.Faces','acc:131/131') (76,u'45.grand_piano','acc:30/30') (77,u'79.saxophone','acc:12/12') (78,u'26.crocodile_head','acc:14/16') (79,u'80.schooner','acc:19/19') (80,u'93.trilobite','acc:26/26') (81,u'28.dalmatian','acc:21/21') (82,u'10.brain','acc:30/30') (83,u'61.mandolin','acc:13/13') (84,u'11.brontosaurus','acc:13/13') (85,u'63.menorah','acc:25/27') (86,u'85.soccer_ball','acc:20/20') (87,u'51.inline_skate','acc:10/10') (88,u'71.panda','acc:12/12') (89,u'53.kangaroo','acc:26/26') (90,u'99.windsor_chair','acc:17/17') (91,u'42.garfield','acc:11/11') (92,u'29.dollar_bill','acc:16/16') (93,u'20.chandelier','acc:32/33') (94,u'96.water_lilly','acc:12/12') (95,u'41.flamingo_head','acc:14/14') (96,u'73.pizza','acc:16/16') (97,u'21.cougar_body','acc:15/15') (98,u'75.pyramid','acc:18/18') (99,u'69.okapi','acc:12/12') (100,u'15.cannon','acc:12/13') (101,u'32.electric_guitar','acc:23/23')
以上这篇使用Keras预训练模型ResNet50进行图像分类方式就是小编分享给大家的全部内容了,希望能给大家一个参考,也希望大家多多支持毛票票。