An Example of Combining a CNN and an LSTM for Classification in Keras
Without further ado, let's go straight to the code.
import keras
from keras.models import Model
from keras.layers import (Input, Reshape, Convolution2D, ZeroPadding2D,
                          MaxPooling2D, GlobalMaxPooling2D, LSTM,
                          Dense, Dropout, LeakyReLU, concatenate)

# Note: the layer arguments below (border_mode / init / output_dim) are the
# legacy Keras 1 spellings; Keras 2 still accepts them with deprecation
# warnings. The Reshape to (1, 40, 80) also assumes the image data format is
# channels_first ('th' dim ordering) in keras.json.
def get_model():
    n_classes = 6
    inp = Input(shape=(40, 80))

    # CNN branch: view the (40, 80) input as a one-channel image
    reshape = Reshape((1, 40, 80))(inp)
    # conv block 1
    conv1 = Convolution2D(32, 3, 3, border_mode='same', init='glorot_uniform')(reshape)
    l1 = LeakyReLU(alpha=0.33)(conv1)
    conv2 = ZeroPadding2D(padding=(1, 1))(l1)
    conv2 = Convolution2D(32, 3, 3, border_mode='same', init='glorot_uniform')(conv2)
    l2 = LeakyReLU(alpha=0.33)(conv2)
    m2 = MaxPooling2D((3, 3), strides=(3, 3))(l2)
    d2 = Dropout(0.25)(m2)
    # conv block 2
    conv3 = ZeroPadding2D(padding=(1, 1))(d2)
    conv3 = Convolution2D(64, 3, 3, border_mode='same', init='glorot_uniform')(conv3)
    l3 = LeakyReLU(alpha=0.33)(conv3)
    conv4 = ZeroPadding2D(padding=(1, 1))(l3)
    conv4 = Convolution2D(64, 3, 3, border_mode='same', init='glorot_uniform')(conv4)
    l4 = LeakyReLU(alpha=0.33)(conv4)
    m4 = MaxPooling2D((3, 3), strides=(3, 3))(l4)
    d4 = Dropout(0.25)(m4)
    # conv block 3
    conv5 = ZeroPadding2D(padding=(1, 1))(d4)
    conv5 = Convolution2D(128, 3, 3, border_mode='same', init='glorot_uniform')(conv5)
    l5 = LeakyReLU(alpha=0.33)(conv5)
    conv6 = ZeroPadding2D(padding=(1, 1))(l5)
    conv6 = Convolution2D(128, 3, 3, border_mode='same', init='glorot_uniform')(conv6)
    l6 = LeakyReLU(alpha=0.33)(conv6)
    m6 = MaxPooling2D((3, 3), strides=(3, 3))(l6)
    d6 = Dropout(0.25)(m6)
    # conv block 4
    conv7 = ZeroPadding2D(padding=(1, 1))(d6)
    conv7 = Convolution2D(256, 3, 3, border_mode='same', init='glorot_uniform')(conv7)
    l7 = LeakyReLU(alpha=0.33)(conv7)
    conv8 = ZeroPadding2D(padding=(1, 1))(l7)
    conv8 = Convolution2D(256, 3, 3, border_mode='same', init='glorot_uniform')(conv8)
    l8 = LeakyReLU(alpha=0.33)(conv8)
    # collapse the feature maps into a 256-dim vector
    g = GlobalMaxPooling2D()(l8)

    # LSTM branch: read the same (40, 80) input as a 40-step sequence
    lstm1 = LSTM(input_shape=(40, 80),
                 output_dim=256,
                 activation='tanh',
                 return_sequences=False)(inp)
    dl1 = Dropout(0.3)(lstm1)
    den1 = Dense(200, activation='relu')(dl1)
    dl2 = Dropout(0.3)(den1)

    # merge the two branches, then classify
    g2 = concatenate([g, dl2], axis=1)
    d10 = Dense(1024)(g2)
    l10 = LeakyReLU(alpha=0.33)(d10)
    l10 = Dropout(0.5)(l10)
    l11 = Dense(n_classes, activation='softmax')(l10)

    model = Model(inputs=inp, outputs=l11)
    model.summary()
    # compile the model
    adam = keras.optimizers.Adam(lr=0.0005, beta_1=0.95, beta_2=0.999, epsilon=1e-08)
    model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
    return model
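For reference, here is a minimal sketch of how this model could be trained. The input shape (40, 80) and the one-hot encoding of the 6 classes follow from the model definition above; the random placeholder data and the epoch/batch settings are my own assumptions, not from the original article.

import numpy as np
from keras.utils import to_categorical

model = get_model()

# placeholder data: N samples of shape (40, 80), integer labels in {0, ..., 5}
N = 1000
X = np.random.rand(N, 40, 80)
y = to_categorical(np.random.randint(0, 6, size=N), num_classes=6)

# categorical_crossentropy expects one-hot targets, matching the
# 6-way softmax output of the model
model.fit(X, y, epochs=10, batch_size=32, validation_split=0.1)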
Supplementary knowledge: how to combine different models in Keras (using a CNN and an LSTM as an example)
You may run into situations where several models need to be knit together, such as a CNN and an LSTM. In Keras I usually open with
model = Sequential()
then model.add, model.add, ... and at the end
model.compile(loss=["mae"], optimizer='adam', metrics=[mape])
So when the models suddenly need to be joined together, what do you do?
The example code below combines a CNN and an LSTM in series: the CNN first extracts features through convolution and pooling, and these features are then fed into the LSTM to produce the final output.
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # suppress TensorFlow startup logging

import keras
from keras.models import Model
from keras.layers import Input, Reshape, Dense, Dropout, Activation, Convolution2D, MaxPooling2D, LSTM
from matplotlib import pyplot  # for plotting the training curve; unused in this snippet

def design_model():
    # design network
    inp = Input(shape=(11, 5))
    # view the (11, 5) input as a one-channel 11x5 image (channels-last)
    reshape = Reshape((11, 5, 1))(inp)
    conv1 = Convolution2D(32, 3, 3, border_mode='same', init='glorot_uniform')(reshape)
    l1 = Activation('relu')(conv1)
    conv2 = Convolution2D(64, 3, 3, border_mode='same')(l1)
    l2 = Activation('relu')(conv2)
    m2 = MaxPooling2D(pool_size=(2, 2), border_mode='valid')(l2)  # -> (5, 2, 64)
    # flatten the 5x2x64 feature maps into a 10-step sequence of 64-dim vectors
    reshape1 = Reshape((10, 64))(m2)
    lstm1 = LSTM(input_shape=(10, 64), output_dim=30, activation='tanh', return_sequences=False)(reshape1)
    dl1 = Dropout(0.3)(lstm1)
    den2 = Dense(1, activation='relu')(dl1)
    model = Model(inputs=inp, outputs=den2)
    model.summary()  # print a model overview
    adam = keras.optimizers.Adam(lr=0.001, beta_1=0.95, beta_2=0.999, epsilon=1e-08)
    model.compile(loss=['mae'], optimizer=adam, metrics=['mape'])
    return model

model = design_model()
# train_x/train_y, test_x/test_y, epochs and batch_size are assumed to be
# prepared by the caller
history = model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size,
                    validation_data=(test_x, test_y), verbose=2, shuffle=True)
# save the model file after training
model.save('model_trained.h5')
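Once saved, the trained model can be restored with Keras's load_model; the file name here simply matches the save call above.

from keras.models import load_model

# restores the architecture, weights, and optimizer state in one step
model = load_model('model_trained.h5')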
In the example code above, the CNN and the LSTM are connected in series, i.e. the CNN's output serves as the LSTM's input, following a single path from start to finish.
If you want a parallel arrangement instead, where separate branches are later merged,
you can use the concatenate function to join the output of the CNN branch with the output of the LSTM branch, then attach further layers on top to complete the model graph:
g2 = concatenate([g, dl2], axis=1)
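As a self-contained illustration of this parallel pattern, here is a minimal sketch; the layer sizes and shapes are arbitrary assumptions for the example, not taken from the code above.

from keras.models import Model
from keras.layers import Input, Reshape, Convolution2D, GlobalMaxPooling2D, LSTM, Dense, concatenate

inp = Input(shape=(40, 80))

# CNN branch: one-channel "image" view of the input (channels-last)
c = Reshape((40, 80, 1))(inp)
c = Convolution2D(32, 3, 3, border_mode='same')(c)
c = GlobalMaxPooling2D()(c)                # -> (None, 32)

# LSTM branch: sequence view of the same input
r = LSTM(64, return_sequences=False)(inp)  # -> (None, 64)

# merge both branches, then classify
merged = concatenate([c, r], axis=1)       # -> (None, 96)
out = Dense(6, activation='softmax')(merged)

model = Model(inputs=inp, outputs=out)
model.compile(loss='categorical_crossentropy', optimizer='adam')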
To sum up:
Besides Sequential, this is Keras's functional way of building models, which offers more flexibility; the key point is that at the end, model = Model(inputs=inp, outputs=den2) fixes the inputs and outputs of the whole model.
This example of combining a CNN and an LSTM for classification in Keras is everything I wanted to share. I hope it gives you a useful reference, and I hope you will continue to support 毛票票.