Kaggle + MNIST handwritten digit recognition
Most existing handwritten-digit recognition code works directly on the standard MNIST dataset, whereas the Kaggle competition requires using the datasets provided on the site and generating predictions for its test set to submit. Here a convolutional network is built with Keras; it produces the test-set predictions directly, and the final recognition accuracy is roughly 97%.
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 6 19:07:10 2017
@author: Administrator
"""
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
import os
import pandas as pd
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
from keras import backend as K
import tensorflow as tf
# global variables
batch_size = 100
nb_classes = 10
epochs = 20
# input image dimensions
img_rows, img_cols = 28, 28
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
pool_size = (2, 2)
# convolution kernel size
kernel_size = (3, 3)
inputfile = 'F:/data/kaggle/mnist/train.csv'
inputfile2 = 'F:/data/kaggle/mnist/test.csv'
outputfile = 'F:/data/kaggle/mnist/test_label.csv'
pwd = os.getcwd()
os.chdir(os.path.dirname(inputfile))
train = pd.read_csv(os.path.basename(inputfile))  # read the Kaggle training data
os.chdir(pwd)
pwd = os.getcwd()
os.chdir(os.path.dirname(inputfile))
test = pd.read_csv(os.path.basename(inputfile2))  # read the Kaggle test data
os.chdir(pwd)
x_train = train.iloc[:, 1:785]  # the 784 pixel columns (column 0 is the label)
y_train = train['label']
y_train = np_utils.to_categorical(y_train, 10)  # one-hot encode the labels
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)  # standard MNIST, used for evaluation
x_test = mnist.test.images
y_test = mnist.test.labels
# arrange the input dimensions according to the backend's image format
if K.image_dim_ordering() == 'th':
    x_train = np.array(x_train)
    test = np.array(test)
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    test = test.reshape(test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = np.array(x_train)
    test = np.array(test)
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    test = test.reshape(test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
test = test.astype('float32')
# the Kaggle CSV pixels are 0-255; mnist.test.images is already scaled to [0, 1]
x_train /= 255
test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print(test.shape[0], 'test output samples')
model = Sequential()  # model initialisation
model.add(Convolution2D(nb_filters, (kernel_size[0], kernel_size[1]),
                        padding='same',
                        input_shape=input_shape))  # convolution layer 1
model.add(Activation('relu'))  # activation
model.add(Convolution2D(nb_filters, (kernel_size[0], kernel_size[1])))  # convolution layer 2
model.add(Activation('relu'))  # activation
model.add(MaxPooling2D(pool_size=pool_size))  # max-pooling layer
model.add(Dropout(0.25))  # randomly drop units
model.add(Flatten())  # flatten to a 1-D vector
model.add(Dense(128))  # fully connected layer 1
model.add(Activation('relu'))  # activation
model.add(Dropout(0.5))  # dropout
model.add(Dense(nb_classes))  # fully connected layer 2
model.add(Activation('softmax'))  # softmax scores over the 10 classes
# compile the model
model.compile(loss='categorical_crossentropy',
              optimizer='adadelta',
              metrics=['accuracy'])
# train the model
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1)
# evaluate the model on the standard MNIST test set
score = model.evaluate(x_test, y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
# predict labels for the Kaggle test set and write them out
y_test = model.predict(test)
sess = tf.InteractiveSession()
y_test = sess.run(tf.argmax(y_test, 1))  # class index with the highest probability
y_test = pd.DataFrame(y_test)
y_test.to_csv(outputfile)
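Note that to_csv as used above writes pandas' default integer index and an unnamed label column, which is not the layout the Kaggle Digit Recognizer competition expects for submissions (its sample file uses an ImageId column starting at 1 and a Label column). Below is a minimal sketch of reshaping the predictions into that layout, reusing the model and test arrays from the script above; the ImageId/Label column names and the submission.csv path are assumptions to check against the competition page, not part of the original script.

# Hypothetical submission writer for the Kaggle Digit Recognizer format
# (assumes columns ImageId starting at 1 and Label; adjust if the competition page differs).
import numpy as np
import pandas as pd
labels = np.argmax(model.predict(test), axis=1)  # predicted digit for each Kaggle test row
submission = pd.DataFrame({'ImageId': np.arange(1, len(labels) + 1),  # 1-based row ids
                           'Label': labels})
submission.to_csv('F:/data/kaggle/mnist/submission.csv', index=False)  # drop the pandas index column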