Building a Deep Neural Network (DNN) in Python
This post summarizes my work through the free online book Neural Networks and Deep Learning, building a neural network in Python to recognize handwritten digits.
The code consists of three parts:
1) loading and preprocessing the data
2) building the network class and its methods
3) a test script
1) Loading the data:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time: 2017-03-12 15:11
# @Author: CC
# @File: net_load_data.py
# @Software: PyCharm Community Edition
import numpy as np
import cPickle

def load_data():
    """Load the unpacked MNIST data and read it."""
    with open('data/mnist_pkl/mnist.pkl', 'rb') as f:
        try:
            train_data, validation_data, test_data = cPickle.load(f)
            print "the file opened successfully"
            # print train_data[0].shape  # (50000, 784)
            # print train_data[1].shape  # (50000,)
            return (train_data, validation_data, test_data)
        except EOFError:
            print 'the file open error'
            return None

def data_transform():
    """Convert the raw data into the format used for computation."""
    t_d, va_d, te_d = load_data()
    # print t_d[0].shape   # (50000, 784)
    # print te_d[0].shape  # (10000, 784)
    # print va_d[0].shape  # (10000, 784)
    # reshape each of the 50,000 training images into a (784, 1) column vector
    n = [np.reshape(x, (784, 1)) for x in t_d[0]]
    # turn each of the 50,000 training labels into a (10, 1) one-hot vector
    m = [vectors(y) for y in t_d[1]]
    train_data = zip(n, m)  # pack images and labels together as tuples
    # reshape each of the 10,000 validation images the same way
    n = [np.reshape(x, (784, 1)) for x in va_d[0]]
    validation_data = zip(n, va_d[1])  # validation labels are not vectorized
    # reshape each of the 10,000 test images the same way
    n = [np.reshape(x, (784, 1)) for x in te_d[0]]
    test_data = zip(n, te_d[1])  # test labels are not vectorized
    # print len(train_data)         # 50000
    # print train_data[0][0].shape  # (784, 1)
    # print train_data[0][1].shape  # (10, 1)
    # print test_data[0][1]         # 7
    return (train_data, validation_data, test_data)

def vectors(y):
    """Vectorize a label: return a (10, 1) one-hot column vector."""
    label = np.zeros((10, 1))
    label[y] = 1.0  # use floats for the computation
    return label
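Note that load_data expects the dataset already decompressed. The book's accompanying repository ships MNIST as mnist.pkl.gz, so a one-off unpacking step is needed first; below is a minimal sketch, assuming the archive sits next to the pickle at data/mnist_pkl/ (that path is this post's layout, adjust to your own):

# one-off unpacking step; assumes the gzipped archive from the book's repo
# lives at data/mnist_pkl/mnist.pkl.gz (adjust the path to your layout)
import gzip
import shutil

with gzip.open('data/mnist_pkl/mnist.pkl.gz', 'rb') as f_in, \
        open('data/mnist_pkl/mnist.pkl', 'wb') as f_out:
    shutil.copyfileobj(f_in, f_out)

Alternatively, cPickle.load can read straight from the gzip.open file object, skipping the intermediate file entirely.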
2) Building the network
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time: 2017-03-12 16:07
# @Author: CC
# @File: net_network.py
import numpy as np
import random

class Network(object):  # new-style class; isinstance(network, object) is True
    def __init__(self, sizes):
        self.num_layers = len(sizes)
        self.sizes = sizes
        # one weight matrix per layer pair, shape (next_layer, current_layer)
        self.weight = [np.random.randn(a1, a2) for (a1, a2) in zip(sizes[1:], sizes[:-1])]
        self.bias = [np.random.randn(a3, 1) for a3 in sizes[1:]]
        # print self.weight[0].shape  # (30, 784) for sizes [784, 30, 10]

    def SGD(self, train_data, min_batch_size, epoches, eta, test_data=None):
        """1) Shuffle the samples and split the training data into mini-batches.
        2) Compute the gradients via backpropagation.
        3) Apply the weight updates."""
        if test_data: n_test = len(test_data)
        n = len(train_data)  # 50000
        for k in xrange(epoches):  # keep updating with the new weights
            random.shuffle(train_data)  # reshuffle before every epoch
            # re-extract the mini-batches after each shuffle
            min_batches = [train_data[j:j + min_batch_size] for j in xrange(0, n, min_batch_size)]
            for min_batch in min_batches:  # fed in one batch at a time; not very efficient
                self.updata_parameter(min_batch, eta)
            if test_data:
                num = self.evaluate(test_data)
                print "the {0}th epoch: {1}/{2}".format(k, num, n_test)
            else:
                print 'epoch {0} completed'.format(k)

    def forward(self, x):
        """Propagate x through every layer; return the final activation."""
        for w, b in zip(self.weight, self.bias):
            x = sigmoid(np.dot(w, x) + b)
        return x

    def updata_parameter(self, min_batch, eta):
        """1) Compute each sample's gradient via backpropagation.
        2) Accumulate the gradients over the mini-batch.
        3) Update the weights."""
        ndeltab = [np.zeros(b.shape) for b in self.bias]
        ndeltaw = [np.zeros(w.shape) for w in self.weight]
        for x, y in min_batch:
            deltab, deltaw = self.backprop(x, y)
            ndeltab = [nb + db for nb, db in zip(ndeltab, deltab)]
            ndeltaw = [nw + dw for nw, dw in zip(ndeltaw, deltaw)]
        self.bias = [b - eta * ndb / len(min_batch) for ndb, b in zip(ndeltab, self.bias)]
        self.weight = [w - eta * ndw / len(min_batch) for ndw, w in zip(ndeltaw, self.weight)]

    def backprop(self, x, y):
        """Run the forward pass, then backpropagate; return (deltab, deltaw)."""
        activation = x
        activations = [x]  # the output of each layer
        zs = []  # weighted inputs, stored to compute the derivative of f(z)
        # feedforward
        for w, b in zip(self.weight, self.bias):
            z = np.dot(w, activation) + b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        # delta of the last layer; elementwise product of same-shaped arrays
        delta = self.top_subtract(activations[-1], y) * dsigmoid(zs[-1])
        # gradients collected layer by layer as lists
        deltaw = [np.zeros(w1.shape) for w1 in self.weight]
        deltab = [np.zeros(b1.shape) for b1 in self.bias]
        deltab[-1] = delta
        deltaw[-1] = np.dot(delta, activations[-2].transpose())
        for k in xrange(2, self.num_layers):
            delta = np.dot(self.weight[-k + 1].transpose(), delta) * dsigmoid(zs[-k])
            deltab[-k] = delta
            deltaw[-k] = np.dot(delta, activations[-k - 1].transpose())
        return (deltab, deltaw)

    def evaluate(self, test_data):
        """Accuracy on the validation/test set; each label is a plain integer."""
        z = [(np.argmax(self.forward(x)), y) for x, y in test_data]
        return sum(int(a == b) for a, b in z)

    def top_subtract(self, x, y):
        return (x - y)

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def dsigmoid(x):
    z = sigmoid(x)
    return z * (1 - z)
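Since the output-layer delta above is (a - y) * sigmoid'(z), the cost being minimized is the quadratic cost C = 0.5 * ||a - y||^2. A quick way to convince yourself the backprop code is correct is a numerical gradient check on a tiny network. The sketch below is mine, not part of the original code: the quad_cost helper, the seed, and the layer sizes are all illustrative, and it assumes net_network.py is importable.

# numerical gradient check for backprop on a tiny network (illustrative sketch)
import numpy as np
import net_network as net

np.random.seed(0)
n1 = net.Network([4, 3, 2])
x = np.random.randn(4, 1)
y = np.zeros((2, 1))
y[1] = 1.0

deltab, deltaw = n1.backprop(x, y)

def quad_cost(network, x, y):
    # quadratic cost C = 0.5*||a - y||^2, matching top_subtract * dsigmoid above
    a = network.forward(x)
    return 0.5 * np.sum((a - y) ** 2)

eps = 1e-5
i, j = 0, 0  # check one entry of the first weight matrix
n1.weight[0][i, j] += eps
c_plus = quad_cost(n1, x, y)
n1.weight[0][i, j] -= 2 * eps
c_minus = quad_cost(n1, x, y)
n1.weight[0][i, j] += eps  # restore the weight
numeric = (c_plus - c_minus) / (2 * eps)
print abs(numeric - deltaw[0][i, j])  # should be tiny, on the order of 1e-10

If the printed difference is large, the analytic and numerical gradients disagree and something in backprop is wrong.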
3) Testing the network
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time: 2017-03-12 15:24
# @Author: CC
# @File: net_test.py
import net_load_data
train_data, validation_data, test_data = net_load_data.data_transform()

import net_network as net
net1 = net.Network([784, 30, 10])  # 784 input pixels, 30 hidden neurons, 10 output classes
min_batch_size = 10
eta = 3.0  # learning rate
epoches = 30
net1.SGD(train_data, min_batch_size, epoches, eta, test_data)
print "complete"
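evaluate already reports the per-epoch accuracy during training, but it is just as easy to inspect a single prediction once SGD finishes. A small sketch, assuming the variables from net_test.py above are still in scope:

# inspect one test example after training (assumes net1 and test_data from net_test.py)
import numpy as np
x, y = test_data[0]
prediction = np.argmax(net1.forward(x))
print "predicted:", prediction, "actual label:", y  # the first test digit is a 7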
4) Results
the 9th epoch: 9405/10000
the 10th epoch: 9420/10000
the 11th epoch: 9385/10000
the 12th epoch: 9404/10000
the 13th epoch: 9398/10000
the 14th epoch: 9406/10000
the 15th epoch: 9396/10000
the 16th epoch: 9413/10000
the 17th epoch: 9405/10000
the 18th epoch: 9425/10000
the 19th epoch: 9420/10000
The network settles at roughly 94% accuracy on the 10,000 test images. Overall, the examples in this book are an excellent way to get familiar with both Python and neural networks.