Logging into Zhihu with Python: fetching your personal favorites and saving them to a Word file
I actually finished this program a long time ago but never got around to publishing it; since things haven't been too busy lately, I'm sharing it with everyone now.
It is implemented with the BeautifulSoup and urllib2 modules, and the Word file is written with the python-docx module. Installation guides for all of these are easy to find online, so I won't repeat them here.
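(For reference, on a typical Python 2 setup both third-party packages install with pip install beautifulsoup4 python-docx; the package names are inferred from the imports in the listing below, so double-check them against your environment.)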
The main functionality: log into Zhihu, fetch the questions and answers you have favorited, and save them as a Word document so you can read them when you have no network connection. Images inside answers are downloaded as well, though that part still has some problems; I'll fix it up when I have time.
Also, the regular expressions here are embarrassingly crude... I'm ashamed of myself.
One more caveat: right now every answer to a favorited question gets saved. When I find time I'll change it to keep only the first answer, or only the answer shown on the collection page (a sketch of that change follows the full listing below); otherwise, if you have favorited a lot, the resulting Word file may give you quite a shock. O(∩_∩)O haha~
Logging in may require a captcha. If you are prompted for one, you will find the captcha image in the program's folder; just type in what it shows.
# -*- coding: utf-8 -*-
# Log into Zhihu, fetch personal favorites and save them as a Word document
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import urllib
import urllib2
import cookielib
import re
from bs4 import BeautifulSoup
from docx import Document
from docx.shared import Inches
import os

# Uncomment these lines if you need a SOCKS proxy (e.g. on a company network)
#import socks
#import socket
#socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 8088)
#socket.socket = socks.socksocket

loginurl = 'http://www.zhihu.com/login'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.116 Safari/537.36', }
postdata = {
    '_xsrf': 'acab9d276ea217226d9cc94a84a231f7',
    'email': '',
    'password': '',
    'rememberme': 'y'
}
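# Note: the '_xsrf' value above is session-specific and will go stale. A more
# robust approach (a sketch, assuming Zhihu embeds the token in the login
# page as an <input name="_xsrf"> field) is to scrape it at runtime:
#
#   loginpage = urllib2.urlopen(loginurl).read()
#   xsrf_input = BeautifulSoup(loginpage).find('input', {'name': '_xsrf'})
#   if xsrf_input is not None:
#       postdata['_xsrf'] = xsrf_input.get('value')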
if not os.path.exists('myimg'):
    os.mkdir('myimg')
if os.path.exists('123.docx'):
    os.remove('123.docx')
if os.path.exists('checkcode.gif'):
    os.remove('checkcode.gif')

mydoc = Document()
questiontitle = ''

#----------------------------------------------------------------------
def dealimg(imgcontent):
    # Download every image referenced in the answer HTML, save it under
    # myimg/ and embed it into the Word document, then strip the markup.
    soup = BeautifulSoup(imgcontent)
    try:
        for imglink in soup.findAll('img'):
            if imglink is not None:
                myimg = imglink.get('src')
                #print myimg
                if myimg.find('http') >= 0:
                    imgsrc = urllib2.urlopen(myimg).read()
                    imgnamere = re.compile(r'http\S*/')
                    imgname = imgnamere.sub('', myimg)
                    #print imgname
                    with open(u'myimg' + '/' + imgname, 'wb') as code:
                        code.write(imgsrc)
                    mydoc.add_picture(u'myimg/' + imgname, width=Inches(1.25))
    except:
        pass
    strinfo = re.compile(r'<noscript>[\s\S]*</noscript>')
    imgcontent = strinfo.sub('', imgcontent)
    strinfo = re.compile(r'<img class[\s\S]*</>')
    imgcontent = strinfo.sub('', imgcontent)
    # "show all" link
    strinfo = re.compile(r'<a class="toggle-expand[\s\S]*</a>')
    imgcontent = strinfo.sub('', imgcontent)
    strinfo = re.compile(r'<a class="wrap external"[\s\S]*rel="nofollow noreferrer" target="_blank">')
    imgcontent = strinfo.sub('', imgcontent)
    imgcontent = imgcontent.replace('<i class="icon-external"></i></a>', '')
    imgcontent = imgcontent.replace('</b>', '').replace('</p>', '').replace('<p>', '').replace('<br>', '')
    return imgcontent

def enterquestionpage(pageurl):
    # Fetch one question page and append its title and answers to the document.
    html = urllib2.urlopen(pageurl).read()
    soup = BeautifulSoup(html)
    questiontitle = soup.title.string
    mydoc.add_heading(questiontitle, level=3)
    for div in soup.findAll('div', {'class': 'fixed-summary zm-editable-content clearfix'}):
        #print div
        conent = str(div).replace('<div class="fixed-summary zm-editable-content clearfix">', '').replace('</div>', '')
        conent = conent.decode('utf-8')
        conent = conent.replace('<br/>', '\n')
        conent = dealimg(conent)
        ### This part got way too convoluted -- when there is time, look for
        ### a proper HTML-processing module instead.
        conent = conent.replace('<div class="fixed-summary-mask">', '').replace('<blockquote>', '').replace('<b>', '').replace('<strong>', '').replace('</strong>', '').replace('<em>', '').replace('</em>', '').replace('</blockquote>', '')
        mydoc.add_paragraph(conent, style='BodyText3')
    #file = open('222.txt', 'a')
    #file.write(str(conent))
    #file.close()
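# A possible replacement for the chained replace() calls above (a sketch,
# assuming every tag still left after dealimg() is safe to drop): let
# BeautifulSoup strip the remaining markup in one call, e.g.
#   conent = BeautifulSoup(conent).get_text()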
def entercollectpage(pageurl):
    # Walk one collection page and process every favorited question on it.
    html = urllib2.urlopen(pageurl).read()
    soup = BeautifulSoup(html)
    for div in soup.findAll('div', {'class': 'zm-item'}):
        h2content = div.find('h2', {'class': 'zm-item-title'})
        #print h2content
        if h2content is not None:
            link = h2content.find('a')
            mylink = link.get('href')
            questionlink = 'http://www.zhihu.com' + mylink
            enterquestionpage(questionlink)
            print questionlink

def loginzhihu():
    # Post the login form, then walk every collection of the account.
    postdatastr = urllib.urlencode(postdata)
    '''
    cj = cookielib.LWPCookieJar()
    cookie_support = urllib2.HTTPCookieProcessor(cj)
    opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
    urllib2.install_opener(opener)
    '''
    h = urllib2.urlopen(loginurl)
    request = urllib2.Request(loginurl, postdatastr, headers)
    response = urllib2.urlopen(request)
    #print response.geturl()
    text = response.read()
    collecturl = 'http://www.zhihu.com/collections'
    req = urllib2.urlopen(collecturl)
    # Being redirected back to the login page means the login failed.
    if str(req.geturl()) == 'http://www.zhihu.com/?next=%2Fcollections':
        print 'login fail!'
        return
    txt = req.read()
    soup = BeautifulSoup(txt)
    count = 0
    divs = soup.findAll('div', {'class': 'zm-item'})
    if divs is None:
        print 'login fail!'
        return
    print 'login ok!\n'
    for div in divs:
        link = div.find('a')
        mylink = link.get('href')
        collectlink = 'http://www.zhihu.com' + mylink
        entercollectpage(collectlink)
        print collectlink
        # Test hook from development: only fetch the first collection
        #count += 1
        #if count == 1:
        #    return

def getcheckcode(thehtml):
    # If the login page contains a captcha, save it as checkcode.gif
    # and return True so the caller can prompt for it.
    soup = BeautifulSoup(thehtml)
    div = soup.find('div', {'class': 'js-captcha captcha-wrap'})
    if div is not None:
        #print div
        imgsrc = div.find('img')
        imglink = imgsrc.get('src')
        if imglink is not None:
            imglink = 'http://www.zhihu.com' + imglink
            imgcontent = urllib2.urlopen(imglink).read()
            with open('checkcode.gif', 'wb') as code:
                code.write(imgcontent)
            return True
        else:
            return False
    return False

if __name__ == '__main__':
    import getpass
    username = raw_input('input username: ')
    password = getpass.getpass('Enter password: ')
    postdata['email'] = username
    postdata['password'] = password
    postdatastr = urllib.urlencode(postdata)
    cj = cookielib.LWPCookieJar()
    cookie_support = urllib2.HTTPCookieProcessor(cj)
    opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
    urllib2.install_opener(opener)
    h = urllib2.urlopen(loginurl)
    request = urllib2.Request(loginurl, postdatastr, headers)
    response = urllib2.urlopen(request)
    txt = response.read()
    if getcheckcode(txt):
        # A captcha is required: the user reads it from checkcode.gif,
        # and loginzhihu() re-encodes postdata with the captcha included.
        checkcode = raw_input('input checkcode: ')
        postdata['captcha'] = checkcode
        loginzhihu()
        mydoc.save('123.docx')
    else:
        loginzhihu()
        mydoc.save('123.docx')
    print 'the end'
    raw_input()
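As for the first-answer-only change mentioned at the top, here is a minimal sketch. The function name is mine, it reuses dealimg() and the same answer markup that enterquestionpage() matches, and it skips that function's extra tag cleanup, so treat it as a starting point rather than a drop-in:

def enterquestionpage_firstanswer(pageurl):
    # Hypothetical variant of enterquestionpage(): keep only the first answer.
    html = urllib2.urlopen(pageurl).read()
    soup = BeautifulSoup(html)
    mydoc.add_heading(soup.title.string, level=3)
    divs = soup.findAll('div', {'class': 'fixed-summary zm-editable-content clearfix'})
    if divs:
        conent = dealimg(str(divs[0]).decode('utf-8'))
        mydoc.add_paragraph(conent, style='BodyText3')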
Well, that's about it. If you have any suggestions or questions, leave a comment below and I'll reply as soon as I can; or grab my contact info from the site's About page and reach me directly.