How to scrape Tmall product details and transaction records with Python
This article shares a working example of Python code that scrapes Tmall product details and transaction records, for your reference. The details are as follows.
1. Setting up the Python environment
This post uses Python 2.7.
Modules involved: spynner, scrapy, bs4, pymssql.
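Before running the crawler it is worth confirming that all four modules actually import under Python 2.7. The snippet below is a minimal sanity check, not part of the original post; the pip package names in the comment are assumptions, and spynner additionally requires PyQt/WebKit to be installed on the system.

# coding: utf-8
# Rough environment check. Assumed pip package names (not from the original post):
#   pip install spynner scrapy beautifulsoup4 pymssql
import importlib

for name in ["spynner", "scrapy", "bs4", "pymssql"]:
    try:
        importlib.import_module(name)
        print "%s ... ok" % name
    except ImportError as e:
        print "%s ... missing (%s)" % (name, e)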
2. The Tmall data to collect
For each product the script collects the promotional price, list price, title, postage, stock, favourite (collect) count, review count, monthly sales and the product attribute list; for each transaction record it collects the buyer, the chosen style/SKU, the quantity, and the deal date and time.
3. Data-scraping workflow
Product URLs are read from the ProductURLs table in SQL Server; each product page is loaded and rendered with spynner, the detail fields are extracted by XPath, the description and transaction-record tabs are clicked, and the parsed results are written back to the py_ProductInfo and DealRecord tables, paging through the transaction list until no "next page" link remains.
4. Source code
# coding: utf-8
import random

import spynner
import pymssql
from bs4 import BeautifulSoup
from scrapy.selector import Selector

# ---------------------- connect to the database ---------------------- #
server = "localhost"
user = "sa"
password = "123456"
conn = pymssql.connect(server, user, password, "TmallData")
if conn:
    print "Database connected successfully!"
else:
    print "Database connection error!"
cursor = conn.cursor()

# ---------------------- page-interaction helpers ---------------------- #
def py_click_element(browser, pos):
    # Click an element on the page, then wait a random 3-10 seconds.
    # pos example: 'a[href="#description"]'
    browser.click(pos)
    browser.wait(random.randint(3, 10))
    return browser

def py_click_xpath(browser, xpath):
    # Resolve the xpath to its href, then click the matching <a> element.
    xpath = xpath + '/@href'
    inner_href = Selector(text=browser.html).xpath(xpath).extract()
    pos = 'a[href="' + str(inner_href[0]) + '"]'
    browser = py_click_element(browser, pos)
    return browser

def py_webpage_load(browser, url):
    # Load a URL and give the page time to render.
    browser.load(url, load_timeout=60)
    browser.wait(10)
    return browser

def py_check_element(browser, xpath):
    # Look the element up by xpath; return True if it exists, otherwise False.
    if Selector(text=browser.html).xpath(xpath).extract() != []:
        return True
    else:
        return False

def py_extract_xpath(browser, xpath):
    # Return the first match for the xpath, or "none" if nothing matches.
    if py_check_element(browser, xpath):
        return Selector(text=browser.html).xpath(xpath).extract()[0]
    else:
        return "none"

def py_extract_xpaths(browser, xpaths):
    # Extract several xpaths from the current page in one call.
    length = len(xpaths)
    results = [0] * length
    for i in range(length):
        results[i] = py_extract_xpath(browser, xpaths[i])
    return results

# ---------------------- database helpers ---------------------- #

# ---------------------- data-extraction helpers ---------------------- #
def py_getDealRecord(doc):
    # Parse the deal-record table: one row per transaction with
    # [buyer, style, quantity, deal date, deal time].
    soup = BeautifulSoup(doc, 'lxml')
    tr = soup.find_all('tr')
    total_dealRecord = [([0] * 5) for i in range(len(tr))]
    i = -1
    for this_tr in tr:
        i = i + 1
        # buyer name
        td_user = this_tr.find_all('td', attrs={'class': "cell-align-l buyer"})
        for this_td in td_user:
            total_dealRecord[i][0] = this_td.getText().strip()
        # style / SKU chosen
        td_style = this_tr.find_all('td', attrs={'class': "cell-align-l style"})
        for this_td in td_style:
            total_dealRecord[i][1] = this_td.getText(',').strip()
        # quantity
        td_quantity = this_tr.find_all('td', attrs={'class': "quantity"})
        for this_td in td_quantity:
            total_dealRecord[i][2] = this_td.getText().strip()
        # deal date and time
        td_dealtime = this_tr.find_all('td', attrs={'class': "dealtime"})
        for this_td in td_dealtime:
            total_dealRecord[i][3] = this_td.find('p', attrs={'class': "date"}).getText()
            total_dealRecord[i][4] = this_td.find('p', attrs={'class': "time"}).getText()
    return total_dealRecord

# ---------------------- fetch all product links to crawl ---------------------- #
cursor.execute("""
    select * from ProductURLs where BrandName='NB'
""")
err_log = open("H:\\Eclipse\\TmallCrawling\\HTMLParse\\errLog.txt", 'w')
InProductInfo = cursor.fetchall()
browser = spynner.Browser()
for temp_InProductInfo in InProductInfo:
    url = 'https:' + temp_InProductInfo[2]
    BrandName = temp_InProductInfo[0]
    ProductType = temp_InProductInfo[1]
    print BrandName, '\t', ProductType, '\t', url
    # url = 'https://detail.tmall.com/item.htm?id=524425656711&rn=77636d6db8dea5e30060976fdaf9768d&abbucket=19'
    try:
        browser = py_webpage_load(browser, url)
    except:
        print "Loading webpage failed."
        err_log.write(url)
        err_log.write('\n')
        continue

    # xpaths of the detail fields: promo price, list price, title, postage,
    # stock, collect count, review count, monthly sales
    xpaths = ['//*[@id="J_PromoPrice"]/dd/div/span/text()',
              '//*[@id="J_StrPriceModBox"]/dd/span/text()',
              '//*[@id="J_DetailMeta"]/div[1]/div[1]/div/div[1]/h1/text()',
              '//*[@id="J_PostageToggleCont"]/p/span/text()',
              '//*[@id="J_EmStock"]/text()',
              '//*[@id="J_CollectCount"]/text()',
              '//*[@id="J_ItemRates"]/div/span[2]/text()',
              '//*[@id="J_DetailMeta"]/div[1]/div[1]/div/ul/li[1]/div/span[2]/text()']
    out_ProductInfo = py_extract_xpaths(browser, xpaths)

    # open the description tab and collect the product attribute list
    browser = py_click_element(browser, 'a[href="#description"]')
    ProductProperty = py_extract_xpath(browser, '//*[@id="J_AttrUL"]')
    soup = BeautifulSoup(ProductProperty, 'lxml')
    li = soup.find_all('li')
    prop = ''
    for this_li in li:
        prop = prop + this_li.getText() + '\\'
    prop = prop[0:len(prop) - 1]
    out_ProductProperty = prop
    print out_ProductProperty

    cursor.execute("""
        Insert into py_ProductInfo values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
    """, (BrandName, ProductType, url,
          out_ProductInfo[2], out_ProductInfo[1],
          out_ProductInfo[0], out_ProductInfo[7],
          out_ProductInfo[1], out_ProductInfo[3],
          out_ProductInfo[4], out_ProductInfo[5],
          out_ProductProperty))
    conn.commit()

    # open the transaction-record tab and walk through its pages
    Deal_PageCount = 0
    browser = py_click_element(browser, 'a[href="#J_DealRecord"]')
    # browser.browse(True)
    DealRecord = py_extract_xpath(browser, '//*[@id="J_showBuyerList"]/table/tbody')
    out_DealRecord = py_getDealRecord(DealRecord)
    for temp_DealRecord in out_DealRecord:
        if str(temp_DealRecord[4]) == '0':
            continue
        cursor.execute("""
            Insert into DealRecord values (%s,%s,%s,%s,%s,%s)
        """, (url, temp_DealRecord[0], temp_DealRecord[1],
              temp_DealRecord[2], temp_DealRecord[3],
              temp_DealRecord[4]))
        conn.commit()
    Deal_PageCount = Deal_PageCount + 1
    print "Page", Deal_PageCount

    # the first few result pages are reachable through the numbered pager links
    for i in range(6):
        if (i == 0) or (i == 2):
            continue
        xpath = '//*[@id="J_showBuyerList"]/div/div/a[' + str(i) + ']'
        if py_check_element(browser, xpath):
            browser = py_click_xpath(browser, xpath)
            DealRecord = py_extract_xpath(browser, '//*[@id="J_showBuyerList"]/table/tbody')
            out_DealRecord = py_getDealRecord(DealRecord)
            for temp_DealRecord in out_DealRecord:
                if str(temp_DealRecord[4]) == '0':
                    continue
                cursor.execute("""
                    Insert into DealRecord values (%s,%s,%s,%s,%s,%s)
                """, (url, temp_DealRecord[0], temp_DealRecord[1],
                      temp_DealRecord[2], temp_DealRecord[3],
                      temp_DealRecord[4]))
                conn.commit()
            Deal_PageCount = Deal_PageCount + 1
            print "Page", Deal_PageCount

    # keep clicking the "next page" link (the sixth pager link) until it disappears
    while py_check_element(browser, '//*[@id="J_showBuyerList"]/div/div/a[6]'):
        browser = py_click_xpath(browser, '//*[@id="J_showBuyerList"]/div/div/a[6]')
        DealRecord = py_extract_xpath(browser, '//*[@id="J_showBuyerList"]/table/tbody')
        out_DealRecord = py_getDealRecord(DealRecord)
        for temp_DealRecord in out_DealRecord:
            if str(temp_DealRecord[4]) == '0':
                continue
            cursor.execute("""
                Insert into DealRecord values (%s,%s,%s,%s,%s,%s)
            """, (url, temp_DealRecord[0], temp_DealRecord[1],
                  temp_DealRecord[2], temp_DealRecord[3],
                  temp_DealRecord[4]))
            conn.commit()
        Deal_PageCount = Deal_PageCount + 1
        print "Page", Deal_PageCount
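The script assumes three tables already exist in the TmallData database: ProductURLs (read), and py_ProductInfo and DealRecord (written). The original post does not give their schemas; the sketch below is one plausible layout inferred from how the columns are used above, and every column name and type is an assumption rather than the author's actual definition. Note also that, as in the original insert statement, out_ProductInfo[1] (the list price) is written twice and out_ProductInfo[6] (the review count) is never written.

# coding: utf-8
# Hypothetical table layout inferred from the SELECT/INSERT statements above.
# Column names and types are assumptions, not the author's actual schema.
import pymssql

conn = pymssql.connect("localhost", "sa", "123456", "TmallData")
cursor = conn.cursor()

cursor.execute("""
CREATE TABLE ProductURLs (
    BrandName   NVARCHAR(50),    -- e.g. 'NB'
    ProductType NVARCHAR(100),
    URL         NVARCHAR(500)    -- protocol-relative, '//detail.tmall.com/...'
)
""")
cursor.execute("""
CREATE TABLE py_ProductInfo (    -- 12 columns, matching the INSERT above
    BrandName NVARCHAR(50), ProductType NVARCHAR(100), URL NVARCHAR(500),
    Title NVARCHAR(200), ListPrice NVARCHAR(50), PromoPrice NVARCHAR(50),
    MonthlySales NVARCHAR(50), ListPrice2 NVARCHAR(50), Postage NVARCHAR(50),
    Stock NVARCHAR(50), CollectCount NVARCHAR(50), Properties NVARCHAR(MAX)
)
""")
cursor.execute("""
CREATE TABLE DealRecord (        -- 6 columns, matching the INSERT above
    URL NVARCHAR(500), Buyer NVARCHAR(100), Style NVARCHAR(500),
    Quantity NVARCHAR(20), DealDate NVARCHAR(20), DealTime NVARCHAR(20)
)
""")
conn.commit()
conn.close()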
That is all for this article. I hope it helps with your learning, and please continue to support 毛票票.