How to Scrape Taobao's Asynchronously Loaded Data with Python and Selenium
Taobao's pages are complicated, and reverse-engineering their AJAX or JavaScript requests by hand is a lot of work, so this post drives a real browser with Selenium instead.
The goal is to scrape the information for every product that Taobao's search returns for '美食' (food).
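The whole script rests on one Selenium pattern: load a page, then block on an explicit wait until a CSS-selected element is present (or clickable) before touching it, which is what makes asynchronously loaded content safe to read. Here is that pattern in isolation, as a minimal sketch; it assumes the PhantomJS binary is on your PATH:

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

browser = webdriver.PhantomJS()
wait = WebDriverWait(browser, 10)  # give up after 10 seconds
browser.get('https://www.taobao.com')
# block until the page's JavaScript has rendered the search box
box = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#q')))
print(box.tag_name)
browser.close()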
spider.py
# encoding: utf-8
import re
import json
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup
from config import *
import pymongo

client = pymongo.MongoClient(MONGODB_URL)
db = client[MONGODB_DB]

# use PhantomJS, with a few startup arguments (see config.py)
browser = webdriver.PhantomJS(service_args=SERVICE_ARGS)
# set an explicit window size; the default is too small and breaks the page layout
browser.set_window_size(1400, 900)
wait = WebDriverWait(browser, 10)
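# Note (an aside, not part of the original script): PhantomJS is no longer
# maintained and recent Selenium releases have dropped support for it; on a
# current setup, headless Chrome is the usual replacement, roughly:
#   options = webdriver.ChromeOptions()
#   options.add_argument('--headless')
#   browser = webdriver.Chrome(options=options)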
def search():
    print('Searching...')
    # the initial page load times out easily, hence the try/except
    try:
        browser.get('https://www.taobao.com')
        # wait until both the search box and the search button are ready
        input = wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, '#q'))
        )
        submit = wait.until(
            EC.element_to_be_clickable((By.CSS_SELECTOR, '#J_TSearchForm > div.search-button > button'))
        )
        # Python 2 quirk: KEYWORD holds \u escape sequences and must be
        # decoded into real unicode before send_keys (see config.py)
        # input.send_keys('\u7f8e\u98df'.decode('unicode-escape'))
        input.send_keys(KEYWORD.decode('unicode-escape'))
        submit.click()
        # the element that shows the total number of result pages
        total = wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > div.total'))
        )
        get_product()
        return total.text
    except TimeoutException:
        return search()
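# Note: the .decode('unicode-escape') call above is purely a Python 2
# workaround. On Python 3 every str is already unicode, so you could set
# KEYWORD = '美食' in config.py and pass it to send_keys() directly.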
def next_page(page_number):
    print('Turning to page ' + str(page_number))
    try:
        input = wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > div.form > input'))
        )
        submit = wait.until(
            EC.element_to_be_clickable((By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > div.form > span.btn.J_Submit'))
        )
        input.clear()
        input.send_keys(page_number)
        submit.click()
        # confirm the jump worked: the highlighted page number in the pager
        # must match the number we just typed in
        wait.until(EC.text_to_be_present_in_element(
            (By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > ul > li.item.active > span'),
            str(page_number)))
        get_product()
    except TimeoutException:
        return next_page(page_number)
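# Note: like search(), this retries forever on TimeoutException; if Taobao
# keeps timing out, the recursion never terminates. A bounded variant (a
# sketch, not in the original) would thread a counter through the call,
# e.g. next_page(page_number, retries=3), and give up when it hits zero.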
# extract the product information from the current result page
def get_product():
    # make sure the item list has rendered before grabbing the page source
    wait.until(
        EC.presence_of_element_located((By.CSS_SELECTOR, '#mainsrp-itemlist .m-itemlist .items'))
    )
    html = browser.page_source
    soup = BeautifulSoup(html, 'lxml')
    items = soup.select('#mainsrp-itemlist .m-itemlist .items .item.J_MouserOnverReq')
    print('*** extracting items from this page ***')
    for item in items:
        img = item.select('.J_ItemPic.img')[0].get('src')
        price = item.select('.price.g_price.g_price-highlight > strong')[0].get_text()
        deal = item.select('.deal-cnt')[0].get_text()
        title = item.select('.row.row-2.title > a')[0].get_text().strip()
        shop = item.select('.row.row-3.g-clearfix > .shop > a > span:nth-of-type(2)')[0].get_text()
        location = item.select('.location')[0].get_text()
        product = {
            'img': img,
            'price': price,
            'deal': deal,
            'title': title,
            'shop': shop,
            'location': location
        }
        # json.dumps on Python 2 returns an ASCII str full of \u escapes;
        # decode it so the Chinese text prints readably
        j = json.dumps(product)
        print j.decode('unicode-escape')
        save_to_mongo(product)
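# Selector note: '.price.g_price' (no space) matches a single element that
# carries both classes, while '#mainsrp-itemlist .m-itemlist .items' (with
# spaces) descends through nested elements; mixing the two up makes select()
# silently return an empty list.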
def save_to_mongo(product):
    try:
        if db[MONGODB_TABLE].insert(product):
            print('Saved to MongoDB: ' + str(product))
    except Exception:
        print('Failed to save to MongoDB: ' + str(product))
def main():
    try:
        total = search()
        # pull the page count out of text like '共 100 页' with a regex
        s = re.compile('(\d+)')
        total = int(s.search(total).group(1))
        for i in range(2, total + 1):
            next_page(i)
    except Exception:
        print('Something went wrong')
    finally:
        browser.close()

if __name__ == '__main__':
    main()
config.py
MONGODB_URL = 'localhost'
MONGODB_DB = 'taobao'
MONGODB_TABLE = 'meishi'
SERVICE_ARGS = ['--load-images=false', '--disk-cache=true']
# the \u-escaped form of the two characters 美食 ('food');
# writing the Chinese characters directly raises an error under Python 2
KEYWORD = '\u7f8e\u98df'
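Once a crawl has finished, it is easy to confirm that the records landed in MongoDB. A quick sketch, assuming the default settings above:

import pymongo

client = pymongo.MongoClient('localhost')
collection = client['taobao']['meishi']
# print a few stored products to spot-check the scrape
for doc in collection.find().limit(3):
    print(doc)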
That is the whole of this walkthrough on scraping Taobao's asynchronously loaded data with Python and Selenium. I hope it gives you a useful reference, and thank you for supporting 毛票票.