首页 > 数据库技术 > 详细

PYTHON 爬虫笔记十:利用selenium+PyQuery实现淘宝美食数据搜集并保存至MongoDB(实战项目三)

时间:2018-08-14 00:50:58      阅读:240      评论:0      收藏:0      [点我收藏+]

利用selenium+PyQuery实现淘宝美食数据搜集并保存至MongoDB

  •  目标站点分析

  •  流程框架

  • 爬虫实战

  1. spider详情页

    import re

    import pymongo
    from pyquery import PyQuery as pq
    from selenium import webdriver
    from selenium.common.exceptions import TimeoutException
    from selenium.webdriver.common.by import By
    from selenium.webdriver.support import expected_conditions as EC
    from selenium.webdriver.support.ui import WebDriverWait

    from config import *
    
    # Module-level MongoDB handle; connection settings come from config.py.
    client = pymongo.MongoClient(MONGO_URL)
    db = client[MONGO_DB]
    
    
    
    #browser = webdriver.Chrome()
    browser = webdriver.PhantomJS(service_args=SERVICE_ARGS)    # headless PhantomJS browser (NOTE: deprecated in newer Selenium — consider Chrome headless)
    wait = WebDriverWait(browser, 10)   # shared explicit wait, 10-second timeout
    
    # Fixed window size so the page renders the desktop layout the selectors expect.
    browser.set_window_size(1400,900)
    def search():       # request the search page
        """Open Taobao, search for KEYWORD, scrape page 1 of the results.

        Returns the text of the pager's total-pages element
        (e.g. "共 100 页,"); retries on timeout.
        """
        print('正在搜索。。。')
        try:
            browser.get('https://world.taobao.com/')   # request Taobao home page
            input = wait.until(
                EC.presence_of_element_located((By.CSS_SELECTOR, '#mq'))
            )
            submit = wait.until(EC.element_to_be_clickable(
                (By.CSS_SELECTOR, '#J_PopSearch > div.sb-search > div > form > input[type="submit"]:nth-child(2)')))
            input.send_keys(KEYWORD)
            submit.click()
            total = wait.until(EC.presence_of_element_located(
                (By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > div.total')))
            get_products()
            return total.text
        except TimeoutException:
            # Selenium waits raise TimeoutException (not the builtin TimeoutError);
            # retry and propagate the result so main() gets a value.
            return search()
    
    def next_page(page_number): # pagination
        """Jump to result page *page_number* via the pager input, then scrape it."""
        print('正在翻页。。。', page_number)
        try:
            input = wait.until(
                EC.presence_of_element_located((By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > div.form > input'))  # wait for the page-number input box
            )
            submit = wait.until(EC.element_to_be_clickable(
                (By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > div.form > span.btn.J_Submit')))  # wait for the submit button
            input.clear()
            input.send_keys(page_number)
            submit.click()
            # Confirm the highlighted pager item shows the requested page number.
            wait.until(EC.text_to_be_present_in_element(
                (By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > ul > li.item.active > span'),
                str(page_number)))
            get_products()
        except TimeoutException:
            # Retry the SAME page; the original next_page(next_page()) called
            # itself with no argument and would raise TypeError.
            next_page(page_number)
    
    def get_products():
        """Parse the current result page with PyQuery and store every item."""
        wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#mainsrp-itemlist .items .item')))
        html = browser.page_source      # full HTML of the rendered results page
        doc = pq(html)                  # PyQuery document
        items = doc('#mainsrp-itemlist .items .item').items()  # CSS-select all result items
        for item in items:
            product = {
                'title': item.find('.title').text(),
                'location': item.find('.location').text(),
                'price': item.find('.price').text(),
                'deal': item.find('.deal-cnt').text()[:-3],  # drop trailing "人付款" suffix
                'shop': item.find('.shop').text(),
                'image': item.find('.pic .img').attr('src'),
            }
    
            print(product)
            save_to_monge(product)
    
    def save_to_monge(result):
        """Insert one product dict into the MongoDB collection; log the outcome."""
        try:
            # insert() was removed in pymongo 3.x; insert_one() is the supported API
            # and its InsertOneResult is always truthy on success.
            if db[MONGO_TABLE].insert_one(result):
                print('存储成功!', result)
        except Exception:
            print('存储失败!', result)
    def main():
        """Entry point: search, read the page count, then walk every page."""
        try:
            total = search()
            # total looks like "共 100 页,"; raw-string regex extracts the digits.
            total = int(re.compile(r'(\d+)').search(total).group(1))
            for i in range(2, total + 1):
                next_page(i)
        except Exception:
            print('出错啦')
        finally:
            # Always release the browser, even on the success path.
            browser.close()
    
    if __name__ == '__main__':
        main()
  2. config配置页

    # MongoDB connection settings.
    MONGO_URL = 'localhost'
    MONGO_DB = 'taobao'
    MONGO_TABLE = 'taobao'
    
    # PhantomJS flags: skip image loading and disable the disk cache for speed.
    SERVICE_ARGS = ['--load-images=false', '--disk-cache=false']
    
    # Search keyword ("美食" = gourmet food).
    KEYWORD = '美食'

     

PYTHON 爬虫笔记十:利用selenium+PyQuery实现淘宝美食数据搜集并保存至MongoDB(实战项目三)

原文:https://www.cnblogs.com/darwinli/p/9471895.html

(0)
(0)
   
举报
评论 一句话评论(0)
关于我们 - 联系我们 - 留言反馈 - 联系我们:wmxa8@hotmail.com
© 2014 bubuko.com 版权所有
打开技术之扣,分享程序人生!