一、The basic crawling workflow:
1、Send a request (request libraries: requests, selenium)
2、Receive the response data (returned by the server)
3、Parse and extract the data (parsing libraries: re, BeautifulSoup, XPath)
4、Save the data (storage, e.g. MongoDB — see the sketch below)
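The sections below cover steps 1–3 and save to plain files; MongoDB is only named here. As a loose sketch of step 4 with pymongo (the local connection and the database/collection names are assumptions, not from the original), saving a record about a downloaded video could look like:

import pymongo

# Assumed local MongoDB instance; host, port, and names are illustrative
client = pymongo.MongoClient('localhost', 27017)
collection = client['spider']['movies']

# Store metadata about a downloaded video (step 4 of the workflow)
collection.insert_one({'filename': '梨视频.mp4', 'source': 'https://www.pearvideo.com/'})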
二、Crawling a single video from 梨视频 (PearVideo)
# Crawl a single 梨视频 video
import requests

url = 'https://video.pearvideo.com/mp4/adshort/20190613/cont-1565846-14013215_adpkg-ad_hd.mp4'
res = requests.get(url)

# Write the downloaded video to a file
with open('梨视频.mp4', 'wb') as f:
    f.write(res.content)
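res.content buffers the entire video in memory before writing. For larger files, a streamed variant of the same download (a sketch using requests' stream/iter_content options) writes chunk by chunk instead:

import requests

url = 'https://video.pearvideo.com/mp4/adshort/20190613/cont-1565846-14013215_adpkg-ad_hd.mp4'

# stream=True defers fetching the body; iter_content yields it in chunks
res = requests.get(url, stream=True)
with open('梨视频.mp4', 'wb') as f:
    for chunk in res.iter_content(chunk_size=1024 * 64):
        f.write(chunk)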
三、Using regular expressions
1、re.findall('pattern', 'text to parse', flags): returns every non-overlapping match in the text
2、re.S: lets . also match newlines, so a pattern can match across the entire text
3、. matches any single character (except a newline, unless re.S is set)
4、* repeats the preceding pattern any number of times; .*? is the lazy form used in the code below (see the sketch after this list)
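A minimal demonstration of these points (the HTML snippet is made up for illustration; the pattern is the one used in the sections below):

import re

html = '<a href="video_123">\n<a href="video_456">'
# .*? matches lazily; re.S lets . cross the newline between the two tags
print(re.findall('<a href="video_(.*?)"', html, re.S))  # ['123', '456']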
四、Crawling all the videos on the 梨视频 homepage
import requests
import re
import uuid

# 1. Send a request
def get_page(url):
    response = requests.get(url)
    return response

# 2. Parse the data
# Parse the homepage and build the list of detail-page URLs
def parse_index(text):
    res = re.findall('<a href="video_(.*?)"', text, re.S)

    detail_url_list = []
    for m_id in res:
        detail_url = 'https://www.pearvideo.com/video_' + m_id
        detail_url_list.append(detail_url)

    return detail_url_list

# Parse a detail page and extract the video URL
def parse_detail(text):
    movie_url = re.findall('srcUrl="(.*?)"', text, re.S)[0]
    return movie_url

# 3. Save the data
def save_movie(movie_url):
    response = requests.get(movie_url)
    # uuid.uuid4() gives every file a unique name
    with open(f'{uuid.uuid4()}.mp4', 'wb') as f:
        f.write(response.content)
        f.flush()

# Entry point (PyCharm shortcut: type "main", then press Enter)
if __name__ == '__main__':
    # 1. Request the homepage
    index_res = get_page(url='https://www.pearvideo.com/')
    # 2. Parse the homepage and collect the detail-page URLs
    detail_url_list = parse_index(index_res.text)

    # 3. Request each detail-page URL
    for detail_url in detail_url_list:
        detail_res = get_page(url=detail_url)

        # 4. Parse the detail page to get the video URL
        movie_url = parse_detail(detail_res.text)
        print(movie_url)

        # 5. Save the video
        save_movie(movie_url)
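One caveat: many sites reject the default requests User-Agent, and a failed request here would write an error page to disk as an .mp4. A hedged variant of get_page (the header string is only an example, not from the original):

import requests

def get_page(url):
    # Browser-like User-Agent; a real site may need a fuller string (assumption)
    headers = {'User-Agent': 'Mozilla/5.0'}
    response = requests.get(url, headers=headers)
    response.raise_for_status()  # fail loudly on 4xx/5xx instead of saving an error page
    return response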
五、Multithreaded crawling of the 梨视频 videos
import requests
import re  # regex module
# uuid.uuid4() generates a practically unique random string
import uuid
# Import the thread-pool class
from concurrent.futures import ThreadPoolExecutor

# Cap the pool at 50 threads
pool = ThreadPoolExecutor(50)

# The three crawling steps

# 1. Send a request
def get_page(url):
    print(f'Starting async task: {url}')
    response = requests.get(url)
    return response


# 2. Parse the data
# Parse the homepage and extract the detail-page IDs
def parse_index(res):
    response = res.result()
    # Pull every video ID out of the homepage
    id_list = re.findall('<a href="video_(.*?)"', response.text, re.S)

    # Loop over the ID list
    for m_id in id_list:
        # Build the detail-page URL
        detail_url = 'https://www.pearvideo.com/video_' + m_id
        # Submit the detail-page URL to get_page; parse_detail receives the result
        pool.submit(get_page, detail_url).add_done_callback(parse_detail)


# Parse a detail page and extract the video URL
def parse_detail(res):
    response = res.result()
    movie_url = re.findall('srcUrl="(.*?)"', response.text, re.S)[0]
    # Submit the video URL to get_page asynchronously; save_movie receives the result
    pool.submit(get_page, movie_url).add_done_callback(save_movie)


# 3. Save the data
def save_movie(res):
    movie_res = res.result()
    # Write the video to a local file
    with open(f'{uuid.uuid4()}.mp4', 'wb') as f:
        f.write(movie_res.content)
        print(f'Video download finished: {movie_res.url}')
        f.flush()


if __name__ == '__main__':  # PyCharm shortcut: main + Enter
    # Step 1: send an async request via get_page and hand the result to parse_index
    url = 'https://www.pearvideo.com/'
    pool.submit(get_page, url).add_done_callback(parse_index)
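Each callback receives a Future, which is why every handler calls res.result() first. The main thread returns right after the first submit, but ThreadPoolExecutor's worker threads are non-daemon, so the process stays alive until every chained task finishes. If the callback chaining feels opaque, an equivalent structure (a sketch reusing the get_page and save_movie helpers above) keeps the control flow in main with as_completed:

from concurrent.futures import ThreadPoolExecutor, as_completed
import re

with ThreadPoolExecutor(50) as pool:
    # Fetch and parse the homepage, then fan out the detail-page requests
    index_res = pool.submit(get_page, 'https://www.pearvideo.com/').result()
    id_list = re.findall('<a href="video_(.*?)"', index_res.text, re.S)
    detail_futures = [pool.submit(get_page, 'https://www.pearvideo.com/video_' + m_id)
                      for m_id in id_list]
    # Handle each detail page as soon as its request completes
    for fut in as_completed(detail_futures):
        movie_url = re.findall('srcUrl="(.*?)"', fut.result().text, re.S)[0]
        pool.submit(get_page, movie_url).add_done_callback(save_movie)
    # Leaving the with-block waits for all remaining downloads to finish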
Source: https://www.cnblogs.com/lweiser/p/11035236.html