import requests


class TiebaSpider(object):
    def __init__(self, tieba_name):  # tieba_name is the name of the tieba (forum) to crawl
        self.tieba_name = tieba_name
        self.url_temp = 'https://tieba.baidu.com/f?kw=' + tieba_name + '&ie=utf-8&pn={}'
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',
        }

    def get_url_list(self):  # build the list of page URLs
        # url_list = []
        # for i in range(1000):
        #     url_list.append(self.url_temp.format(i * 50))
        # return url_list
        return [self.url_temp.format(i * 50) for i in range(1000)]  # list comprehension replacing the loop above

    def parse_url(self, url):  # send the request and return the decoded response body
        print(url)
        response = requests.get(url, headers=self.headers)
        return response.content.decode()

    def save_html(self, html_str, page_num):  # save the HTML string to a file
        file_path = '{}-第{}页.html'.format(self.tieba_name, page_num)
        with open(file_path, 'w', encoding='utf-8') as f:  # e.g. 李毅-第1页.html
            f.write(html_str)

    def run(self):  # main logic
        # 1. build the URL list
        url_list = self.get_url_list()
        # 2. iterate, send each request, get the response
        for url in url_list:
            html_str = self.parse_url(url)
            # 3. save the page
            page_num = url_list.index(url) + 1  # page number, counted from 1
            self.save_html(html_str, page_num)


if __name__ == '__main__':
    tieba_spider = TiebaSpider('李毅')
    tieba_spider.run()
Crawling the first 1000 pages of a Baidu Tieba forum (an object-oriented implementation with the requests library)
Original post: https://www.cnblogs.com/springionic/p/11085296.html
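One thing worth noting in the run() method above: url_list.index(url) rescans the whole list on every iteration just to recover the page number. Below is a minimal sketch (not from the original post) of a drop-in replacement for run() on TiebaSpider that tracks the page number with enumerate; the try/except is an added assumption about skipping a failed page rather than aborting the whole crawl.

def run(self):  # main logic, illustrative variant of the original run()
    for page_index, url in enumerate(self.get_url_list()):
        try:
            html_str = self.parse_url(url)
        except requests.RequestException as e:  # network error: skip this page and keep going
            print('failed to fetch {}: {}'.format(url, e))
            continue
        self.save_html(html_str, page_index + 1)  # page numbers start at 1

For successful pages the behavior is identical to the original loop; enumerate simply avoids the repeated list scan, and the error handling reflects one possible design choice rather than anything the original code does.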