1 Crawler: the web spider
2 What a crawler really does: simulate a browser sending requests (requests, selenium) -> download the page source -> extract only the useful data (bs4, xpath, re) -> store it in a file or database (file, excel, mysql, redis, mongodb) (a minimal sketch follows these notes)
3 Sending the request: request url (browser devtools, packet-capture tools), request headers (the tricky part), request body (the tricky part), request method
4 Getting the response: read the response body (json format, xml format, html format (bs4, xpath), or an encrypted/unknown format that must be decrypted first)
5 Storage: MongoDB (suits json-shaped data)
6 Raising performance (multithreading, multiprocessing, coroutines); this concern is specific to the CPython interpreter (GIL: only one thread executes at any moment)
-io-bound work: use threads
-cpu-bound work: use processes
7 The scrapy framework already takes care of performance
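A minimal sketch of that request -> extract -> store pipeline; example.com, the regex, and the output file name are placeholders for illustration, not from the notes above:
import re
import requests

res = requests.get('https://example.com', headers={'user-agent': 'Mozilla/5.0'})  # simulate a browser
titles = re.findall(r'<title>(.*?)</title>', res.text)                           # extract only the useful data
with open('titles.txt', 'a', encoding='utf-8') as f:                             # store it (a database works the same way)
    f.write('\n'.join(titles) + '\n')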
1 Install: pip3 install requests
2 Image hotlink protection: the referer header tells the server which page the request came from
import requests

# 1 send a get request
# res is a python object holding the response headers, response body, etc.
header = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36',
    'referer': 'https://www.mzitu.com/225078/2'
}
res = requests.get('https://www.mzitu.com/', headers=header)
print(res.text)
res1 = requests.get('https://i3.mmzztt.com/2020/03/14a02.jpg', headers=header)
print(res1.text)     # body decoded as text (garbage for an image)
print(res1.content)  # body as raw bytes
res.json()           # parse the body as json (only if the server actually returned json)
with open('a.jpg', 'wb') as f:
    for line in res1.iter_content():
        f.write(line)
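# for large files (video etc.) a plain get loads the whole body into memory first;
# stream=True plus a chunk_size keeps memory flat (a sketch; the chunk size is an arbitrary choice)
res2 = requests.get('https://i3.mmzztt.com/2020/03/14a02.jpg', headers=header, stream=True)
with open('b.jpg', 'wb') as f:
    for chunk in res2.iter_content(chunk_size=1024):
        f.write(chunk)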
# 2 carrying data in the request url (two ways; the second is recommended)
# url encoding and decoding
from urllib.parse import urlencode, unquote
print(unquote('%E7%BE%8E%E5%A5%B3'))  # -> 美女
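# urlencode goes the other way, dict -> encoded query string:
print(urlencode({'wd': '美女'}))  # -> wd=%E7%BE%8E%E5%A5%B3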
header = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36',
}
res = requests.get('https://www.baidu.com/s?wd=美女', headers=header)
res = requests.get('https://www.baidu.com/s', headers=header, params={'wd': '美女'})
# 3 sending cookies with a request (two ways)
# way one: put them in the headers
header = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36',
    'cookie': 'key=asdfasdfasdfsdfsaasdf;key2=asdfasdf;key3=asdfasdf'
}
res = requests.get('http://127.0.0.1:8000/index/', headers=header)
# way two: the cookies argument (a dict or a CookieJar object)
header = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36',
}
res = requests.get('http://127.0.0.1:8000/index/', headers=header, cookies={'key': 'asdfasdf'})
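# a CookieJar works too, e.g. reusing the cookies of an earlier response
# (a sketch; the /login/ endpoint is made up for illustration):
res_login = requests.post('http://127.0.0.1:8000/login/', data={'name': 'lqz'})
res = requests.get('http://127.0.0.1:8000/index/', headers=header, cookies=res_login.cookies)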
# 4 sending a post request with data (urlencoded or json)
res = requests.post('http://127.0.0.1:8000/index/', data={'name': 'lqz'})
res = requests.post('http://127.0.0.1:8000/index/', json={'age': 1})
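# the difference: data= goes out form-urlencoded, json= goes out as application/json;
# the prepared request shows which content type was actually sent (a sketch,
# assumes the local test server above is running):
res = requests.post('http://127.0.0.1:8000/index/', data={'name': 'lqz'})
print(res.request.headers['Content-Type'])  # application/x-www-form-urlencoded
res = requests.post('http://127.0.0.1:8000/index/', json={'age': 1})
print(res.request.headers['Content-Type'])  # application/json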
# 5 carrying cookies automatically
session = requests.session()
res = session.post('http://127.0.0.1:8000/index/')  # suppose this request logged us in
res1 = session.get('http://127.0.0.1:8000/order/')  # no need to attach cookies by hand; the session does it
# 6 the response object
response = requests.post('http://127.0.0.1:8000/index/', data={'name': 'lqz'})
print(response.text)                # body as text
print(response.content)             # body as raw bytes
print(response.status_code)        # status code
print(response.headers)             # response headers
print(response.cookies)             # cookies (a CookieJar)
print(response.cookies.get_dict())  # cookies as a dict
print(response.cookies.items())     # cookie key/value pairs
print(response.url)                 # the requested url
print(response.history)             # list of responses from redirects that led here
print(response.encoding)            # encoding of the response
# iter_content(): pull images, video, large files out chunk by chunk
with open('a.jpg', 'wb') as f:
    for line in response.iter_content():
        f.write(line)
# 7 encoding problems
res = requests.get('http://www.autohome.com/news')
# if printing res.text produces mojibake, fix the encoding first
# way one: set it by hand
res.encoding = 'gb2312'
# way two: use the encoding detected from the body
res.encoding = res.apparent_encoding
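# apparent_encoding is guessed from the bytes of the body, so comparing the two
# shows whether the server header and the page itself disagree (a sketch):
print(res.encoding)           # what the response headers claim
print(res.apparent_encoding)  # what the body looks like, e.g. 'GB2312'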
# 8 parsing json
import json
response = requests.post('http://127.0.0.1:8000/index/', data={'name': 'lqz'})
print(type(response.text))        # str: the raw response text
print(json.loads(response.text))  # parse it yourself
print(response.json())            # equivalent to the line above
# 9 advanced usage: ssl (good to know)
import requests
response = requests.get('https://www.12306.cn', verify=False)  # skip certificate verification: a warning is printed, but it returns 200
# to use a certificate, carry it manually
import requests
response = requests.get('https://www.12306.cn',
                        cert=('/path/server.crt',
                              '/path/key'))
# 10 advanced usage: proxies
response = requests.get('http://127.0.0.1:8000/index/', proxies={'http': 'proxy-address:port'})
# proxies: free ones exist, paid ones cost money
# proxy pool: a list of proxy ips; pick one at random per request so your own ip does not get banned
# high-anonymity vs transparent proxies: behind a high-anonymity proxy the backend can never see your ip; behind a transparent one it can
# how the backend gets the real ip behind a transparent proxy: the X-Forwarded-For header
response = requests.get('https://www.baidu.com/', proxies={'http': '27.46.20.226:8888'})
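# a minimal proxy-pool sketch: keep a list of proxies and pick one at random per
# request so a single ip is never hammered (both addresses below are made up)
import random
proxy_pool = ['27.46.20.226:8888', '114.99.54.65:8118']
response = requests.get('https://www.baidu.com/',
                        proxies={'http': random.choice(proxy_pool)})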
# 11 timeout settings
import requests
response = requests.get('https://www.baidu.com',
                        timeout=0.0001)
# 12 auth settings (rarely needed nowadays)
import requests
r = requests.get('xxx', auth=('user', 'password'))
# 13 exception handling
import requests
from requests.exceptions import *  # browse requests.exceptions to see the exception types
try:
    r = requests.get('http://www.baidu.com', timeout=0.00001)
except Exception as e:
    print(e)
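# catching the concrete classes from requests.exceptions beats a bare Exception;
# a sketch with the ones hit most often:
from requests.exceptions import ConnectTimeout, ReadTimeout, ConnectionError, HTTPError
try:
    r = requests.get('http://www.baidu.com', timeout=0.00001)
    r.raise_for_status()  # raises HTTPError for 4xx/5xx status codes
except (ConnectTimeout, ReadTimeout):
    print('request timed out')
except ConnectionError:
    print('network problem')
except HTTPError as e:
    print('bad status:', e)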
# 14 uploading files
res = requests.post('http://127.0.0.1:8000/index/', files={'myfile': open('a.jpg', 'rb')})
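# a filename and content type can also be given per file as a tuple (a sketch):
res = requests.post('http://127.0.0.1:8000/index/',
                    files={'myfile': ('a.jpg', open('a.jpg', 'rb'), 'image/jpeg')})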
# login example: http://www.aa7a.cn/
import requests

session = requests.session()
data = {
    'username': '616564099@qq.com',
    'password': 'lqz123',
    'captcha': 'zdu4',
    'remember': 1,
    'ref': 'http://www.aa7a.cn/user.php?act=logout',
    'act': 'act_login',
}
rest = session.post('http://www.aa7a.cn/user.php', data=data)
print(rest.text)
# grab the cookies
cookie = rest.cookies
print(cookie)
# with the cookies attached we are logged in, so the page will contain our user info: 616564099@qq.com
rest1 = session.get('http://www.aa7a.cn/index.php')
# rest1 = requests.get('http://www.aa7a.cn/index.php')  # without the session this would not be logged in
print('616564099@qq.com' in rest1.text)
# video scraping example: https://www.pearvideo.com/
import requests
import re

res = requests.get('https://www.pearvideo.com/category_loading.jsp?reqType=5&categoryId=1&start=0')
# print(res.text)
re_video = '<a href="(.*?)" class="vervideo-lilink actplay">'
video_urls = re.findall(re_video, res.text)
# print(video_urls)
for video in video_urls:
    url = 'https://www.pearvideo.com/' + video
    print(url)
    # send a get request to the video detail page
    res_video = requests.get(url)
    # print(res_video.text)
    # break
    re_video_mp4 = 'hdUrl="",sdUrl="",ldUrl="",srcUrl="(.*?)",vdoUrl=srcUrl,skinRes'
    video_url = re.findall(re_video_mp4, res_video.text)[0]
    print(video_url)
    video_name = video_url.rsplit('/', 1)[-1]
    print(video_name)
    res_video_content = requests.get(video_url)
    with open(video_name, 'wb') as f:
        for line in res_video_content.iter_content():
            f.write(line)
Source: https://www.cnblogs.com/pythonwl/p/13411663.html