Python's standard library provides modules such as urllib, urllib2, and httplib for making HTTP requests, but their APIs are clunky. They were built for a different era and a different internet, and they demand an enormous amount of work, including overriding various methods, to accomplish even the simplest tasks.
Requests is an Apache2-licensed HTTP library written in Python. It is a high-level wrapper around Python's built-in modules that makes network requests far more pleasant for Python developers; with Requests you can easily do just about anything a browser can do.
1. GET requests
# 1. GET without parameters
import requests

ret = requests.get('https://github.com/timeline.json')
print(ret.url)
print(ret.text)

# 2. GET with parameters
import requests

payload = {'key1': 'value1', 'key2': 'value2'}
ret = requests.get("http://httpbin.org/get", params=payload)
print(ret.url)
print(ret.text)
2. POST requests
# 1. Basic POST
import requests

payload = {'key1': 'value1', 'key2': 'value2'}
ret = requests.post("http://httpbin.org/post", data=payload)
print(ret.text)

# 2. Sending custom headers and a JSON body
import requests
import json

url = 'https://api.github.com/some/endpoint'
payload = {'some': 'data'}
headers = {'content-type': 'application/json'}

ret = requests.post(url, data=json.dumps(payload), headers=headers)
print(ret.text)
print(ret.cookies)
3. Other request methods
requests.get(url, params=None, **kwargs)
requests.post(url, data=None, json=None, **kwargs)
requests.put(url, data=None, **kwargs)
requests.head(url, **kwargs)
requests.delete(url, **kwargs)
requests.patch(url, data=None, **kwargs)
requests.options(url, **kwargs)

# All of the above are built on top of this method:
requests.request(method, url, **kwargs)
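Since every convenience method delegates to requests.request, the two calls below are interchangeable. A minimal sketch against httpbin.org (the URL and query string are just examples) illustrating the equivalence:

import requests

# These two calls produce the same HTTP request:
r1 = requests.get("http://httpbin.org/get", params={"q": "python"})
r2 = requests.request("get", "http://httpbin.org/get", params={"q": "python"})

print(r1.url == r2.url)              # True
print(r1.status_code, r2.status_code)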
4. More parameters
2.1 url
requests.post("http://httpbin.org/post", data=payload)
requests.request(method='post', url='http://127.0.0.1:8000/test/')

2.2 headers
# Send custom request headers to the server
requests.request(
    method='POST', url='http://127.0.0.1:8000/test/',
    json={'k1': 'v1', 'k2': '水电费'},
    headers={'Content-Type': 'application/x-www-form-urlencoded'}
)

2.3 cookies
# Send cookies to the server
requests.request(
    method='POST',
    url='http://127.0.0.1:8000/test/',
    data={'k1': 'v1', 'k2': 'v2'},
    cookies={'cook1': 'value1'},
)
# A CookieJar can also be used (the dict form is a wrapper around it)
from http.cookiejar import CookieJar
from http.cookiejar import Cookie

obj = CookieJar()
obj.set_cookie(Cookie(version=0, name='c1', value='v1', port=None, domain='', path='/',
                      secure=False, expires=None, discard=True, comment=None, comment_url=None,
                      rest={'HttpOnly': None}, rfc2109=False, port_specified=False,
                      domain_specified=False, domain_initial_dot=False, path_specified=False))
requests.request(method='POST', url='http://127.0.0.1:8000/test/', data={'k1': 'v1', 'k2': 'v2'}, cookies=obj)

2.4 params
# - can be a dict
# - can be a string
# - can be bytes (ASCII range only)
requests.request(method='get', url='http://127.0.0.1:8000/test/', params={'k1': 'v1', 'k2': '水电费'})
requests.request(method='get', url='http://127.0.0.1:8000/test/', params="k1=v1&k2=水电费&k3=v3&k3=vv3")
requests.request(method='get', url='http://127.0.0.1:8000/test/', params=bytes("k1=v1&k2=k2&k3=v3&k3=vv3", encoding='utf8'))

2.5 data (request body)
requests.request(
    method='POST',
    url='http://127.0.0.1:8000/test/',
    data="k1=v1;k2=v2;k3=v3;k3=v4",
    headers={'Content-Type': 'application/x-www-form-urlencoded'}
)
requests.request(
    method='POST',
    url='http://127.0.0.1:8000/test/',
    data=open('data_file.py', mode='r', encoding='utf-8'),
    # the file content is: k1=v1;k2=v2;k3=v3;k3=v4
    headers={'Content-Type': 'application/x-www-form-urlencoded'}
)

# A dict passed to data= is form-encoded into the request body, e.g.
# requests.post(..., data={'user': 'root', 'pwd': '123'})
# sends roughly: POST /index HTTP/1.1\r\nHost: c1.com\r\n\r\nuser=root&pwd=123

2.6 json (request body)

# The object passed via json= is serialized to a string with json.dumps(...)
# and sent as the request body, with the Content-Type header set to
# {'Content-Type': 'application/json'}
requests.request(
    method='POST', url='http://127.0.0.1:8000/test/', json={'k1': 'v1', 'k2': '水电费'}
)
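The json= parameter is effectively a shortcut for serializing the payload yourself. A minimal sketch against httpbin.org (the URL is just an example) showing the two equivalent ways to send a JSON body:

import json
import requests

payload = {'k1': 'v1', 'k2': '水电费'}

# Explicit serialization plus header:
r1 = requests.post('http://httpbin.org/post',
                   data=json.dumps(payload),
                   headers={'Content-Type': 'application/json'})

# The json= shortcut serializes and sets the header for you:
r2 = requests.post('http://httpbin.org/post', json=payload)

print(r1.json()['json'] == r2.json()['json'])  # True: the server sees the same body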
2.7 Proxies
# Without proxy authentication
proxie_dict = {
    "http": "77.75.105.165",
    "https": "77.75.105.123",
}
ret = requests.get("http://www.baidu.com", proxies=proxie_dict)
print(ret.headers)
# With proxy authentication
from requests.auth import HTTPProxyAuth

proxyDict = {
    'http': '77.75.105.165',
    'https': '77.75.105.165',
}
auth = HTTPProxyAuth('username', 'mypassword')

r = requests.get("http://www.google.com", proxies=proxyDict, auth=auth)
print(r.text)
2.8 Uploading files (files)
# Send a file
file_dict = {
    'f1': open('xxxx.log', 'rb')
}
requests.request(method='POST', url='http://127.0.0.1:8000/test/', files=file_dict)
# Send a file with a custom filename
file_dict = {
    'f1': ('test.txt', open('readme', 'rb'))
}
requests.request(method='POST', url='http://127.0.0.1:8000/test/', files=file_dict)
# Send a file with a custom filename, with the content given as a string
file_dict = {
    'f1': ('test.txt', "hahsfaksfa9kasdjflaksdjf")
}
requests.request(method='POST', url='http://127.0.0.1:8000/test/', files=file_dict)
# Send a file with a custom filename, content type and extra headers
file_dict = {
    'f1': ('test.txt', "hahsfaksfa9kasdjflaksdjf", 'application/text', {'k1': '0'})
}
requests.request(method='POST', url='http://127.0.0.1:8000/test/', files=file_dict)
2.9 Authentication (auth)
Internally, basic authentication joins the username and password, base64-encodes them, and passes the result to the backend in a request header:
- "user:password"
- base64("user:password")
- "Basic base64(user:password)"
- Request header: Authorization: "Basic base64(user:password)"
from requests.auth import HTTPBasicAuth, HTTPDigestAuth

ret = requests.get('https://api.github.com/user', auth=HTTPBasicAuth('qwqw', '11213'))
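To make the header construction described above concrete, here is a minimal sketch (not the library's internal code) that builds the Authorization header by hand with base64; it produces the same request as HTTPBasicAuth:

import base64
import requests

user, password = 'qwqw', '11213'
token = base64.b64encode(f'{user}:{password}'.encode('utf-8')).decode('ascii')

# Equivalent to auth=HTTPBasicAuth(user, password)
ret = requests.get('https://api.github.com/user',
                   headers={'Authorization': 'Basic ' + token})
print(ret.status_code)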
2.10 Timeouts
# A single number applies to both the connect and the read timeout;
# a (connect, read) tuple sets the two timeouts separately
ret1 = requests.get('http://google.com/', timeout=1)
print(ret1)

ret2 = requests.get('http://google.com/', timeout=(5, 1))
print(ret2)
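If the server does not respond within the timeout, requests raises an exception instead of returning a response, so in practice the calls above are usually wrapped in a try/except. A minimal sketch, assuming google.com may be unreachable:

import requests

try:
    ret = requests.get('http://google.com/', timeout=(5, 1))
    print(ret.status_code)
except requests.exceptions.Timeout:
    print('request timed out')
except requests.exceptions.RequestException as e:
    print('request failed:', e)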
2.11 Allowing redirects
# allow_redirects
ret = requests.get("http://127.0.0.1:8000/test/", allow_redirects=False)
print(ret.text)
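When redirects are allowed (the default), the intermediate responses are kept in response.history. A small sketch against httpbin.org (example URL) showing the difference:

import requests

# Follow the redirect (default): history holds the 302, the final response is the target page
r1 = requests.get('http://httpbin.org/redirect/1')
print(r1.status_code, [resp.status_code for resp in r1.history])

# Do not follow the redirect: we get the 302 itself back
r2 = requests.get('http://httpbin.org/redirect/1', allow_redirects=False)
print(r2.status_code, r2.headers.get('Location'))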
2.12 Downloading large files (stream)
ret = requests.get('http://127.0.0.1:8000/test/', stream=True)
print(ret.content)
ret.close()

from contextlib import closing

with closing(requests.get('http://httpbin.org/get', stream=True)) as r1:
    # process the response here
    for i in r1.iter_content():
        print(i)
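A common use of stream=True is to write a large download to disk chunk by chunk instead of loading it all into memory. A minimal sketch, with the URL and filename as placeholders:

import requests

url = 'http://127.0.0.1:8000/test/'   # placeholder URL
with requests.get(url, stream=True) as resp:
    with open('download.bin', 'wb') as fh:
        for chunk in resp.iter_content(chunk_size=8192):
            if chunk:                  # skip keep-alive chunks
                fh.write(chunk)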
2.13 Certificates (cert)
- Sites like Baidu or Tencent => no certificate has to be supplied (the system handles certificates automatically)
- Custom certificate:
requests.get('http://127.0.0.1:8000/test/', cert="xxxx/xxx/xxx.pem")
requests.get('http://127.0.0.1:8000/test/', cert=("xxxx/xxx/xxx.pem", "xxx.xx.xx.key"))
2.14 Verification (verify)
verify = False    # skip verification of the server's SSL certificate
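A short sketch of the two common uses of verify, assuming a self-signed test server at 127.0.0.1 (placeholder): verify=False disables certificate checking, while a path points requests at a custom CA bundle.

import requests

# Skip certificate verification (useful for self-signed test servers, not for production)
requests.get('https://127.0.0.1:8000/test/', verify=False)

# Verify against a custom CA bundle instead of the system store
requests.get('https://127.0.0.1:8000/test/', verify='/path/to/ca_bundle.pem')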
2.15 session
import requests

session = requests.Session()

### 1. First visit any page to obtain a cookie
i1 = session.get(url="http://dig.chouti.com/help/service")

### 2. Log in, carrying the cookie from the previous request; the backend authorizes the gpsd value in that cookie
i2 = session.post(
    url="http://dig.chouti.com/login",
    data={
        'phone': "8615131255089",
        'password': "xxxxxx",
        'oneMonth': ""
    }
)

i3 = session.post(
    url="http://dig.chouti.com/link/vote?linksId=8589623",
)
print(i3.text)
5. Summary
1. Basic crawler operations
   a. Crawlers
      - targeted
      - non-targeted
   b. Download the page, then filter it (e.g. with regular expressions)

========= open-source modules =========

1. requests module
   - methods
   - parameters
   - session:
         session = requests.Session()
         session.get()
         session.post()
     response = requests.get('http://www.autohome.com.cn/news/')
     response.text
   Summary:
         response = requests.get('URL')
         response.text
         response.content
         response.encoding
         response.apparent_encoding
         response.status_code

2. BeautifulSoup module
         soup = BeautifulSoup(response.text, features='html.parser')
         target = soup.find(id='auto-channel-lazyload-article')
   Summary:
         find        # the first match
         find_all    # all matches

         soup = BeautifulSoup('<html>...</html>', features='html.parser')
         v1 = soup.find('div')             # the first div among soup's children
         v1 = soup.find(id='i1')           # the first element with id='i1'
         v1 = soup.find('div', id='i1')    # the first div with id='i1'
         v2 = soup.find_all('div')
         v2 = soup.find_all(id='i1')
         v2 = soup.find_all('div', id='i1')
         obj = v1
         obj = v2[0]
         obj.text     # the element's text
         obj.attrs    # the element's attributes

Login:
   - page refresh: form submission
   - no page refresh: Ajax submission

Requirement 2: log in to GitHub automatically with requests.

Requirement 3: crawl GitHub
   - send request headers
   - send cookies
   - the request body contains:
         commit: Sign in
         utf8: ?
         authenticity_token: iWlPKAsJ9nQNDaqC47P27GWx37a08iBv/0io8C4QPUluL1JxyWJSt0ZlgBBWv3BeFJ4ywbR5dKWzSqwzhILH6Q==
         login: Yun-Wangj
         password: yun258762

Requirement 4: log in to lagou.com
   - the password is encrypted
   - either find the JavaScript and reimplement the encryption in Python,
   - or capture the ciphertext and reuse it as-is (ciphertext <=> ciphertext)
   - the Referer header is the address of the previous request and can be used for hotlink protection

Summary:
   Request headers:
      - user-agent
      - referer
      - host
      - cookie
      - special headers, found by inspecting the previous request (e.g. lagou.com)
   Request body:
      - raw data
      - raw data + token
      - ciphertext: either find the algorithm, or reuse the captured ciphertext
   Two common login patterns (see the sketch below):
      - POST the login to obtain a cookie, then carry that cookie on later requests
      - GET an unauthorized cookie first, POST the login while carrying it so the backend authorizes it, then carry it on later requests
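Both login patterns can be expressed with requests.Session, which carries cookies across requests automatically. A hedged sketch with placeholder URLs and field names (example.com, /login, /profile and the form fields are assumptions, not a real site):

import requests

# Pattern 1: POST the login, keep the returned cookie for later requests
s1 = requests.Session()
s1.post('http://example.com/login', data={'user': 'root', 'pwd': '123'})
s1.get('http://example.com/profile')   # the session sends the login cookie automatically

# Pattern 2: GET first to receive an unauthorized cookie, then POST the login
# while carrying it so the backend authorizes that cookie, then reuse it
s2 = requests.Session()
s2.get('http://example.com/index')     # server sets an anonymous cookie
s2.post('http://example.com/login', data={'user': 'root', 'pwd': '123'})
s2.get('http://example.com/profile')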
Official documentation: http://cn.python-requests.org/zh_CN/latest/user/quickstart.html#id4
BeautifulSoup is a module that takes an HTML or XML string and parses it into a structured document, after which its methods can be used to quickly locate specific elements, making element lookup in HTML or XML simple.
from bs4 import BeautifulSoup

html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
asdf
    <div class="title">
        <b>The Dormouse's story总共</b>
        <h1>f</h1>
    </div>
<div class="story">Once upon a time there were three little sisters; and their names were
    <a class="sister0" id="link1">Els<span>f</span>ie</a>,
    <a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
    <a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</div>
ad<br/>sf
<p class="story">...</p>
</body>
</html>
"""

soup = BeautifulSoup(html_doc, features="lxml")

# Find the first a tag
tag1 = soup.find(name='a')
# Find all a tags
tag2 = soup.find_all(name='a')
# Find the tag with id=link2
tag3 = soup.select('#link2')
Installation:
pip3 install beautifulsoup4
Usage example:
from bs4 import BeautifulSoup

html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
    ...
</body>
</html>
"""

soup = BeautifulSoup(html_doc, features="lxml")
1. name, the tag name
# tag = soup.find('a')
# name = tag.name     # get
# print(name)
# tag.name = 'span'   # set
# print(soup)
2. attrs, the tag's attributes
# tag = soup.find('a')
# attrs = tag.attrs            # get
# print(attrs)
# tag.attrs = {'ik': 123}      # set
# tag.attrs['id'] = 'iiiii'    # set
# print(soup)
3. children, all direct child tags
# body = soup.find('body')
# v = body.children
4. descendants, all descendant tags (children, grandchildren, and so on)
# body = soup.find('body')
# v = body.descendants
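To make the difference concrete, a small self-contained sketch (the markup here is an assumption, not the example document above) that lists the direct children of body versus all of its descendants:

from bs4 import BeautifulSoup
from bs4.element import Tag

soup = BeautifulSoup("<body><div><a>link</a></div><p>text</p></body>", "html.parser")
body = soup.find('body')

# children: only the direct children of body
print([node.name for node in body.children if isinstance(node, Tag)])     # ['div', 'p']

# descendants: every nested tag as well
print([node.name for node in body.descendants if isinstance(node, Tag)])  # ['div', 'a', 'p']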
5. clear, remove everything inside the tag (the tag itself is kept)
# tag = soup.find('body')
# tag.clear()
# print(soup)
6. decompose, recursively remove the tag and everything inside it
# body = soup.find('body')
# body.decompose()
# print(soup)
7. extract, recursively remove the tag and everything inside it, and return the removed tag
# body = soup.find('body')
# v = body.extract()
# print(soup)
8. decode, serialize to a string (including the current tag); decode_contents (excluding the current tag)
# body = soup.find('body')
# v = body.decode()
# v = body.decode_contents()
# print(v)
Original post: https://www.cnblogs.com/yunwangjun-python-520/p/10554876.html