Single characters:
. : any character except a newline
[] : e.g. [aoe], [a-w]; matches any one character from the set
\d : a digit, same as [0-9]
\D : any non-digit
\w : digits, letters, underscores, and (in Python 3) CJK/Unicode word characters
\W : the complement of \w
\s : any whitespace character, including spaces, tabs, form feeds, etc.; equivalent to [ \f\n\r\t\v]
\S : any non-whitespace character
Quantifiers:
* : any number of times, >= 0
+ : at least once, >= 1
? : optional, 0 or 1 time
{m} : exactly m times
{m,} : at least m times, e.g. hello{3,}
{m,n} : between m and n times
Boundaries:
$ : ends with ...
^ : starts with ...
Groups:
(ab)
Greedy mode: .*
Non-greedy (lazy) mode: .*?
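A minimal sketch exercising the rules above; the sample strings are made up for illustration:

import re

s = 'tel: 010-12345, 021-54321'
print(re.findall(r'\d{3}-\d{5}', s))       # \d and {m}: ['010-12345', '021-54321']
print(re.findall(r'^tel', s))              # ^: matches only at the start -> ['tel']
html = '<b>one</b><b>two</b>'
print(re.findall(r'<b>(.*)</b>', html))    # greedy: ['one</b><b>two']
print(re.findall(r'<b>(.*?)</b>', html))   # non-greedy: ['one', 'two']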
# Method 1:
import requests

headers = {  # UA spoofing; assumed here, since the original defines headers elsewhere
    'User-Agent': 'Mozilla/5.0'
}
img_url = 'https://ss1.bdstatic.com/70cFuXSh_Q1YnxGkpoWK1HF6hhy/it/u=1312059974,1893880587&fm=11&gp=0.jpg'
response = requests.get(url=img_url, headers=headers)
img_data = response.content  # .content returns the response body as raw bytes
with open('1.jpg', 'wb') as fp:
    fp.write(img_data)
# Method 2:
import urllib.request

img_url = 'https://ss1.bdstatic.com/70cFuXSh_Q1YnxGkpoWK1HF6hhy/it/u=1312059974,1893880587&fm=11&gp=0.jpg'
# urlretrieve sends the request and persists the response in one call
urllib.request.urlretrieve(img_url, './2.jpg')
import os
import re
import urllib.request
import requests

dirName = 'ImgLibs'
if not os.path.exists(dirName):
    os.mkdir(dirName)
# 1. Capture the page source of the current home page
url = ''
page_text = requests.get(url=url, headers=headers).text
# 2. Parse the image addresses out of the fetched page source
ex = ''
img_src_list = re.findall(ex, page_text, re.S)
for src in img_src_list:
    src = 'http://www.521609.com' + src
    imgPath = dirName + '/' + src.split('/')[-1]
    urllib.request.urlretrieve(src, imgPath)
    print(imgPath, 'downloaded successfully!!!')
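The url and ex above are left blank in the original. For illustration only, here is what a typical non-greedy extraction pattern might look like; the snippet below is made up, not the actual site's markup:

import re

# Hypothetical page snippet; the real site's markup may differ
page_text = '<li><img src="/uploads/1.jpg" alt="a"></li>\n<li><img src="/uploads/2.jpg" alt="b"></li>'
ex = '<img src="(.*?)" alt'              # non-greedy group captures just the path
print(re.findall(ex, page_text, re.S))   # re.S lets . also match newlines in multi-line HTML
# -> ['/uploads/1.jpg', '/uploads/2.jpg']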
from bs4 import BeautifulSoup
import requests

main_url = 'http://www.shicimingju.com/book/sanguoyanyi.html'
response = requests.get(url=main_url, headers=headers)
response.encoding = 'utf-8'
page_text = response.text
fp = open('./sanguo.txt', 'w', encoding='utf-8')
# Data to parse: chapter title, detail-page url, chapter content
soup = BeautifulSoup(page_text, 'lxml')
# Locate all the qualifying a tags (one per chapter)
a_list = soup.select('.book-mulu > ul > li > a')
for a in a_list:
    title = a.string
    detail_url = 'http://www.shicimingju.com' + a['href']
    # Request the detail page and parse out the chapter content
    page_text_detail = requests.get(url=detail_url, headers=headers).text
    soup = BeautifulSoup(page_text_detail, 'lxml')
    div_tag = soup.find('div', class_="chapter_content")
    content = div_tag.text
    fp.write(title + ':' + content + '\n')
    print(title, 'saved successfully!!!')
fp.close()
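The loop above uses both a.string and div_tag.text; the difference matters. A minimal sketch on a made-up snippet:

from bs4 import BeautifulSoup

soup = BeautifulSoup('<div>hello <span>world</span></div>', 'lxml')
print(soup.div.string)   # None: .string works only when the tag has a single text child
print(soup.div.text)     # 'hello world': .text joins all descendant text
print(soup.span.string)  # 'world'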
Leading /: if the xpath expression starts with /, it must locate the target tag starting from the root tag (rarely used; can be ignored)
Non-leading /: represents one level of hierarchy
Non-leading //: represents multiple levels
Leading //: the xpath expression can locate the tag starting from any position
Attribute targeting: tagName[@attrName="value"]
Index targeting: tag[index], where indices start at 1
Fuzzy matching: e.g. contains(@attrName, "value") or starts-with(@attrName, "value"); see the sketch below
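A minimal sketch exercising these rules with lxml; the HTML is made up for illustration:

from lxml import etree

html = '<html><body><div class="slist"><ul><li><a href="/a">x</a></li><li><a href="/b">y</a></li></ul></div></body></html>'
tree = etree.HTML(html)
print(tree.xpath('/html/body/div/ul/li/a/text()'))      # leading /: start at the root, one / per level
print(tree.xpath('//div[@class="slist"]//a/text()'))    # leading //: start anywhere; inner //: skip levels
print(tree.xpath('//li[1]/a/@href'))                    # index targeting is 1-based -> ['/a']
print(tree.xpath('//a[contains(@href, "b")]/text()'))   # fuzzy matching with contains() -> ['y']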
# Crawl multiple pages
import os
import requests
from lxml import etree

dirName = 'GirlsLib'
if not os.path.exists(dirName):
    os.mkdir(dirName)
# Define a generic URL template; the template itself stays fixed
url = 'http://pic.netbian.com/4kmeinv/index_%d.html'
for page in range(1, 6):
    if page == 1:
        new_url = 'http://pic.netbian.com/4kmeinv/'  # page 1 has no index suffix
    else:
        new_url = url % page
    response = requests.get(url=new_url, headers=headers)
    response.encoding = 'gbk'  # the site serves gbk; fixes garbled image titles
    page_text = response.text
    # Parse out each image's name and binary data
    tree = etree.HTML(page_text)
    # li_list stores the li tags that were located
    li_list = tree.xpath('//div[@class="slist"]/ul/li')
    for li in li_list:
        # li has the same data type as tree, so li can call .xpath() too
        title = li.xpath('./a/img/@alt')[0] + '.jpg'  # local parsing, relative to this li
        img_src = 'http://pic.netbian.com' + li.xpath('./a/img/@src')[0]
        img_data = requests.get(url=img_src, headers=headers).content
        imgPath = dirName + '/' + title
        with open(imgPath, 'wb') as fp:
            fp.write(img_data)
        print(title, 'saved successfully!!!')
Note: the page is gbk-encoded, and the image payload is binary data.
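An alternative to setting response.encoding up front is to re-encode just the garbled field; a minimal sketch, assuming requests fell back to its ISO-8859-1 default:

# Hypothetical: title came from a response whose encoding requests guessed as ISO-8859-1
# Undo the wrong decode, then decode the raw bytes as gbk
title = title.encode('iso-8859-1').decode('gbk')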
# Parse out all the city names from https://www.aqistudy.cn/historydata/
url = 'https://www.aqistudy.cn/historydata/'
page_text = requests.get(url=url, headers=headers).text
tree = etree.HTML(page_text)
# hot_cities = tree.xpath('//div[@class="bottom"]/ul/li/a/text()')
# all_cities = tree.xpath('//div[@class="bottom"]/ul/div[2]/li/a/text()')
# The | operator unions both expressions into a single query
cities = tree.xpath('//div[@class="bottom"]/ul/li/a/text() | //div[@class="bottom"]/ul/div[2]/li/a/text()')
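The | operator simply merges the node-sets matched by the two sub-expressions; a minimal sketch on made-up markup:

from lxml import etree

html = '<div class="hot"><a>Beijing</a></div><div class="all"><a>Shanghai</a></div>'
tree = etree.HTML(html)
# Both sub-expressions run independently and their results are merged
print(tree.xpath('//div[@class="hot"]/a/text() | //div[@class="all"]/a/text()'))
# -> ['Beijing', 'Shanghai']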
Source: https://www.cnblogs.com/yebaoer/p/15294771.html