【Python Crawler Learning】A simple crawler demo
【Abstract】A simple crawler demo: request a page with requests, extract the image URLs with a regular expression, and download the images to local files.
##################################################
# 1. Request the page
import time
import requests
import re

headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.116 Safari/537.36'
}
# response = requests.get('https://www.vmgirls.com/13591.html', headers=headers)
response = requests.get('http://www.mculover666.cn/posts/1606619423/', headers=headers)
html = response.text
# print(response.request.headers)
# print(response.text)

# 2. Parse the page
# The target tags look like:
# <img src="http://mculover666.cn/image/20190806/9uiPTi5odYSj.png?imageslim" alt="mark">
# urls = re.findall('<a href="(.*?)" alt=".*?" title=".*?">', html)  # pattern for the vmgirls page above
urls = re.findall('<img src=".*?" alt="mark">', html)
print(urls)

# 3. Save the images
for url in urls:
    # Optional delay between requests
    # time.sleep(1)
    # Extract the src attribute from the matched tag
    url = url.split('"')[1]
    # Image filename: last path segment, without the query string
    file_name = url.split('/')[-1].split('?')[0]
    print(file_name)
    response = requests.get(url, headers=headers)
    with open(file_name, "wb") as f:
        f.write(response.content)
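A small variant, as a sketch only: instead of matching the whole img tag and then splitting on quotes, the regular expression can capture the src attribute directly with a group. The page URL is the same demo page used above; everything else is illustrative.

import re
import requests

headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.116 Safari/537.36'
}
html = requests.get('http://www.mculover666.cn/posts/1606619423/', headers=headers).text

# Capture only the src attribute of each matching img tag
img_urls = re.findall(r'<img src="(.*?)" alt="mark">', html)

for img_url in img_urls:
    # Same filename rule as above: last path segment without the query string
    file_name = img_url.split('/')[-1].split('?')[0]
    with open(file_name, 'wb') as f:
        f.write(requests.get(img_url, headers=headers).content)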
Article source: recclay.blog.csdn.net, author: ReCclay. Copyright belongs to the original author; please contact the author before reposting.
Original link: recclay.blog.csdn.net/article/details/104507271