Python3 黑板客爬虫闯关第一关
        【摘要】 使用 requests 请求页面,用 BeautifulSoup 提取 <h3> 标签文本,再用正则表达式取出其中的数字并拼接成下一关的 URL,循环访问直到页面不再给出数字为止。
    
    
    
    
  
   - 
    
     
    
    
     
      #coding=utf-8
     
    
- 
    
     
    
    
     
      import re
     
    
- 
    
     
    
    
     
      import requests
     
    
- 
    
     
    
    
     
      from requests.exceptions import RequestException
     
    
- 
    
     
    
    
     
      from bs4 import BeautifulSoup
     
    
- 
    
     
    
    
      
     
    
- 
    
     
    
    
     
      def getHtml(url):
     
    
- 
    
     
    
    
      try:
     
    
- 
    
     
    
    
     
       headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:56.0) Gecko/20100101 Firefox/56.0'}  
     
    
- 
    
     
    
    
     
       response = requests.get(url,headers = headers)
     
    
- 
    
     
    
    
      if response.status_code == 200:
     
    
- 
    
     
    
    
      return response.text
     
    
- 
    
     
    
    
      return None
     
    
- 
    
     
    
    
      except RequestException:
     
    
- 
    
     
    
    
      return None
     
    
- 
    
     
    
    
      
     
    
- 
    
     
    
    
     
      if __name__=='__main__':
     
    
- 
    
     
    
    
     
       start_url = "http://www.heibanke.com/lesson/crawler_ex00/"
     
    
- 
    
     
    
    
     
       real_url = start_url
     
    
- 
    
     
    
    
      while 1:#while 1 的运行速度比while True 要快那么一点
     
    
- 
    
     
    
    
      print ("当前请求页面:{}".format(real_url))
     
    
- 
    
     
    
    
     
       html = getHtml(real_url)
     
    
- 
    
     
    
    
     
       soup = BeautifulSoup(html,"lxml")
     
    
- 
    
     
    
    
     
       source =soup.select_one('h3').text
     
    
- 
    
     
    
    
     
       num = re.findall('\d+',source)
     
    
- 
    
     
    
    
      if len(num) == 0:
     
    
- 
    
     
    
    
      break
     
    
- 
    
     
    
    
     
       real_url =  start_url + num[0]
     
    
 考察点:url的拼接,BeautifulSoup库及正则表达式库的使用。
文章来源: blog.csdn.net,作者:悦来客栈的老板,版权归原作者所有,如需转载,请联系作者。
原文链接:blog.csdn.net/qq523176585/article/details/83019159
        【版权声明】本文为华为云社区用户转载文章,如果您发现本社区中有涉嫌抄袭的内容,欢迎发送邮件进行举报,并提供相关证据,一经查实,本社区将立刻删除涉嫌侵权内容,举报邮箱:
            cloudbbs@huaweicloud.com
        
        
        
        
        
        
        - 点赞
- 收藏
- 关注作者
 
             
           
评论(0)