Python爬虫实战(十):爬取Linux公社资源站的所有电子资源

举报
悦来客栈的老板 发表于 2020/12/28 23:45:34 2020/12/28
【摘要】 #coding=utf-8import reimport requestsfrom tenacity import retry, stop_after_attempt @retry(stop=stop_after_attempt(3))def get_html(url): '''获取页面源代码''' headers = {'User-Agent': 'Mozilla/5.0 ...

  
  1. #coding=utf-8
  2. import re
  3. import requests
  4. from tenacity import retry, stop_after_attempt
  5. @retry(stop=stop_after_attempt(3))
  6. def get_html(url):
  7. '''获取页面源代码'''
  8. headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:56.0) Gecko/20100101 Firefox/56.0'}
  9. page = requests.get(url,headers = headers)
  10. html = page.text
  11. return html
  12. def get_real_url(url,reg):
  13. '''获取真实的页面地址'''
  14. mainurl = 'http://linux.linuxidc.com/'
  15. html = get_html(url)
  16. items = re.findall(reg,html)
  17. for item in items:
  18. realurl = '{}{}'.format(mainurl,item)
  19. yield realurl
  20. def get_year_url():
  21. '''获取**年的页面地址'''
  22. url = 'http://linux.linuxidc.com/index.php'
  23. reg = re.compile(r'href="(.*?)">\d+年资料')
  24. year_urls = get_real_url(url,reg)
  25. for year_url in year_urls:
  26. yield year_url
  27. def get_month_url():
  28. '''获取**月的页面地址'''
  29. reg = re.compile(r'href="(.*?)">\d+月')
  30. year_urls = get_year_url()
  31. for year_url in year_urls:
  32. month_urls = get_real_url(year_url,reg)
  33. for month_url in month_urls:
  34. yield month_url
  35. def get_day_url():
  36. '''获取**日的页面地址'''
  37. reg = re.compile(r'href="(.*?)">\d+日')
  38. month_urls = get_month_url()
  39. for month_url in month_urls:
  40. day_urls = get_real_url(month_url,reg)
  41. for day_url in day_urls:
  42. yield day_url
  43. def get_books_urls(urls):
  44. '''获取资料名称及下载页'''
  45. for url in urls:
  46. reg = re.compile(r'href="(index.*?)">(.*?)</a></div></td><td width="100">')
  47. html = get_html(url)
  48. items = re.findall(reg,html)
  49. for item in items:
  50. yield item
  51. def get_other_url(url):
  52. '''获取其他链接的资料'''
  53. reg = re.compile(r'href="(index.*?)">.*?</a></div></td><td width="100">')
  54. all_urls = get_real_url(url,reg)
  55. for all_url in all_urls:
  56. yield all_url
  57. def print_book_url(book,book_url):
  58. '''打印可下载的书籍及链接'''
  59. url = 'http://linux.linuxidc.com/'
  60. book = book.lower()
  61. item = book_url
  62. if len(item) == 2:
  63. bookname = item[1].lower()
  64. if book in bookname:
  65. print ('\n'+ item[1])
  66. print ("资料下载链接:")
  67. dlurl = '{}{}'.format(url,item[0])
  68. reg = re.compile(r'href="(linuxconf/download.php.*?)">.*?</a></div></td><td width="100">')
  69. download_urls = get_real_url(dlurl,reg)
  70. for download_url in download_urls:
  71. print (download_url)
  72. def get_download_url(book):
  73. url_2011 = 'http://linux.linuxidc.com/index.php?folder=MjAxMcTq18rBzw=='
  74. all_urls = get_other_url(url_2011)
  75. books_urls = get_books_urls(all_urls)
  76. for book_url in books_urls:
  77. print_book_url (book,book_url)
  78. day_urls = get_day_url()
  79. books_urls = get_books_urls(day_urls)
  80. for book_url in books_urls:
  81. print_book_url (book,book_url)
  82. if __name__=='__main__':
  83. book = input ("请输入资料名称:")
  84. get_download_url(book)



说明:待添加。

文章来源: blog.csdn.net,作者:悦来客栈的老板,版权归原作者所有,如需转载,请联系作者。

原文链接:blog.csdn.net/qq523176585/article/details/78443714

【版权声明】本文为华为云社区用户转载文章,如果您发现本社区中有涉嫌抄袭的内容,欢迎发送邮件进行举报,并提供相关证据,一经查实,本社区将立刻删除涉嫌侵权内容,举报邮箱: cloudbbs@huaweicloud.com
  • 点赞
  • 收藏
  • 关注作者

评论(0)

0/1000
抱歉,系统识别当前为高风险访问,暂不支持该操作

全部回复

上滑加载中

设置昵称

在此一键设置昵称,即可参与社区互动!

*长度不超过10个汉字或20个英文字符,设置后3个月内不可修改。

*长度不超过10个汉字或20个英文字符,设置后3个月内不可修改。