

A Detailed Python Crawler Example

2020-02-15 21:55:35

This post walks through a Python crawler example, covering the crawler's overall architecture and its key modules: the URL manager, the HTML downloader, and the HTML parser.

Simple crawler architecture

A scheduler drives four cooperating modules: a URL manager that tracks pending and already-crawled URLs, an HTML downloader that fetches pages, an HTML parser that extracts data and new links, and an HTML outputer that writes the collected results.

Program entry point (crawler scheduler)

# coding: utf-8
import datetime

from maya_Spider import url_manager, html_downloader, html_parser, html_outputer

class Spider_Main(object):
    # Initialization
    def __init__(self):
        # Set up the URL manager
        self.urls = url_manager.UrlManager()
        # Set up the HTML downloader
        self.downloader = html_downloader.HtmlDownloader()
        # Set up the HTML parser
        self.parser = html_parser.HtmlParser()
        # Set up the HTML outputer
        self.outputer = html_outputer.HtmlOutputer()

    # Crawler scheduling loop
    def craw(self, root_url):
        count = 1
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                print('craw %d : %s' % (count, new_url))
                html_content = self.downloader.download(new_url)
                new_urls, new_data = self.parser.parse(new_url, html_content)
                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(new_data)
                if count == 10:
                    break
                count = count + 1
            except Exception:
                print('craw failed')
        self.outputer.output_html()

if __name__ == '__main__':
    # Crawl entry point
    root_url = 'http://baike.baidu.com/view/21087.htm'
    # Start time
    print('Timing started..............')
    start_time = datetime.datetime.now()
    obj_spider = Spider_Main()
    obj_spider.craw(root_url)
    # End time
    end_time = datetime.datetime.now()
    print('Total time: %ds' % (end_time - start_time).seconds)
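The scheduler also imports an html_outputer module that the post never lists. Below is a minimal sketch of what it might contain, assuming collect_data simply buffers the result dicts produced by the parser and output_html dumps them into a bare HTML table; the output file name output.html is an assumption, not something given in the original.

class HtmlOutputer(object):
    def __init__(self):
        # Buffer of result dicts ({'url': ..., 'title': ..., 'summary': ...})
        self.datas = []

    def collect_data(self, data):
        if data is None:
            return
        self.datas.append(data)

    def output_html(self):
        # Hypothetical output file; the original post does not name one
        with open('output.html', 'w', encoding='utf-8') as fout:
            fout.write('<html><body><table>')
            for data in self.datas:
                fout.write('<tr>')
                fout.write('<td>%s</td>' % data['url'])
                fout.write('<td>%s</td>' % data['title'])
                fout.write('<td>%s</td>' % data['summary'])
                fout.write('</tr>')
            fout.write('</table></body></html>')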

URL manager

class UrlManager(object):
    def __init__(self):
        self.new_urls = set()  # URLs waiting to be crawled
        self.old_urls = set()  # URLs already crawled

    def add_new_url(self, url):
        if url is None:
            return
        # Only queue a URL that has never been seen before
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)

    def has_new_url(self):
        return len(self.new_urls) != 0

    def get_new_url(self):
        # Pop a pending URL and record it as crawled
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url
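A quick usage sketch (not in the original post) showing the deduplication behaviour: a URL is ignored once it is in either set, so pages are never crawled twice.

manager = UrlManager()
manager.add_new_url('http://baike.baidu.com/item/Python')
manager.add_new_url('http://baike.baidu.com/item/Python')  # duplicate, silently ignored
print(manager.has_new_url())  # True
url = manager.get_new_url()   # pops the URL and records it in old_urls
manager.add_new_url(url)      # already crawled, so not re-queued
print(manager.has_new_url())  # False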

HTML downloader

import urllib.request

class HtmlDownloader(object):
    def download(self, url):
        if url is None:
            return None
        # Pretend to be a browser; some sites reject requests without a User-Agent
        user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
        headers = {'User-Agent': user_agent}
        # Build the request
        req = urllib.request.Request(url, headers=headers)
        # Fetch the page
        response = urllib.request.urlopen(req)
        # In Python 3, read() returns bytes, not str; decode it before returning
        return response.read().decode()
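A minimal usage sketch, assuming the entry URL from the scheduler above and a working network connection:

downloader = HtmlDownloader()
html = downloader.download('http://baike.baidu.com/view/21087.htm')
if html is not None:
    print(html[:200])  # first 200 characters of the page source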

HTML parser

import re
import urllib.parse

from bs4 import BeautifulSoup

class HtmlParser(object):
    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        # Lemma links look like /item/xxx
        links = soup.find_all('a', href=re.compile(r'/item/'))
        for link in links:
            new_url = link['href']
            # Resolve the relative link against the current page's URL
            new_full_url = urllib.parse.urljoin(page_url, new_url)
            new_urls.add(new_full_url)
        return new_urls

    # Extract the title and summary
    def _get_new_data(self, page_url, soup):
        res_data = {}
        res_data['url'] = page_url
        # <dd class="lemmaWgt-lemmaTitle-title"><h1>Python</h1> holds the title
        title_node = soup.find('dd', class_='lemmaWgt-lemmaTitle-title').find('h1')
        res_data['title'] = title_node.get_text()
        # <div class="lemma-summary" label-module="lemmaSummary"> holds the summary
        summary_node = soup.find('div', class_='lemma-summary')
        res_data['summary'] = summary_node.get_text()
        return res_data

    def parse(self, page_url, html_content):
        if page_url is None or html_content is None:
            return None
        # html_content is already a decoded str, so no from_encoding is needed
        soup = BeautifulSoup(html_content, 'html.parser')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data
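Wiring the downloader and parser together by hand looks like the sketch below. This assumes Baidu Baike still serves pages with the class names used above, which may have changed since the post was written.

downloader = HtmlDownloader()
parser = HtmlParser()

page_url = 'http://baike.baidu.com/view/21087.htm'
html = downloader.download(page_url)
new_urls, new_data = parser.parse(page_url, html)

print(len(new_urls))        # number of /item/ links found on the page
print(new_data['title'])    # lemma title
print(new_data['summary'])  # lemma summary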