python下載微信公眾號相關文章

首頁 > 編程 > Python > 正文

python下載微信公眾號相關文章

2020-02-16 01:21:29
字體:
來源:轉載
供稿:網友

本文實例為大家分享了python下載微信公眾號相關文章的具體代碼,供大家參考,具體內容如下

目的:從零開始學自動化測試公眾號中下載“pytest”一系列文檔

1、搜索微信號文章關鍵字搜索

2、對搜索結果前N頁進行解析,獲取文章標題和對應URL

主要使用的是requests和bs4中的Beautifulsoup

Weixin.py

import requests
from urllib.parse import quote
from bs4 import BeautifulSoup
import re
from WeixinSpider.HTML2doc import MyHTMLParser


class WeixinSpider(object):
    """Search Sogou's WeChat index for a public account's articles.

    Workflow: get_page_url() builds the search-result page URLs, then
    get_article_url() scrapes each page and collects (title -> href)
    entries whose title contains the keyword.
    """

    def __init__(self, gzh_name, pageno, keyword):
        # gzh_name: public-account name used as the search query.
        # pageno:   number of result pages to scan (1..pageno).
        # keyword:  case-insensitive substring filter on article titles.
        self.GZH_Name = gzh_name
        self.pageno = pageno
        self.keyword = keyword.lower()
        self.page_url = []      # search-result page URLs
        self.article_list = []  # list of {title: href} dicts, one per article
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'}
        self.timeout = 5
        # Characters illegal in Windows filenames (plus CR/LF), stripped
        # from titles before they are used as document names.
        # NOTE: the scraped original had backslashes mangled to '/'.
        self.pattern = r'[\\/:*?"<>|\r\n]+'

    def get_page_url(self):
        """Build the Sogou WeChat search URL for each result page."""
        for i in range(1, self.pageno + 1):
            # e.g. https://weixin.sogou.com/weixin?query=...&type=2&page=2&ie=utf8
            url = ("https://weixin.sogou.com/weixin?query=%s&_sug_type_=&s_from=input&_sug_=n&type=2&page=%s&ie=utf8"
                   % (quote(self.GZH_Name), i))
            self.page_url.append(url)

    def get_article_url(self):
        """Scrape every collected page; keep articles whose title matches the keyword.

        Appends a fresh one-entry dict per matching article (the original
        appended the same shared dict repeatedly, producing duplicates).
        """
        for url in self.page_url:
            response = requests.get(url, headers=self.headers, timeout=self.timeout)
            result = BeautifulSoup(response.text, 'html.parser')
            articles = result.select('ul[class="news-list"] > li > div[class="txt-box"] > h3 > a ')
            for a in articles:
                if self.keyword in a.text.lower():
                    # Sanitize the title so it is a valid filename.
                    new_text = re.sub(self.pattern, "", a.text)
                    self.article_list.append({new_text: a["href"]})


if __name__ == "__main__":
    headers = {'User-Agent':
               'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'}
    timeout = 5
    gzh_name = 'pytest文檔'
    My_GZH = WeixinSpider(gzh_name, 5, 'pytest')
    My_GZH.get_page_url()
    # print(My_GZH.page_url)
    My_GZH.get_article_url()
    # print(My_GZH.article_list)
    for article in My_GZH.article_list:
        for (key, value) in article.items():
            html_response = requests.get(value, headers=headers, timeout=timeout)
            myHTMLParser = MyHTMLParser(key)
            myHTMLParser.feed(html_response.text)
            myHTMLParser.doc.save(myHTMLParser.docfile)

HTML2doc.py

from html.parser import HTMLParser
import requests
from docx import Document
import re
from docx.shared import RGBColor
import docx


class MyHTMLParser(HTMLParser):
    """Convert a WeChat article's HTML into a Word document via python-docx.

    h1..h6 become headings, <p> text becomes paragraphs, <code> text is
    written in grey, and <img .../> tags are downloaded and embedded.
    """

    def __init__(self, docname):
        HTMLParser.__init__(self)
        self.docname = docname
        # NOTE(review): python-docx writes .docx content; the ".doc"
        # extension is kept from the original but is arguably wrong.
        self.docfile = r"D:/pytest/%s.doc" % self.docname
        self.doc = Document()
        self.title = False          # inside an h1..h6 tag
        self.code = False           # inside a <code> tag
        self.text = ''              # accumulated <p> text
        self.processing = None      # tag currently accumulating text ("p")
        self.codeprocessing = None  # code tag awaiting its end tag
        self.picindex = 1           # sequence number for downloaded images
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'}
        self.timeout = 5

    def handle_startendtag(self, tag, attrs):
        # Images: find the URL (data-src) and file type (data-type),
        # download the picture, save it to disk, then embed it in the doc.
        if tag == "img":
            if len(attrs) == 0:
                pass
            else:
                picname = None
                picdata = None
                for (variable, value) in attrs:
                    if variable == "data-type":
                        picname = r"D:/pytest/%s%s.%s" % (self.docname, self.picindex, value)
                    if variable == "data-src":
                        picdata = requests.get(value, headers=self.headers, timeout=self.timeout)
                self.picindex = self.picindex + 1
                # Guard: the original raised NameError when either
                # data-type or data-src was missing from the attrs.
                if picname is None or picdata is None:
                    return
                with open(picname, "wb") as pic:
                    pic.write(picdata.content)
                try:
                    self.doc.add_picture(picname)
                except docx.image.exceptions.UnexpectedEndOfFileError as e:
                    print(e)

    def handle_starttag(self, tag, attrs):
        # r"h(\d)" matches h1..h6 (the scraped original had the mangled
        # r"h(/d)", which never matched and dropped every heading).
        if re.match(r"h(\d)", tag):
            self.title = True
        if tag == "p":
            self.processing = tag
        if tag == "code":
            self.code = True
            self.codeprocessing = tag

    def handle_data(self, data):
        if self.title:
            self.doc.add_heading(data, level=2)
        if self.processing:
            # Paragraph text may arrive in several chunks; flush on end tag.
            self.text = self.text + data
        if self.code:
            # Code fragments are written immediately, in grey.
            p = self.doc.add_paragraph()
            run = p.add_run(data)
            run.font.color.rgb = RGBColor(111, 111, 111)

    def handle_endtag(self, tag):
        self.title = False
        if tag == self.processing:
            # Flush the accumulated paragraph text.
            self.doc.add_paragraph(self.text)
            self.processing = None
            self.text = ''
        if tag == self.codeprocessing:
            self.code = False
發表評論 共有條評論
用戶名: 密碼:
驗證碼: 匿名發表
主站蜘蛛池模板: 凤凰县| 上犹县| 双江| 嵊泗县| 乐山市| 黎川县| 遵义市| 永年县| 沐川县| 朝阳区| 佛坪县| 赞皇县| 囊谦县| 什邡市| 武定县| 泸州市| 鸡泽县| 安庆市| 大石桥市| 根河市| 乳源| 临漳县| 朝阳区| 聂拉木县| 泗洪县| 怀安县| 汕头市| 佳木斯市| 霍州市| 凤山市| 河源市| 大石桥市| 新营市| 筠连县| 新疆| 祁阳县| 延川县| 长春市| 道真| 靖宇县| 奈曼旗|