python實現批量下載新浪博客的方法

首頁 > 編程 > Python > 正文

python實現批量下載新浪博客的方法

2020-02-23 01:34:20
字體:
來源:轉載
供稿:網友

本文實例講述了python實現批量下載新浪博客的方法。分享給大家供大家參考。具體實現方法如下:

# coding=utf-8 import urllib2import sys, osimport reimport stringfrom BeautifulSoup import BeautifulSoupdef encode(s):  return s.decode('utf-8').encode(sys.stdout.encoding, 'ignore')def getHTML(url):  #proxy_handler = urllib2.ProxyHandler({'http':'http://211.138.124.211:80'})  #opener = urllib2.build_opener(proxy_handler)  #urllib2.install_opener(opener)  req = urllib2.Request(url)  response = urllib2.urlopen(req, timeout=15)  return BeautifulSoup(response, convertEntities=BeautifulSoup.HTML_ENTITIES)def visible(element):  '''抓取可見的文本元素'''  if element.parent.name in ['style', 'script', '[document]', 'head', 'title']:    return False  elif re.match('<!--.*-->', str(element)):    return False  elif element == u'/xa0':    return False  return Truedef delReturn(element):  '''刪除元素內的換行'''  return re.sub('(?<!^)/n+(?!$)', ' ', str(element)).decode('utf-8')def validFilename(filename):  # windows  return re.sub('[//:*?<>"|/xa0]', '', filename)def writeToFile(text, filename, dirname):  if not os.path.exists(dirname):    os.makedirs(dirname)    print encode('保存到目錄'), dirname  filename = validFilename(filename)  print encode('保存文章'), filename  path = os.path.join(dirname, filename)  if not os.path.exists(path):    f = open(path, 'w')    f.write(text)    f.close()  else:    print filename, encode('已經存在')def formatContent(url, title=''):  '''格式化文章內容'''  page = getHTML(url)  content = page.find('div', {'class':'articalContent'})  art_id = re.search('blog_(/w+)/.html', url).group(1)  blog_name = page.find('span', id='blognamespan').string  if title == '':    title = page.find('h2', id=re.compile('^t_')).string  temp_data = filter(visible, content.findAll(text=True)) # 去掉不可見元素  temp_data = ''.join(map(delReturn, temp_data)) # 刪除元素內的換行符  temp_data = temp_data.strip() # 刪除文章首尾的空行  temp_data = re.sub('/n{2,}', '/n/n', temp_data) # 刪除文章內過多的空行  # 輸出到文件  # 編碼問題  temp_data = '本文地址:'.decode('utf-8') + url + '/n/n' + temp_data  op_text = temp_data.encode('utf-8')  op_file = title + '_' + 
art_id +'.txt'  writeToFile(op_text, op_file, blog_name)def articlelist(url):  articles = {}  page = getHTML(url)  pages = page.find('ul', {'class':'SG_pages'}).span.string  page_num = int(re.search('(/d+)', pages).group(1))  for i in range(1, page_num+1):    print encode('生成第%d頁文章索引'%i)    if i != 1:      url = re.sub('(_)/d+(/.html)$', '/g<1>'+str(i)+'/g<2>', url)      page = getHTML(url)    article = page.findAll('span', {'class':'atc_title'})    for art in article:      art_title = art.a['title']      art_href = art.a['href']      articles[art_title] = art_href  return articlesdef blog_dld(articles):  if not isinstance(articles, dict):    return False  print encode('開始下載文章')  for art_title, art_href in articles.items():    formatContent(art_href, art_title)if __name__ == '__main__':  sel = raw_input(encode('你要下載的是(1)全部文章還是(2)單篇文章,輸入1或者2: '))  if sel == '1':    #articlelist_url = 'http://blog.sina.com.cn/s/articlelist_1303481411_0_1.html'    articlelist_url = raw_input(encode('請輸入博客文章目錄鏈接: '))    articles = articlelist(articlelist_url)    blog_dld(articles)  else:    #article_url = 'http://blog.sina.com.cn/s/blog_4db18c430100gxc5.html'    article_url = raw_input(encode('請輸入博客文章鏈接: '))    formatContent(article_url)            
發表評論 共有條評論
用戶名: 密碼:
驗證碼: 匿名發表
主站蜘蛛池模板: 内黄县| 花莲市| 马鞍山市| 赣州市| 政和县| 潢川县| 绥江县| 青冈县| 克山县| 竹山县| 清涧县| 昌吉市| 迁安市| 富宁县| 伊吾县| 揭东县| 东明县| 固镇县| 郯城县| 延边| 桦南县| 鸡东县| 视频| 邵武市| 乐安县| 溧水县| 池州市| 临湘市| 甘孜县| 潞西市| 新邵县| 承德县| 浑源县| 简阳市| 鄱阳县| 临澧县| 临澧县| 信丰县| 武安市| 广宗县| 平利县|