国产探花免费观看_亚洲丰满少妇自慰呻吟_97日韩有码在线_资源在线日韩欧美_一区二区精品毛片,辰东完美世界有声小说,欢乐颂第一季,yy玄幻小说排行榜完本

首頁 > 編程 > Python > 正文

python實現登陸知乎獲得個人收藏并保存為word文件

2020-02-23 00:22:27
字體:
來源:轉載
供稿:網友

這個程序其實很早之前就完成了,一直沒有發出來,趁著最近不是很忙就分享給大家.
使用BeautifulSoup模塊和urllib2模塊實現,然后保存成word是使用python docx模塊的,安裝方式網上一搜一大堆,我就不再贅述了.

主要實現的功能是登陸知乎,然后將個人收藏的問題和答案獲取到之后保存為word文檔,以便沒有網絡的時候可以查閱.當然,答案中如果有圖片的話也是可以獲取到的.不過這塊還是有點問題的.等以后有時間了在修改修改吧.

還有就是正則,用的簡直不要太爛…鄙視下自己…

還有,現在是問題的話所有的答案都會保存下來的.看看有時間修改成只保存第一個答案或者收藏頁問題的答案吧.要不然如果收藏的太多了的話保存下來的word會嚇你一跳的哦.O(∩_∩)O哈哈~

在登陸的時候可能會需要驗證碼,如果提示輸入驗證碼的話在程序的文件夾下面就可以看到驗證碼的圖片,照著輸入就ok了.

# -*- coding: utf-8 -*-#登陸知乎抓取個人收藏 然后保存為wordimport sysreload(sys) sys.setdefaultencoding('utf-8')import urllibimport urllib2import cookielibimport stringimport refrom bs4 import BeautifulSoupfrom docx import Documentfrom docx import *from docx.shared import Inchesfrom sys import exitimport os #這兒是因為在公司上網的話需要使用socket代理#import socks#import socket#socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5,"127.0.0.1",8088)#socket.socket =socks.socksocket loginurl='http://www.zhihu.com/login' headers = {'User-Agent' : 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.116 Safari/537.36',}  postdata={ '_xsrf': 'acab9d276ea217226d9cc94a84a231f7', 'email': '', 'password': '', 'rememberme':'y'  } if not os.path.exists('myimg'):  os.mkdir('myimg')if os.path.exists('123.docx'):  os.remove('123.docx')if os.path.exists('checkcode.gif'):  os.remove('checkcode.gif') mydoc=Document()questiontitle=''#----------------------------------------------------------------------def dealimg(imgcontent):  soup=BeautifulSoup(imgcontent)  try:    for imglink in soup.findAll('img'):      if imglink is not None :        myimg= imglink.get('src')        #print myimg        if myimg.find('http')>=0:          imgsrc=urllib2.urlopen(myimg).read()          imgnamere=re.compile(r'http/S*/')          imgname=imgnamere.sub('',myimg)          #print imgname          with open(u'myimg'+'/'+imgname,'wb') as code:            code.write(imgsrc)            mydoc.add_picture(u'myimg/'+imgname,width=Inches(1.25))  except:    pass  strinfo=re.compile(r'<noscript>[/s/S]*</noscript>')  imgcontent=strinfo.sub('',imgcontent)  strinfo=re.compile(r'<img class[/s/S]*</>')  imgcontent=strinfo.sub('',imgcontent)  #show all  strinfo=re.compile(r'<a class="toggle-expand[/s/S]*</a>')  imgcontent=strinfo.sub('',imgcontent)   strinfo=re.compile(r'<a class=" wrap external"[/s/S]*rel="nofollow noreferrer" target="_blank">')  imgcontent=strinfo.sub('',imgcontent)  imgcontent=imgcontent.replace('<i 
class="icon-external"></i></a>','')    imgcontent=imgcontent.replace('</b>','').replace('</p>','').replace('<p>','').replace('<p>','').replace('<br>','')  return imgcontent       def enterquestionpage(pageurl):  html=urllib2.urlopen(pageurl).read()  soup=BeautifulSoup(html)  questiontitle=soup.title.string  mydoc.add_heading(questiontitle,level=3)  for div in soup.findAll('div',{'class':'fixed-summary zm-editable-content clearfix'}):    #print div    conent=str(div).replace('<div class="fixed-summary zm-editable-content clearfix">','').replace('</div>','')         conent=conent.decode('utf-8')    conent=conent.replace('<br/>','/n')         conent=dealimg(conent)    ###這一塊弄得太復雜了 有時間找找看有沒有處理html的模塊    conent=conent.replace('<div class="fixed-summary-mask">','').replace('<blockquote>','').replace('<b>','').replace('<strong>','').replace('</strong>','').replace('<em>','').replace('</em>','').replace('</blockquote>','')    mydoc.add_paragraph(conent,style='BodyText3')    """file=open('222.txt','a')    file.write(str(conent))    file.close()"""      def entercollectpage(pageurl):  html=urllib2.urlopen(pageurl).read()  soup=BeautifulSoup(html)  for div in soup.findAll('div',{'class':'zm-item'}):    h2content=div.find('h2',{'class':'zm-item-title'})    #print h2content    if h2content is not None:      link=h2content.find('a')      mylink=link.get('href')      quectionlink='http://www.zhihu.com'+mylink      enterquestionpage(quectionlink)      print quectionlink       def loginzhihu():  postdatastr=urllib.urlencode(postdata)  '''  cj = cookielib.LWPCookieJar()  cookie_support = urllib2.HTTPCookieProcessor(cj)  opener = urllib2.build_opener(cookie_support,urllib2.HTTPHandler)  urllib2.install_opener(opener)  '''  h = urllib2.urlopen(loginurl)  request = urllib2.Request(loginurl,postdatastr,headers)  request.get_origin_req_host  response = urllib2.urlopen(request)  #print response.geturl()  text = response.read()    collecturl='http://www.zhihu.com/collections'  
req=urllib2.urlopen(collecturl)  if str(req.geturl())=='http://www.zhihu.com/?next=%2Fcollections':    print 'login fail!'    return  txt=req.read()   soup=BeautifulSoup(txt)  count=0  divs =soup.findAll('div',{'class':'zm-item'})  if divs is None:    print 'login fail!'    return  print 'login ok!/n'  for div in divs:         link=div.find('a')    mylink=link.get('href')    collectlink='http://www.zhihu.com'+mylink    entercollectpage(collectlink)    print collectlink    #這兒是當時做測試用的,值獲取一個收藏    #count+=1    #if count==1:    #  return      def getcheckcode(thehtml):  soup=BeautifulSoup(thehtml)  div=soup.find('div',{'class':'js-captcha captcha-wrap'})  if div is not None:    #print div    imgsrc=div.find('img')    imglink=imgsrc.get('src')    if imglink is not None:      imglink='http://www.zhihu.com'+imglink       imgcontent=urllib2.urlopen(imglink).read()      with open('checkcode.gif','wb') as code:        code.write(imgcontent)      return True    else:      return False  return False  if __name__=='__main__':     import getpass  username=raw_input('input username:')  password=getpass.getpass('Enter password: ')      postdata['email']=username  postdata['password']=password  postdatastr=urllib.urlencode(postdata)  cj = cookielib.LWPCookieJar()  cookie_support = urllib2.HTTPCookieProcessor(cj)  opener = urllib2.build_opener(cookie_support,urllib2.HTTPHandler)  urllib2.install_opener(opener)   h = urllib2.urlopen(loginurl)  request = urllib2.Request(loginurl,postdatastr,headers)  response = urllib2.urlopen(request)  txt = response.read()   if getcheckcode(txt):    checkcode=raw_input('input checkcode:')    postdata['captcha']=checkcode    loginzhihu()    mydoc.save('123.docx')  else:    loginzhihu()    mydoc.save('123.docx')   print 'the end'  raw_input()            
發表評論 共有條評論
用戶名: 密碼:
驗證碼: 匿名發表
主站蜘蛛池模板: 唐河县| 开鲁县| 阿图什市| 孝感市| 厦门市| 玉门市| 获嘉县| 汝阳县| 兰坪| 泗水县| 洛扎县| 崇明县| 宜春市| 海原县| 太仓市| 怀仁县| 固镇县| 葫芦岛市| 夏邑县| 化德县| 云龙县| 林州市| 永仁县| 文安县| 大足县| 安新县| 百色市| 澎湖县| 宜阳县| 砚山县| 布尔津县| 县级市| 贵阳市| 泸定县| 健康| 玛沁县| 揭西县| 广州市| 遂川县| 玉龙| 盐城市|