国产探花免费观看_亚洲丰满少妇自慰呻吟_97日韩有码在线_资源在线日韩欧美_一区二区精品毛片,辰东完美世界有声小说,欢乐颂第一季,yy玄幻小说排行榜完本

首頁 > 編程 > Python > 正文

Python實現的爬取百度文庫功能示例

2020-02-16 01:11:47
字體:
來源:轉載
供稿:網友

本文實例講述了Python實現的爬取百度文庫功能。分享給大家供大家參考,具體如下:

# -*- coding: utf-8 -*-
"""Crawl a Baidu Wenku document with Selenium and save it as a .docx file.

Workflow: open the document page with a mobile user-agent (the mobile layout
exposes the document text as plain <p class="txt"> paragraphs), click the
"unfold" widget once, then keep clicking the pager's "next page" button until
it disappears, collecting every paragraph along the way. Finally write
title + paragraphs into a Word document via python-docx.
"""
from selenium import webdriver
from bs4 import BeautifulSoup
from docx import Document
from docx.enum.text import WD_ALIGN_PARAGRAPH  # used to centre the title
from time import sleep
from selenium.webdriver.common.keys import Keys  # kept: may be used elsewhere

# Browser driver path (uncomment and pass to webdriver.Chrome if chromedriver
# is not on PATH):
# BROWSER_PATH = 'C:/Users/Administrator/AppData/Local/Google/Chrome/Application/chromedriver.exe'

# Target document URL.
DEST_URL = 'https://wenku.baidu.com/view/aa31a84bcf84b9d528ea7a2c.html'

# Accumulated document state, filled in by find_doc across recursive calls.
doc_title = ''
doc_content_list = []


def find_doc(driver, init=True):
    """Scrape the current page, trigger loading of the next one, and recurse.

    Parameters
    ----------
    driver : selenium WebDriver already positioned on the Wenku page.
    init : bool
        True on the first call only; expands the folded document body
        instead of clicking the pager.

    Returns
    -------
    (doc_title, doc_content_list) once no further pager button is found.
    """
    global doc_content_list
    global doc_title
    stop_condition = False
    soup = BeautifulSoup(driver.page_source, 'lxml')
    if init:
        # First visit: grab the title and expand the folded document body.
        title_result = soup.find('div', attrs={'class': 'doc-title'})
        doc_title = title_result.get_text()
        init_page = driver.find_element_by_xpath(
            "//div[@class='foldpagewg-text-con']")
        driver.execute_script('arguments[0].scrollIntoView();', init_page)
        init_page.click()
        init = False
    else:
        try:
            # The schedule lookup result is unused, but its absence raising
            # here is part of the "no more pages" detection.
            driver.find_element_by_xpath("//div[@class='pagerwg-schedule']")
            next_page = driver.find_element_by_class_name("pagerwg-button")
            station = driver.find_element_by_xpath(
                "//div[@class='bottombarwg-root border-none']")
            # Scroll the bottom bar into view so the button is clickable.
            driver.execute_script('arguments[0].scrollIntoView(false);', station)
            next_page.click()
        except Exception:
            # Pager element missing: we have reached the last page.
            print("找不到元素")
            stop_condition = True
    # Collect every text paragraph on this page, stripping spaces.
    for each in soup.find_all('p', attrs={'class': 'txt'}):
        doc_content_list.append(each.get_text().replace(' ', ''))
    sleep(2)  # give the next page time to load before re-reading page_source
    if not stop_condition:
        doc_title, doc_content_list = find_doc(driver, init)
    return doc_title, doc_content_list


def save(doc_title, doc_content_list):
    """Write the scraped title and paragraphs into '百度文庫-<title>.docx'."""
    document = Document()
    heading = document.add_heading(doc_title, 0)
    heading.alignment = WD_ALIGN_PARAGRAPH.CENTER  # centre the title
    for each in doc_content_list:
        document.add_paragraph(each)
    # Use the first whitespace-separated token of the title as the filename,
    # avoiding trailing boilerplate in the page title.
    t_title = doc_title.split()[0]
    document.save('百度文庫-%s.docx' % t_title)
    # Fixed: original printed literal "/n/n" instead of newlines.
    print("\n\nCompleted: %s.docx, to read." % t_title)
    # NOTE: driver.quit() removed from here — `driver` was not in this
    # function's scope and __main__ already quits the driver.


if __name__ == '__main__':
    options = webdriver.ChromeOptions()
    # Mobile user-agent so Baidu serves the mobile layout with full text.
    # (Stray " / " artifacts from a lost line-continuation removed.)
    options.add_argument(
        'user-agent="Mozilla/5.0 (Linux; Android 4.0.4; Galaxy Nexus '
        'Build/IMM76B) AppleWebKit/535.19 (KHTML, like Gecko) '
        'Chrome/18.0.1025.133 Mobile Safari/535.19"')
    # driver = webdriver.Chrome(BROWSER_PATH, chrome_options=options)
    driver = webdriver.Chrome(chrome_options=options)
    try:
        driver.get(DEST_URL)
        print("**********START**********")
        title, content = find_doc(driver, True)
        save(title, content)
    finally:
        # Always release the browser, even if scraping/saving raises.
        driver.quit()
發表評論 共有條評論
用戶名: 密碼:
驗證碼: 匿名發表
主站蜘蛛池模板: 汉中市| 张掖市| 会昌县| 宁强县| 哈巴河县| 顺昌县| 启东市| 兴山县| 德格县| 西吉县| 广河县| 中宁县| 桂阳县| 无棣县| 荥阳市| 嘉善县| 壶关县| 嘉义县| 甘德县| 佛坪县| 吴桥县| 甘孜县| 故城县| 天峻县| 罗源县| 额敏县| 信阳市| 拉萨市| 洛南县| 黔西| 上林县| 盐源县| 柳江县| 浙江省| 吉林省| 华阴市| 河东区| 嘉祥县| 平舆县| 西林县| 沂水县|