国产探花免费观看_亚洲丰满少妇自慰呻吟_97日韩有码在线_资源在线日韩欧美_一区二区精品毛片,辰东完美世界有声小说,欢乐颂第一季,yy玄幻小说排行榜完本

首頁 > 編程 > Python > 正文

Python使用Selenium模塊模擬瀏覽器抓取斗魚直播間信息示例

2020-02-15 22:26:23
字體:
來源:轉載
供稿:網友

本文實例講述了Python使用Selenium模塊模擬瀏覽器抓取斗魚直播間信息。分享給大家供大家參考,具體如下:

# -*- coding: utf-8 -*-
"""Scrape Douyu live-room listings with Selenium and store them in MongoDB.

For every page of https://www.douyu.com/directory/all the script collects
each room's name, viewer count, tag and host name, then inserts one record
per room into the ``douyu.zhibo`` collection.
"""
import time
from multiprocessing import Pool
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from bs4 import BeautifulSoup
from pymongo import MongoClient
from pymongo.errors import PyMongoError

# MongoDB connection settings
MONGO_HOST = "localhost"
MONGO_DATABASE = "douyu"
MONGO_TABLE = "zhibo"

client = MongoClient(host=MONGO_HOST)
db = client[MONGO_DATABASE]

# PhantomJS command-line options,
# see http://phantomjs.org/api/command-line.html
# NOTE(review): PhantomJS is unmaintained; headless Chrome/Firefox is the
# modern replacement, but switching would change runtime requirements.
SERVICE_ARGS = ['--disk-cache=true', '--load-images=false']

# driver = webdriver.Chrome()  # with a visible browser window
driver = webdriver.PhantomJS(service_args=SERVICE_ARGS)  # headless
delay = 10  # seconds: explicit-wait timeout for every wait.until(...)
wait = WebDriverWait(driver, delay)
driver.maximize_window()


def get_total_pages():
    """Open the directory page, scrape page 1, and return the total page count."""
    url = 'https://www.douyu.com/directory/all'
    driver.get(url)
    # The pager item right after the "..." dot holds the last page number.
    # find_element(By.CSS_SELECTOR, ...) replaces the removed
    # find_element_by_css_selector (gone in Selenium 4).
    pages = int(driver.find_element(
        By.CSS_SELECTOR, '.shark-pager-dot + .shark-pager-item').text)
    print("正在獲取第1頁數據")
    room_list = get_rooms_by_beautifulsoup()
    save_to_monogodb(room_list)
    return pages


def parse_page(page_num, retries=3):
    """Navigate to *page_num* via the pager and persist its rooms.

    On TimeoutException the page is retried at most *retries* more times
    (the original retried via unbounded recursion, which could recurse
    forever on a persistently failing page).
    """
    print("正在獲取第%d頁數據" % page_num)
    try:
        page_num_box = wait.until(
            EC.presence_of_element_located(
                (By.CSS_SELECTOR, "input.jumptxt")))
        go_btn = wait.until(EC.element_to_be_clickable(
            (By.CSS_SELECTOR, 'a.shark-pager-submit')))
        page_num_box.clear()
        page_num_box.send_keys(page_num)
        go_btn.click()
        # Wait until the pager highlights the requested page number.
        # By.CLASS_NAME cannot be used here: "invalid selector: Compound
        # class names not permitted".
        wait.until(
            EC.text_to_be_present_in_element(
                (By.CSS_SELECTOR, '.shark-pager-item.current'),
                str(page_num)))
        room_list = get_rooms_by_beautifulsoup()
        save_to_monogodb(room_list)
    except TimeoutException:
        print("請求第%d頁失敗" % page_num)
        if retries <= 0:
            return  # give up on this page after exhausting retries
        print("嘗試重新獲取第%d頁" % page_num)
        return parse_page(page_num, retries - 1)


def get_rooms_by_beautifulsoup():
    """Parse the current page with bs4.

    Yields one dict per live room with its name, viewer count, tag and
    host name.
    """
    # Make sure the room list has actually rendered before grabbing the HTML.
    wait.until(EC.presence_of_element_located(
        (By.CSS_SELECTOR, "ul#live-list-contentbox > li")))
    soup = BeautifulSoup(driver.page_source, 'lxml')
    for room in soup.select('ul#live-list-contentbox > li'):
        room_name = room.find(
            'h3', attrs={'class': 'ellipsis'}).get_text(strip=True)
        view_count = room.find('span', class_='dy-num fr').text
        tag = room.find('span', class_='tag ellipsis').text
        hostname = room.find('span', class_='dy-name ellipsis fl').text
        # print("房間名: " + room_name + "\t觀看人數: " + view_count
        #       + "\t標簽: " + tag + "\t主播名: " + hostname)
        yield {
            'room_name': room_name,
            'view_count': view_count,
            'tag': tag,
            'hostname': hostname,
        }


def save_to_monogodb(room_list):
    """Insert every room dict into MongoDB, one document at a time.

    (Function name keeps the historic "monogodb" typo so existing callers
    and importers are unaffected.)
    """
    for room in room_list:
        try:
            # insert_one replaces Collection.insert, removed in PyMongo 4.
            db[MONGO_TABLE].insert_one(room)
            print("mongodb插入數據成功:", room)
        except PyMongoError as e:
            print("mongodb插入數據失敗:", room, e)


if __name__ == '__main__':
    # Defined before the try block so the finally clause can never hit a
    # NameError when get_total_pages() itself raises.
    total_pages = 0
    try:
        total_pages = get_total_pages()
        for page_num in range(2, total_pages + 1):
            parse_page(page_num)
    except Exception as e:
        print("出錯了", e)
    finally:
        print("共有%d頁" % total_pages)
        # quit() also terminates the PhantomJS process; close() only closes
        # the window and would leak the headless browser.
        driver.quit()
發表評論 共有條評論
用戶名: 密碼:
驗證碼: 匿名發表
主站蜘蛛池模板: 哈巴河县| 白山市| 资源县| 衡山县| 姚安县| 石河子市| 博湖县| 深水埗区| 淮安市| 麻阳| 金溪县| 曲水县| 通化县| 榆树市| 五莲县| 保康县| 洪湖市| 监利县| 黎城县| 枞阳县| 武汉市| 儋州市| 西乡县| 英山县| 平凉市| 利川市| 靖边县| 泾川县| 高陵县| 禹州市| 绥德县| 阆中市| 旬邑县| 桃江县| 绥棱县| 曲周县| 龙岩市| 蓝山县| 正蓝旗| 团风县| 新郑市|