
How to implement a web crawler with requests and lxml

2020-02-16 01:40:29

As shown below:

# Use the requests module to fetch the page
# and lxml's html module to build a selector (parse the response)
# from lxml import html
# import requests

# response = requests.get(url).content

# selector = html.fromstring(response)

# hrefs = selector.xpath("/html/body//div[@class='feed-item _j_feed_item']/a/@href")

# The example below uses url = 'https://www.mafengwo.cn/gonglve/ziyouxing/2033.html'
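To make the overview concrete, here is a minimal sketch of that fetch-parse-extract flow, assuming the mafengwo page is still reachable and its feed-item markup is unchanged:

# Minimal sketch of the flow described above (assumes the page structure is unchanged)
import requests
from lxml import html

url = 'https://www.mafengwo.cn/gonglve/ziyouxing/2033.html'
response = requests.get(url).content      # raw bytes of the page
selector = html.fromstring(response)      # build an lxml selector from the html
hrefs = selector.xpath("/html/body//div[@class='feed-item _j_feed_item']/a/@href")
for href in hrefs:
    print(href)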

# python 2.7
import requests
from lxml import html
import os

# Get the url links of the sub-pages from the index page
def get_page_urls(url):
  response = requests.get(url).content
  # Build the selector with lxml's html module
  selector = html.fromstring(response)
  urls = []
  for i in selector.xpath("/html/body//div[@class='feed-item _j_feed_item']/a/@href"):
    urls.append(i)
  return urls

# get title from a child's html (div[@class='title'])
def get_page_a_title(url):
  '''url is ziyouxing's a@href'''
  response = requests.get(url).content
  selector = html.fromstring(response)
  # xpath obtained with Chrome's dev tools --> /html/body//div[@class='title']/text()
  a_title = selector.xpath("/html/body//div[@class='title']/text()")
  return a_title

# Get a page selector (built with lxml's html module)
def get_selector(url):
  response = requests.get(url).content
  selector = html.fromstring(response)
  return selector

# Inspecting the html structure with Chrome's developer tools shows that the text
# we need is mainly displayed in div[@class='l-topic'] and div[@class='p-section']

# Get the required text content
def get_page_content(selector):
  # /html/body/div[2]/div[2]/div[1]/div[@class='l-topic']/p/text()
  page_title = selector.xpath("//div[@class='l-topic']/p/text()")
  # /html/body/div[2]/div[2]/div[1]/div[2]/div[15]/div[@class='p-section']/text()
  page_content = selector.xpath("//div[@class='p-section']/text()")
  return page_title, page_content

# Get the image url addresses on the page
def get_image_urls(selector):
  imagesrcs = selector.xpath("//img[@class='_j_lazyload']/@src")
  return imagesrcs

# Get the title of an image
def get_image_title(selector, num):
  # num starts from 2
  xpath_expr = "/html/body/div[2]/div[2]/div[1]/div[2]/div[" + str(num) + "]/span[@class='img-an']/text()"
  titles = selector.xpath(xpath_expr)
  if titles:
    image_title = titles[0]
  else:
    image_title = "map" + str(num)  # make a name up if there is none
  return image_title

# Download the images
def downloadimages(selector, number):
  '''number is used for counting'''
  urls = get_image_urls(selector)
  amount = len(urls)
  num = 2
  for url in urls:
    image_title = get_image_title(selector, num)
    directory = "/home/WorkSpace/tour/words/result" + str(number) + "/"
    if not os.path.exists(directory):
      os.makedirs(directory)
    filename = directory + str(image_title) + ".jpg"
    print('downloading %s image %s' % (number, image_title))
    with open(filename, 'wb') as f:
      f.write(requests.get(url).content)
    num += 1
  print("Downloaded %s images" % amount)
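The article does not show the functions being called together. A possible driver, assuming the hrefs returned by get_page_urls are absolute URLs (the index_url name and the per-page counter below are illustrative, not from the original), might look like this:

# Hypothetical driver wiring the functions above together
if __name__ == '__main__':
  index_url = 'https://www.mafengwo.cn/gonglve/ziyouxing/2033.html'
  for number, child_url in enumerate(get_page_urls(index_url), start=1):
    print(get_page_a_title(child_url))            # article title
    selector = get_selector(child_url)
    page_title, page_content = get_page_content(selector)  # text, e.g. to write to a file
    downloadimages(selector, number)              # saves images under .../result<number>/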