在Python中使用CasperJS獲取JS渲染生成的HTML內容的教程

首頁 > 編程 > Python > 正文

在Python中使用CasperJS獲取JS渲染生成的HTML內容的教程

2020-02-23 00:38:44
字體:
供稿:網(wǎng)友

文章摘要:其實這裡casperjs與python沒有直接關係,主要依賴casperjs調用phantomjs webkit獲取html文件內容。長期以來,爬蟲抓取客戶端javascript渲染生成的html頁面都極為困難,Java裡面有HtmlUnit,而Python裡,我們可以使用獨立的跨平台的CasperJS。

    創(chuàng)建site.js(接口文件,輸入:url,輸出:html file)  

   //USAGE: E:/toolkit/n1k0-casperjs-e3a77d0/bin>python casperjs site.js --url=http://spys.ru/free-proxy-list/IE/ --outputfile='temp.html'          var fs = require('fs');     var casper = require('casper').create({      pageSettings: {      loadImages: false,          loadPlugins: false,         userAgent: 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/537.36 LBBROWSER'     },     logLevel: "debug",//日志等級(jí)     verbose: true  // 記錄日志到控制臺(tái)      });     var url = casper.cli.raw.get('url');     var outputfile = casper.cli.raw.get('outputfile');     //請(qǐng)求頁面     casper.start(url, function () {     fs.write(outputfile, this.getHTML(), 'w');     });          casper.run(); 

    python 代碼, checkout_proxy.py      

 import json     import sys     #import requests     #import requests.utils, pickle     from bs4 import BeautifulSoup     import os.path,os     import threading     #from multiprocessing import Process, Manager     from datetime import datetime     import traceback     import logging     import re,random     import subprocess     import shutil     import platform                          output_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),'proxy.txt')     global_log = 'http_proxy' + datetime.now().strftime('%Y-%m-%d') + '.log'     if not os.path.exists(os.path.join(os.path.dirname(os.path.realpath(__file__)),'logs')):       os.mkdir(os.path.join(os.path.dirname(os.path.realpath(__file__)),'logs'))     global_log = os.path.join(os.path.dirname(os.path.realpath(__file__)),'logs',global_log)          logging.basicConfig(level=logging.DEBUG,format='[%(asctime)s] [%(levelname)s] [%(module)s] [%(funcName)s] [%(lineno)d] %(message)s',filename=global_log,filemode='a')     log = logging.getLogger(__name__)      #manager = Manager()     #PROXY_LIST = manager.list()     mutex = threading.Lock()     PROXY_LIST = []               def isWindows():       if "Windows" in str(platform.uname()):       return True       else:       return False               def getTagsByAttrs(tagName,pageContent,attrName,attrRegValue):       soup = BeautifulSoup(pageContent)                                                       return soup.find_all(tagName, { attrName : re.compile(attrRegValue) })               def getTagsByAttrsExt(tagName,filename,attrName,attrRegValue):       if os.path.isfile(filename):       f = open(filename,'r')          soup = BeautifulSoup(f)       f.close()       return soup.find_all(tagName, { attrName : re.compile(attrRegValue) })       else:       return None               class Site1Thread(threading.Thread):       def __init__(self,outputFilePath):         threading.Thread.__init__(self)       self.outputFilePath = outputFilePath       self.fileName 
= str(random.randint(100,1000)) + ".html"       self.setName('Site1Thread')             def run(self):       site1_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),'site.js')       site2_file = os.path.join(self.outputFilePath,'site.js')       if not os.path.isfile(site2_file) and os.path.isfile(site1_file):         shutil.copy(site1_file,site2_file)       #proc = subprocess.Popen(["bash","-c", "cd %s && ./casperjs site.js --url=http://spys.ru/free-proxy-list/IE/ --outputfile=%s" % (self.outputFilePath,self.fileName) ],stdout=subprocess.PIPE)       if isWindows():         proc = subprocess.Popen(["cmd","/c", "%s/casperjs site.js --url=http://spys.ru/free-proxy-list/IE/ --outputfile=%s" % (self.outputFilePath,self.fileName) ],stdout=subprocess.PIPE)       else:         proc = subprocess.Popen(["bash","-c", "cd %s && ./casperjs site.js --url=http://spys.ru/free-proxy-list/IE/ --outputfile=%s" % (self.outputFilePath,self.fileName) ],stdout=subprocess.PIPE)       out=proc.communicate()[0]       htmlFileName = ''       #因?yàn)檩敵雎窂皆趙indows不確定,所以這里加了所有可能的路徑判斷       if os.path.isfile(self.fileName):         htmlFileName = self.fileName       elif os.path.isfile(os.path.join(self.outputFilePath,self.fileName)):         htmlFileName = os.path.join(self.outputFilePath,self.fileName)       elif os.path.isfile(os.path.join(os.path.dirname(os.path.realpath(__file__)),self.fileName)):         htmlFileName = os.path.join(os.path.dirname(os.path.realpath(__file__)),self.fileName)        if (not os.path.isfile(htmlFileName)):         print 'Failed to get html content from http://spys.ru/free-proxy-list/IE/'         print out         sys.exit(3)        mutex.acquire()       PROXYList= getTagsByAttrsExt('font',htmlFileName,'class','spy14$')       for proxy in PROXYList:         tdContent = proxy.renderContents()         lineElems = re.split('[<>]',tdContent)         if re.compile(r'/d+').search(lineElems[-1]) and re.compile('(/d+/./d+/./d+)').search(lineElems[0]):       
  print lineElems[0],lineElems[-1]         PROXY_LIST.append("%s:%s" % (lineElems[0],lineElems[-1]))       mutex.release()       try:         if os.path.isfile(htmlFileName):         os.remove(htmlFileName)       except:         pass          if __name__ == '__main__':       try:       if(len(sys.argv)) < 2:         print "Usage:%s [casperjs path]" % (sys.argv[0])         sys.exit(1)        if not os.path.exists(sys.argv[1]):         print "casperjs path: %s does not exist!" % (sys.argv[1])         sys.exit(2)        if os.path.isfile(output_file):         f = open(output_file)         lines = f.readlines()         f.close         for line in lines:         PROXY_LIST.append(line.strip())       thread1 = Site1Thread(sys.argv[1])       thread1.start()       thread1.join()              f = open(output_file,'w')       for proxy in set(PROXY_LIST):         f.write(proxy+"/n")       f.close()       print "Done!"       except SystemExit:       pass       except:         errMsg = traceback.format_exc()         print errMsg         log.error(errMsg)             
發(fā)表評(píng)論 共有條評(píng)論
用戶名: 密碼:
驗(yàn)證碼: 匿名發(fā)表
主站蜘蛛池模板: 隆尧县| 陆河县| 正安县| 苍南县| 静宁县| 鄱阳县| 黑山县| 涿州市| 宣威市| 临猗县| 疏勒县| 上饶县| 报价| 措美县| 蕉岭县| 嘉鱼县| 普兰店市| 南召县| 深泽县| 措勤县| 丰原市| 天台县| 东至县| 汶川县| 绥中县| 金塔县| 大邑县| 乌什县| 陵水| 柳州市| 徐汇区| 裕民县| 枝江市| 沈阳市| 噶尔县| 陵川县| 枣庄市| 璧山县| 平昌县| 恩施市| 诸暨市|