Python urllib、urllib2、httplib 抓取網頁代碼實例

首頁 > 編程 > Python > 正文

Python urllib、urllib2、httplib抓取網頁代碼實例

2020-02-23 01:09:20
字體:
來源:轉載
供稿:網友

使用urllib2,太強大了
試了下用代理登陸拉取cookie,跳轉抓圖片......
文檔:http://docs.python.org/library/urllib2.html

直接上demo代碼了
包括:直接拉取,使用Reuqest(post/get),使用代理,cookie,跳轉處理

#!/usr/bin/python# -*- coding:utf-8 -*-# urllib2_test.py# author: wklken# 2012-03-17 wklken@yeah.netimport urllib,urllib2,cookielib,socketurl = "http://www.testurl....." #change yourself#最簡單方式def use_urllib2(): try:  f = urllib2.urlopen(url, timeout=5).read() except urllib2.URLError, e:  print e.reason print len(f)#使用Requestdef get_request(): #可以設置超時 socket.setdefaulttimeout(5) #可以加入參數 [無參數,使用get,以下這種方式,使用post] params = {"wd":"a","b":"2"} #可以加入請求頭信息,以便識別 i_headers = {"User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9.1) Gecko/20090624 Firefox/3.5",       "Accept": "text/plain"} #use post,have some params post to server,if not support ,will throw exception #req = urllib2.Request(url, data=urllib.urlencode(params), headers=i_headers) req = urllib2.Request(url, headers=i_headers) #創建request后,還可以進行其他添加,若是key重復,后者生效 #request.add_header('Accept','application/json') #可以指定提交方式 #request.get_method = lambda: 'PUT' try:  page = urllib2.urlopen(req)  print len(page.read())  #like get  #url_params = urllib.urlencode({"a":"1", "b":"2"})  #final_url = url + "?" 
+ url_params  #print final_url  #data = urllib2.urlopen(final_url).read()  #print "Method:get ", len(data) except urllib2.HTTPError, e:  print "Error Code:", e.code except urllib2.URLError, e:  print "Error Reason:", e.reasondef use_proxy(): enable_proxy = False proxy_handler = urllib2.ProxyHandler({"http":"http://proxyurlXXXX.com:8080"}) null_proxy_handler = urllib2.ProxyHandler({}) if enable_proxy:  opener = urllib2.build_opener(proxy_handler, urllib2.HTTPHandler) else:  opener = urllib2.build_opener(null_proxy_handler, urllib2.HTTPHandler) #此句設置urllib2的全局opener urllib2.install_opener(opener) content = urllib2.urlopen(url).read() print "proxy len:",len(content)class NoExceptionCookieProcesser(urllib2.HTTPCookieProcessor): def http_error_403(self, req, fp, code, msg, hdrs):  return fp def http_error_400(self, req, fp, code, msg, hdrs):  return fp def http_error_500(self, req, fp, code, msg, hdrs):  return fpdef hand_cookie(): cookie = cookielib.CookieJar() #cookie_handler = urllib2.HTTPCookieProcessor(cookie) #after add error exception handler cookie_handler = NoExceptionCookieProcesser(cookie) opener = urllib2.build_opener(cookie_handler, urllib2.HTTPHandler) url_login = "https://www.yourwebsite/?login" params = {"username":"user","password":"111111"} opener.open(url_login, urllib.urlencode(params)) for item in cookie:  print item.name,item.value #urllib2.install_opener(opener) #content = urllib2.urlopen(url).read() #print len(content)#得到重定向 N 次以后最后頁面URLdef get_request_direct(): import httplib httplib.HTTPConnection.debuglevel = 1 request = urllib2.Request("http://www.google.com") request.add_header("Accept", "text/html,*/*") request.add_header("Connection", "Keep-Alive") opener = urllib2.build_opener() f = opener.open(request) print f.url print f.headers.dict print len(f.read())if __name__ == "__main__": use_urllib2() get_request() get_request_direct() use_proxy() hand_cookie()            
發表評論 共有條評論
用戶名: 密碼:
驗證碼: 匿名發表