This article shares several methods for making a Python crawler set a proxy IP and disguise itself as a browser. They have some reference value; friends in need can refer to them.
1. Disguise the Python crawler as a browser
import urllib.request

# Set a browser-like request header
headers = ("User-Agent", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0")
# Create an opener
opener = urllib.request.build_opener()
# Add the header to the opener
opener.addheaders = [headers]
# Install the opener globally so urlopen uses it
urllib.request.install_opener(opener)
# Open the page with urlopen (url must be defined beforehand)
data = urllib.request.urlopen(url).read().decode('utf-8', 'ignore')
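As a quick sanity check, you can point the installed opener at a header-echo service to confirm the disguised User-Agent is actually sent. A minimal sketch, assuming the opener above has already been installed globally; httpbin.org/user-agent is a public echo endpoint used here purely for testing:

import urllib.request

# Verify the disguised User-Agent actually reaches the server
test_url = "http://httpbin.org/user-agent"  # public echo service, testing only
echoed = urllib.request.urlopen(test_url).read().decode('utf-8', 'ignore')
print(echoed)  # should print a JSON body containing the Chrome User-Agent set above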
2. Set a proxy IP
import urllib.request

# Define the proxy IP and port
proxy_addr = "122.241.72.191:808"
# Set up the proxy handler (the original's ProxyHandle/HTTPHandle are typos for ProxyHandler/HTTPHandler)
proxy = urllib.request.ProxyHandler({'http': proxy_addr})
# Create an opener that routes requests through the proxy
opener = urllib.request.build_opener(proxy, urllib.request.HTTPHandler)
# Install the opener globally
urllib.request.install_opener(opener)
# Open the page with urlopen (url must be defined beforehand)
data = urllib.request.urlopen(url).read().decode('utf-8', 'ignore')
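A single proxy often gets banned quickly. A common extension, not shown in the original, is to keep a small pool of proxies and pick one at random per request. A minimal sketch, assuming a hypothetical proxy_pool list; note it calls opener.open directly instead of installing the opener globally, so each request can use a different proxy:

import random
import urllib.request

# Hypothetical proxy pool; replace with addresses you actually control or rent
proxy_pool = ["122.241.72.191:808", "122.241.72.192:808"]

def open_with_random_proxy(url):
    # Pick one proxy per request so a ban on a single IP hurts less
    proxy = urllib.request.ProxyHandler({'http': random.choice(proxy_pool)})
    opener = urllib.request.build_opener(proxy, urllib.request.HTTPHandler)
    return opener.open(url).read().decode('utf-8', 'ignore')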
3. Set a proxy and simulate browser access at the same time
import urllib.request

# Define the proxy IP and port
proxy_addr = "122.241.72.191:808"
# Create a request object
req = urllib.request.Request(url)
# Add a browser-like User-Agent header
req.add_header("User-Agent", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0")
# Set up the proxy handler (ProxyHandler takes a dict, not bare key: value pairs)
proxy = urllib.request.ProxyHandler({'http': proxy_addr})
# Create an opener that routes requests through the proxy
opener = urllib.request.build_opener(proxy, urllib.request.HTTPHandler)
# Install the opener globally
urllib.request.install_opener(opener)
# Open the page with the prepared request
data = urllib.request.urlopen(req).read().decode('utf-8', 'ignore')
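Free proxies fail often, so it is worth wrapping the call in error handling. A minimal sketch, assuming the req object and opener from the snippet above:

import urllib.error
import urllib.request

try:
    # A short timeout keeps a dead proxy from hanging the crawler
    data = urllib.request.urlopen(req, timeout=10).read().decode('utf-8', 'ignore')
except urllib.error.URLError as e:
    # Typical failures here: proxy refused the connection, DNS error, timeout
    print("request failed:", e.reason)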
4. Add multiple fields to the request header
import urllib.request

# Multiple header fields can be passed to Request as a dict
page_headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0",
    "Host": "www.baidu.com",
    "Cookie": "xxxxxxxx"
}
req = urllib.request.Request(url, headers=page_headers)
data = urllib.request.urlopen(req).read().decode('utf-8', 'ignore')
5. Add POST request parameters
import urllib.request
import urllib.parse

# Encode the POST parameters (page_num and keywords must be defined beforehand)
page_data = urllib.parse.urlencode([
    ('pn', page_num),
    ('kd', keywords)
])
# Set the request headers
page_headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0',
    'Connection': 'keep-alive',
    'Host': 'www.lagou.com',
    'Origin': 'https://www.lagou.com',
    'Cookie': 'JSESSIONID=ABAAABAABEEAAJA8F28C00A88DC4D771796BB5C6FFA2DDA; user_trace_token=20170715131136-d58c1f22f6434e9992fc0b35819a572b',
    'Accept': 'application/json, text/javascript, */*; q=0.01',
    'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
    'Referer': 'https://www.lagou.com/jobs/list_%E6%95%B0%E6%8D%AE%E6%8C%96%E6%8E%98?labelWords=&fromSearch=true&suginput=',
    'X-Anit-Forge-Token': 'None',
    'X-Requested-With': 'XMLHttpRequest'
}
# Open the page; passing data to urlopen makes it a POST request
req = urllib.request.Request(url, headers=page_headers)
data = urllib.request.urlopen(req, data=page_data.encode('utf-8')).read().decode('utf-8')
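Since the Accept header above asks for JSON, the response body is typically a JSON document. A short sketch of decoding it, assuming data holds the response text from the snippet above; the 'content' key is a guess at the payload field, so inspect the real response first:

import json

# Parse the JSON body returned by the POST request above
result = json.loads(data)
print(result.get('content'))  # hypothetical key; check the actual response structure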
6. Use PhantomJS to simulate browser requests
# 1. Download PhantomJS, install it locally, and add it to the PATH environment variable
from selenium import webdriver

bs = webdriver.PhantomJS()
# Open the url
bs.get(url)
# Get the page source
url_data = bs.page_source
# Save a screenshot of the rendered page as an image
bs.get_screenshot_as_file(filename)
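Note that PhantomJS development was suspended in 2018 and newer Selenium releases have dropped support for it; the WebDriver approach in section 8, or the headless Chrome sketch shown there, is the usual replacement today.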
7. Set the user agent and cookies in PhantomJS
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities

dcap = dict(DesiredCapabilities.PHANTOMJS)
# Override the default user agent
dcap["phantomjs.page.settings.userAgent"] = ("Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0")
bs = webdriver.PhantomJS(desired_capabilities=dcap)
bs.get(url)
# Delete all cookies
bs.delete_all_cookies()
# Set a cookie
# Cookie format: inspect it in the browser; a cookie needs the fields domain, name, value, and path
cookie = {
    'domain': '.www.baidu.com',  # note the leading dot
    'name': 'xxxx',
    'value': 'xxxx',
    'path': 'xxxx'
}
# Add the cookie to PhantomJS
bs.add_cookie(cookie)
8. Use the WebDriver tool
# 1. Download the WebDriver executable (e.g. chromedriver.exe) matching your browser version
# 2. Put chromedriver.exe in a directory, e.g. C:\chromedriver.exe
from selenium import webdriver

# Use a raw string so the backslash in the Windows path is not treated as an escape
driver = webdriver.Chrome(executable_path=r"C:\chromedriver.exe")
# Open the url
driver.get(url)
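Since PhantomJS is no longer maintained, a common substitute is to run Chrome itself in headless mode through the same WebDriver interface. A minimal sketch, assuming chromedriver is on the PATH and a Selenium version recent enough to accept the options keyword:

from selenium import webdriver

# Run Chrome without a visible window, as a drop-in replacement for PhantomJS
opts = webdriver.ChromeOptions()
opts.add_argument('--headless')
driver = webdriver.Chrome(options=opts)
driver.get(url)  # url must be defined beforehand
page = driver.page_source
driver.quit()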
The above is the detailed content of the methods Python crawlers use to set a proxy IP and disguise themselves as a browser. For more information, please follow other related articles on the PHP Chinese website!