# -*- coding: utf-8 -*-
import requests


def xici_request():
    url = 'http://www.xicidaili.com'
    # browser-like headers so the site does not reject the request
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, sdch',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        'Host': 'www.xicidaili.com',
        'Referer': 'https://www.google.com/',
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
    }
    res = requests.get(url, headers=headers)
    print(res.text)


if __name__ == '__main__':
    xici_request()
# -*- coding: utf-8 -*-
import scrapy
from collectips.items import CollectipsItem


class XiciSpider(scrapy.Spider):
    name = "xici"
    # allowed_domains takes bare domain names, not URLs with a scheme
    allowed_domains = ["www.xicidaili.com"]
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, sdch',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        'Host': 'www.xicidaili.com',
        'Referer': 'https://www.google.com/',
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
    }

    def start_requests(self):
        # queue the first 20 listing pages
        for i in range(1, 21):
            yield scrapy.Request(
                'http://www.xicidaili.com/nn/{}'.format(i),
                headers=self.headers)

    def parse(self, response):
        sel = response.selector
        # rows 2..101 of the #ip_list table each hold one proxy;
        # build a fresh item per row instead of mutating a shared one
        for i in range(2, 102):
            item = CollectipsItem()
            item['IP'] = sel.xpath(
                '//*[@id="ip_list"]/tbody/tr[{}]/td[2]/text()'.format(i)).extract()
            item['PORT'] = sel.xpath(
                '//*[@id="ip_list"]/tbody/tr[{}]/td[3]/text()'.format(i)).extract()
            item['DNS_POSITION'] = sel.xpath(
                '//*[@id="ip_list"]/tbody/tr[{}]/td[4]/a/text()'.format(i)).extract()
            item['TYPE'] = sel.xpath(
                '//*[@id="ip_list"]/tbody/tr[{}]/td[6]/text()'.format(i)).extract()
            item['SPEED'] = sel.xpath(
                '//*[@id="ip_list"]/tbody/tr[{}]/td[7]/p[@title]'.format(i)).extract()
            item['LAST_CHECK_TIME'] = sel.xpath(
                '//*[@id="ip_list"]/tbody/tr[{}]/td[10]/text()'.format(i)).extract()
            yield item
The code is shown above. Why can requests return the page content, while Scrapy fails with an internal server error (500)? Can anyone help?
If you do not account for concurrency, firing too many simultaneous requests will get your IP blocked outright.
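A minimal throttling sketch, assuming a standard Scrapy project: the setting names below are Scrapy's built-in ones, but the values are only illustrative starting points, not tuned for this site.

    # settings.py -- illustrative values; tune them against the target site
    CONCURRENT_REQUESTS = 2              # cap simultaneous requests overall
    CONCURRENT_REQUESTS_PER_DOMAIN = 2   # and per domain
    DOWNLOAD_DELAY = 3                   # seconds between requests to one domain
    RETRY_HTTP_CODES = [500, 502, 503]   # retry server errors instead of giving up

    # AutoThrottle adapts the delay to the server's observed latency
    AUTOTHROTTLE_ENABLED = True
    AUTOTHROTTLE_START_DELAY = 3
    AUTOTHROTTLE_MAX_DELAY = 30

With these in place, the spider sends requests at a pace much closer to the one-at-a-time behaviour of the requests script, which is often enough to make the 500 responses stop.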