python - 爬取天猫为何加了cookie也爬取不下信息
PHP中文网
PHP中文网 2017-04-17 16:05:31
0
0
586
# -*- coding: utf-8 -*-
import scrapy
from topgoods.items import TopgoodsItem

class TmGoodsSpider(scrapy.Spider):
    name = "tm_goods"
    allowed_domains = ["http://www.tmall.com"]
    start_urls = (
        'http://list.tmall.com/search_product.htm?type=pc&totalPage=100&cat=50025135&sort=d&style=g&from=sn_1_cat-qp&active=1&jumpto=10#J_Filter',
    )
    def start_requests(self):
        url = "http://list.tmall.com/search_product.htm?type=pc&totalPage=100&cat=50025135&sort=d&style=g&from=sn_1_cat-qp&active=1&jumpto=10#J_Filter"
        cookie_str = {
            '_med=dw:1366&dh:768&pw:1366&ph:768&ist:0; cq=ccp%3D1; isg=C6663DCE197F720203B92624681E4B8C; l=AoeH66TTsi4Uak-SSaRFZVakVzRRjFtu; cna=SmmqDk4Ey1oCATtNKm4+v1fc; _tb_token_=Rgq87NAbuYsOqd; ck1=;'#cookie有改动,不是原cookie
        }
        return [
            scrapy.Request(url,cookies=cookies_str),
        ]
    
    #记录处理的页数
    count=0 
     
    def parse(self, response):
          
        TmGoodsSpider.count += 1
        
        ps = response.xpath("//p[@id='J_ItemList']/p[@class='product']/p")
        if not ps:
            self.log( "List Page error--%s"%response.url )
              
        for p in ps:
            item=TopgoodsItem()
            #商品价格
            item["GOODS_PRICE"] = p.xpath("p[@class='productPrice']/em/@title")[0].extract()
            #商品名称
            item["GOODS_NAME"] = p.xpath("p[@class='productTitle']/a/@title")[0].extract()
            #商品连接
            pre_goods_url = p.xpath("p[@class='productTitle']/a/@href")[0].extract()
            item["GOODS_URL"] = pre_goods_url if "http:" in pre_goods_url else ("http:"+pre_goods_url)
            
            yield scrapy.Request(url=item["GOODS_URL"],meta={'item':item},callback=self.parse_detail,
            dont_filter=True)

    def parse_detail(self,response):

        p = response.xpath('//p[@class="extend"]/ul')
        if not p:
            self.log( "Detail Page error--%s"%response.url )
            
        item = response.meta['item']
        p=p[0]
        #店铺名称
        item["SHOP_NAME"] = p.xpath("li[1]/p/a/text()")[0].extract()
        #店铺连接
        item["SHOP_URL"] = p.xpath("li[1]/p/a/@href")[0].extract()
        #公司名称
        item["COMPANY_NAME"] = p.xpath("li[3]/p/text()")[0].extract().strip()
        #公司所在地
        item["COMPANY_ADDRESS"] = p.xpath("li[4]/p/text()")[0].extract().strip()
        
        yield item

感谢小秦大神的回答怎么查找天猫的cookie,但是我加了cookie好像还是报错了,不知道是哪里不对
报错代码:

10-18 20:05:44 [scrapy] INFO: Scrapy 1.0.3 started (bot: topgoods)
10-18 20:05:44 [scrapy] INFO: Optional features available: ssl, http11
10-18 20:05:44 [scrapy] INFO: Overridden settings: {'NEWSPIDER_MODULE': 'to
s.spiders', 'FEED_FORMAT': 'csv', 'SPIDER_MODULES': ['topgoods.spiders'], '
URI': 'abc.csv', 'BOT_NAME': 'topgoods'}
10-18 20:05:45 [scrapy] INFO: Enabled extensions: CloseSpider, FeedExporter
netConsole, LogStats, CoreStats, SpiderState
10-18 20:05:46 [scrapy] INFO: Enabled downloader middlewares: HttpAuthMiddl
, DownloadTimeoutMiddleware, UserAgentMiddleware, RetryMiddleware, DefaultH
sMiddleware, MetaRefreshMiddleware, HttpCompressionMiddleware, RedirectMidd
e, CookiesMiddleware, HttpProxyMiddleware, ChunkedTransferMiddleware, Downl
Stats
10-18 20:05:46 [scrapy] INFO: Enabled spider middlewares: HttpErrorMiddlewa
ffsiteMiddleware, RefererMiddleware, UrlLengthMiddleware, DepthMiddleware
10-18 20:05:46 [scrapy] INFO: Enabled item pipelines:
dled error in Deferred:
10-18 20:05:46 [twisted] CRITICAL: Unhandled error in Deferred:
10-18 20:05:46 [twisted] CRITICAL:

难道是piplines的问题:我没改原来就设置好的

class TopgoodsPipeline(object):
    def process_item(self, item, spider):
        return item

请各位大神在帮我看看代码,本人非科班小白,第一次用scrapy模拟登入。

PHP中文网
PHP中文网

认证0级讲师

répondre à tous(0)
Derniers téléchargements
Plus>
effets Web
Code source du site Web
Matériel du site Web
Modèle frontal
À propos de nous Clause de non-responsabilité Sitemap
Site Web PHP chinois:Formation PHP en ligne sur le bien-être public,Aidez les apprenants PHP à grandir rapidement!