Web crawler — under Python 3.6 the crawler repeatedly crawls only the first page's content
为情所困
为情所困 2017-05-18 11:01:43
0
1
829

The problem is as described in the title:
I changed the loop to `while` and tried many variations, but it still keeps fetching the same first page — please advise.

# coding:utf-8
#  
from lxml import etree
import requests,lxml.html,os

class MyError(Exception):
    """Raised when the scraped name count does not match the phone count."""

    def __init__(self, value):
        # Pass the message to Exception so e.args / default printing /
        # pickling work; the original skipped this and left args empty.
        super().__init__(value)
        self.value = value

    def __str__(self):
        # Preserve the original repr-style rendering of the message.
        return repr(self.value)
      
def get_lawyers_info(url):
    """Scrape one listing page and return formatted "name: phone" lines.

    Parameters
    ----------
    url : str
        Listing-page URL to fetch.

    Returns
    -------
    list[str]
        One "name: phone\r\n" entry per lawyer found on the page.

    Raises
    ------
    MyError
        If the page yields a different number of names and phone numbers.
    """
    # Timeout so one dead page cannot hang the whole 1000-page crawl.
    r = requests.get(url, timeout=10)
    html = lxml.html.fromstring(r.content)
    phones = html.xpath('//span[@class="phone pull-right"]')
    names = html.xpath('//h4[@class="text-center"]')
    # Guard clause: mismatched counts mean the pairing below would be wrong.
    if len(phones) != len(names):
        error = "Lawyers amount are not equal to the amount of phone_nums: " + url
        raise MyError(error)
    phone_infos_list = []
    # Pair elements directly instead of indexing by range(len(...)).
    for name_el, phone_el in zip(names, phones):
        name = name_el.text
        phone = phone_el.text_content()
        if name == "":
            # Placeholder when the lawyer left no name.
            info = "没留姓名" + ": " + phone + "\r\n"
        else:
            info = name + ": " + phone + "\r\n"
        print(info)
        phone_infos_list.append(info)
    return phone_infos_list

# --- Crawl every listing page and persist the scraped contacts. ---
dir_path = os.path.abspath(os.path.dirname(__file__))
print(dir_path)
file_path = os.path.join(dir_path, "lawyers_info.txt")
print(file_path)
# Start from a clean output file on every run.
if os.path.exists(file_path):
    os.remove(file_path)

# BUG FIX: open the same absolute path that was just removed. The original
# opened a cwd-relative "lawyers_info.txt", which is a *different* file
# whenever the script is launched from another directory.
with open(file_path, "ab") as file:
    for i in range(1000):
        # Pages are 1-based on the site, hence i + 1.
        url = ("http://www.xxxx.com/cooperative_merchants?searchText=&industry=100"
               "&provinceId=19&cityId=0&areaId=0&page=" + str(i + 1))
        info = get_lawyers_info(url)
        for each in info:
            # Output is GBK-encoded (original behavior) — presumably for
            # Chinese-locale Windows tools; confirm before changing.
            file.write(each.encode("gbk"))
为情所困
为情所困

All replies (1)
左手右手慢动作
# coding: utf-8

import requests
from pyquery import PyQuery as Q

url = 'http://www.51myd.com/cooperative_merchants?industry=100&provinceId=19&cityId=0&areaId=0&page='

with open('lawyers_info.txt', 'ab') as f:
    for i in range(1, 5):
        r = requests.get('{}{}'.format(url, i))
        usernames = Q(r.text).find('.username').text().split()
        phones = Q(r.text).find('.phone').text().split()

        # BUG FIX: `print zip(...)` is Python 2 syntax and a SyntaxError on
        # the asker's Python 3.6; zip() is also lazy on Python 3, so
        # materialize it before printing.
        print(list(zip(usernames, phones)))
Latest Downloads
More>
Web Effects
Website Source Code
Website Materials
Front End Template