How do I save scraped data to a CSV file in a loop in Python?
伊谢尔伦 2017-04-18 10:24:48

Could someone please point me in the right direction? Much appreciated!

I want to save the printed output to a CSV file.

My code is below (it needs Python 3.5):

# coding:utf-8
import requests
import json
import time

time_unix = time.time()
time_unix = str(time_unix).split('.')[0]
headers = {
    'Host': 'sec.wedengta.com',
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.113 Safari/537.36',
    'Connection': 'keep-alive',
    'Content-Language': 'zh-CN,zh;q=0.8',
    'Accept': 'application / json',
    # 'Accept-Language': ' zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
    'X-Requested-With': 'XMLHttpRequest',
    'Referer': 'https://sec.wedengta.com/findPool.html?dt_from=web&title=%E8%85%BE%E8%AE%AF%E6%B6%A8%E5%81%9C%E6%9D%BF%E9%A2%84%E6%B5%8B%20-%20%E5%8E%86%E5%8F%B2%E6%8E%A8%E8%8D%90&id=99970_56&webviewType=userActivitesType&dt_page_type=11&timeStamp=1488801827765&barType=null&accessp=null',
    'Accept-Encoding': 'gzip, deflate, br'
    }
url = 'https://sec.wedengta.com/getIntelliStock?action=IntelliSecPool&id=99970_56&_={0}'.format(time_unix)
request = requests.get(url, headers=headers)
# The response is JSON whose 'content' field is itself a JSON-encoded string,
# so it gets decoded here in two steps
temp = str(json.loads(request.text)).replace('\\', '')
dic = eval(temp)
content = dic['content']
content = eval(content)
vtDaySec = content['vtDaySec']

for every in vtDaySec:
    for every_company in every['vtSec']:
        print(every['sOptime'])
        print(every_company['sChnName'])
        print(every_company['sDtCode'][4:])
        print('\n')

The script runs and prints the results (screenshot omitted).

What I want is a CSV file with one row per company: date, company name, stock code.


All replies (2)
小葫芦
#coding=utf-8

import requests
import json
import time
import csv

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.113 Safari/537.36'
}
url = 'https://sec.wedengta.com/getIntelliStock?action=IntelliSecPool&id=99970_56&_={0}'.format(int(time.time()))
r = requests.get(url, headers=headers)
result = json.loads(r.text)

# 'content' is itself a JSON string, so decode it a second time,
# then collect one (date, name, code) tuple per company
rows = []
for every in json.loads(result['content'])['vtDaySec']:
    for company in every['vtSec']:
        rows.append((
            every['sOptime'],
            company['sChnName'],
            company['sDtCode'][4:]
        ))

# Python 3: open the file in text mode; 'utf-8-sig' writes a BOM so Excel
# recognises the encoding (replaces the codecs/BOM_UTF8 trick needed in Python 2)
with open('company.csv', 'w', newline='', encoding='utf-8-sig') as f:
    writer = csv.writer(f)
    writer.writerow(['date', 'stk_name', 'stk_num'])
    writer.writerows(rows)
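
If pandas happens to be installed (an extra dependency, not used in the code above), the same rows can also be dumped in a single call; a minimal sketch:

import pandas as pd

df = pd.DataFrame(rows, columns=['date', 'stk_name', 'stk_num'])
# utf-8-sig keeps the BOM so Excel opens the file with the correct encoding
df.to_csv('company.csv', index=False, encoding='utf-8-sig')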
小葫芦
# Quick version: append this to the question's script, after vtDaySec has been parsed.
# (Don't name the file object `csv`, or it will shadow the csv module.)
f = open('data.csv', 'w', encoding='utf-8')

for every in vtDaySec:
    for every_company in every['vtSec']:
        f.write(','.join([every['sOptime'], every_company['sChnName'], every_company['sDtCode'][4:]]) + '\n')

f.close()
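
One caveat with joining the fields by hand: if a company name ever contains a comma or a quote, that line will no longer parse as valid CSV. A small sketch of the same loop using csv.writer, which handles the quoting (it assumes vtDaySec has already been parsed as in the question):

import csv

with open('data.csv', 'w', newline='', encoding='utf-8') as f:
    writer = csv.writer(f)  # quotes/escapes fields automatically
    for every in vtDaySec:
        for every_company in every['vtSec']:
            writer.writerow([every['sOptime'],
                             every_company['sChnName'],
                             every_company['sDtCode'][4:]])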