
Python crawler: scrape every movie on Tencent Video (code)

不言 | Published: 2018-10-12 15:12:58 | Reposted | 8,821 views

This article presents the code for a Python crawler that scrapes every movie listed on Tencent Video. It should be a useful reference for anyone who needs to build something similar; I hope it helps you.

The crawler below, written in Python 2, fetches the genre tags from Tencent Video's category page, works out how many listing pages each genre has, then walks every page and stores each movie's title and URL in MongoDB.


# -*- coding: utf-8 -*-

import re
import time

import urllib2
import pymongo
from bs4 import BeautifulSoup

NUM     = 0         # global: number of movies scraped so far
m_type  = u''       # global: current movie genre
m_site  = u'qq'     # global: source site

# Fetch the page content for a given URL.
def gethtml(url):
    req = urllib2.Request(url)
    response = urllib2.urlopen(req)
    html = response.read()
    return html

# Extract the movie genres from the category list page.
def gettags(html):
    global m_type
    soup = BeautifulSoup(html)      # parse so the genre block can be isolated

    # <ul class="clearfix _group" gname="mi_type" gtype="1">
    tags_all = soup.find_all('ul', {'class' : 'clearfix _group', 'gname' : 'mi_type'})

    # <a _hot="tag.sub" class="_gtag _hotkey" href="http://v.qq.com/list/1_0_-1_-1_1_0_0_20_0_-1_0.html" title="动作" tvalue="0">动作</a>
    re_tags = r'<a _hot=\"tag\.sub\" class=\"_gtag _hotkey\" href=\"(.+?)\" title=\"(.+?)\" tvalue=\"(.+?)\">.+?</a>'
    p = re.compile(re_tags, re.DOTALL)
    tags = p.findall(str(tags_all[0]))

    tags_url = {}                   # genre name -> listing URL (defined up front so the return never fails)
    if tags:
        for tag in tags:
            tag_url = tag[0].decode('utf-8')
            m_type = tag[1].decode('utf-8')
            tags_url[m_type] = tag_url
    else:
        print "No genre tags found"
    return tags_url

# Get the number of listing pages for one genre.
def get_pages(tag_url):
    tag_html = gethtml(tag_url)
    soup = BeautifulSoup(tag_html)      # parse the pager markup

    # <div class="mod_pagenav" id="pager">
    div_page = soup.find_all('div', {'class' : 'mod_pagenav', 'id' : 'pager'})

    # <a class="c_txt6" href="http://v.qq.com/list/1_2_-1_-1_1_0_24_20_0_-1_0.html" title="25"><span>25</span></a>
    re_pages = r'<a class=.+?><span>(.+?)</span></a>'
    p = re.compile(re_pages, re.DOTALL)
    pages = p.findall(str(div_page[0]))
    if len(pages) > 1:
        return pages[-2]    # the second-to-last link holds the highest page number
    else:
        return 1

# Walk every movie list block on one listing page.
def getmovielist(html):
    soup = BeautifulSoup(html)

    # <ul class="mod_list_pic_130">
    uls = soup.find_all('ul', {'class' : 'mod_list_pic_130'})
    for ul_html in uls:
        ul_html = str(ul_html).replace('\n', '')
        getmovie(ul_html)

# Parse the movies out of one list block and write them to MongoDB.
def getmovie(html):
    global NUM
    global m_type
    global m_site

    # <li><a class="mod_poster_130" href="..." target="_blank" title="..."><img ...></a></li>
    re_movie = r'<li><a class=\"mod_poster_130\" href=\"(.+?)\" target=\"_blank\" title=\"(.+?)\"><img.+?</li>'
    p = re.compile(re_movie, re.DOTALL)
    movies = p.findall(html)
    if movies:
        # pymongo.Connection was removed in pymongo 3.0; MongoClient works in 2.x and 3.x.
        conn = pymongo.MongoClient('localhost', 27017)
        movie_db = conn.dianying
        playlinks = movie_db.playlinks
        for movie in movies:
            NUM += 1    # count each movie once
            print "%s : %d" % ("=" * 70, NUM)
            values = dict(
                movie_title = movie[1],
                movie_url   = movie[0],
                movie_site  = m_site,
                movie_type  = m_type
                )
            print values
            playlinks.insert_one(values)
            print "_" * 70
    #else:
    #    print "Not Find"

# Fetch a movie's detail page and extract its play links (unused in the main flow).
def getmovieinfo(url):
    html = gethtml(url)
    soup = BeautifulSoup(html)

    # <div class="pack pack_album album_cover">
    divs = soup.find_all('div', {'class' : 'pack pack_album album_cover'})

    # <a href="http://www.tudou.com/albumplay/9NyofXc_lHI/32JqhiKJykI.html" target="new" title="《血滴子》独家纪录片" wl="1"> </a>
    re_info = r'<a href=\"(.+?)\" target=\"new\" title=\"(.+?)\" wl=\".+?\"> </a>'
    p_info = re.compile(re_info, re.DOTALL)
    m_info = p_info.findall(str(divs[0]))
    if not m_info:
        print "Not find movie info"
    return m_info

# Insert one movie-info record (unused in the main flow; expects a global conn).
def insertdb(movieinfo):
    global conn
    movie_db = conn.dianying_at
    movies = movie_db.movies
    movies.insert_one(movieinfo)

if __name__ == "__main__":

 

    global conn

 

   

 

    tags_url = "http://v.qq.com/list/1_-1_-1_-1_1_0_0_20_0_-1_0.html"

 

    #print tags_url

 

    tags_html = gethtml(tags_url)

 

    #print tags_html

 

    tag_urls = gettags(tags_html)

 

    #print tag_urls

 

   

 

   

 

    for url in tag_urls.items():

 

        print  str(url[1]).encode(&#39;utf-8&#39;) #,url[0]

 

        maxpage = int(get_pages(str(url[1]).encode(&#39;utf-8&#39;)))

 

        print maxpage

 

   

 

        for x in range(0, maxpage):

 

            #http://v.qq.com/list/1_0_-1_-1_1_0_0_20_0_-1_0.html

 

            m_url = str(url[1]).replace(&#39;0_20_0_-1_0.html&#39;, &#39;&#39;)

 

            movie_url = "%s%d_20_0_-1_0.html" % (m_url, x)

 

            print movie_url

 

            movie_html = gethtml(movie_url.encode(&#39;utf-8&#39;))

 

            #print movie_html

 

            getmovielist(movie_html)

 

            time.sleep(0.1)
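
The listing above is Python 2 throughout (urllib2, print statements), and the original relied on pymongo's long-removed Connection class. For readers on Python 3, here is a minimal sketch of the same fetch-parse-store flow. Everything in it is an assumption carried over from the 2018 article: the URL pattern, the clearfix _group / _gtag _hotkey / mod_poster_130 classes, and the dianying.playlinks collection; Tencent Video's current markup almost certainly differs.

# -*- coding: utf-8 -*-
# Hypothetical Python 3 sketch of the core flow above. The URL pattern and
# CSS classes are carried over from the 2018 article and are assumptions
# about Tencent Video's markup at that time, not its current pages.
import time
import urllib.request

import pymongo
from bs4 import BeautifulSoup


def gethtml(url):
    # Fetch a page and decode the body to text.
    with urllib.request.urlopen(url) as response:
        return response.read().decode('utf-8', errors='replace')


def gettags(html):
    # Map genre name -> listing URL, using bs4 navigation instead of
    # regular expressions over serialized tag soup.
    soup = BeautifulSoup(html, 'html.parser')
    tags_url = {}
    for group in soup.find_all('ul', {'class': 'clearfix _group', 'gname': 'mi_type'}):
        for a in group.find_all('a', class_='_gtag _hotkey'):
            tags_url[a.get('title', a.get_text(strip=True))] = a.get('href', '')
    return tags_url


if __name__ == '__main__':
    client = pymongo.MongoClient('localhost', 27017)
    playlinks = client.dianying.playlinks

    index_url = 'http://v.qq.com/list/1_-1_-1_-1_1_0_0_20_0_-1_0.html'
    for m_type, tag_url in gettags(gethtml(index_url)).items():
        # Only the first listing page of each genre is walked here;
        # pagination would follow the same URL rewriting as the original.
        listing = BeautifulSoup(gethtml(tag_url), 'html.parser')
        for a in listing.find_all('a', class_='mod_poster_130'):
            playlinks.insert_one({
                'movie_title': a.get('title', ''),
                'movie_url': a.get('href', ''),
                'movie_site': 'qq',
                'movie_type': m_type,
            })
        time.sleep(0.1)  # throttle between pages

In either version, a local MongoDB instance (mongod on the default port 27017) must be running before any insert succeeds.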


That is the full detail of the Python crawler that scrapes every movie on Tencent Video. For more, please follow other related articles on PHP中文網!
