PHP implements multi-threaded crawling based on curl

墨辰丷 · 2023-03-30

This article shows how to implement concurrent ("multi-threaded") crawling in PHP with cURL. I hope it is useful to anyone interested in the topic.

PHP can use cURL for all kinds of file-transfer operations, such as simulating a browser to send GET and POST requests. However, because the PHP language itself does not support multi-threading, a crawler that works through a single cURL handle is not very efficient. The cURL multi functions (curl_multi_*) make it possible to request several URLs concurrently, which is enough to crawl pages or download files in parallel.
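Before the full implementation, here is a minimal sketch of the curl_multi pattern it builds on. The URLs are placeholders, and the select loop and error handling are omitted for brevity:

<?php
// Minimal curl_multi pattern: add easy handles, drive them together, collect the results.
$mh = curl_multi_init();
foreach (array("http://www.example.com/", "http://www.example.org/") as $i => $url) {
    $ch[$i] = curl_init($url);
    curl_setopt($ch[$i], CURLOPT_RETURNTRANSFER, 1);
    curl_multi_add_handle($mh, $ch[$i]);
}
do {
    curl_multi_exec($mh, $running); // advance all transfers a little
} while ($running);
foreach ($ch as $i => $h) {
    $body[$i] = curl_multi_getcontent($h); // response body for each handle
    curl_multi_remove_handle($mh, $h);
    curl_close($h);
}
curl_multi_close($mh);
?>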

The code is as follows:

<?php
/*
 * Concurrent ("multi-threaded") fetching with the cURL multi interface.
 */
/**
 * Fetch several URLs concurrently with curl_multi.
 *
 * @param array $array   URLs to request in parallel
 * @param int   $timeout timeout in seconds for each request
 * @return array
 */
function Curl_http($array, $timeout){
    $res = array();
    $mh = curl_multi_init(); // create the multi handle
    $startime = getmicrotime();
    foreach ($array as $k => $url) {
        $conn[$k] = curl_init($url);

        curl_setopt($conn[$k], CURLOPT_TIMEOUT, $timeout); // per-request timeout
        curl_setopt($conn[$k], CURLOPT_USERAGENT, 'Mozilla/5.0 (compatible; MSIE 5.01; Windows NT 5.0)');
        curl_setopt($conn[$k], CURLOPT_MAXREDIRS, 7); // maximum number of redirects to follow
        curl_setopt($conn[$k], CURLOPT_HEADER, 0); // no response headers in the output, slightly faster
        curl_setopt($conn[$k], CURLOPT_FOLLOWLOCATION, 1); // follow 302 redirects
        curl_setopt($conn[$k], CURLOPT_RETURNTRANSFER, 1); // return the body instead of printing it
        curl_multi_add_handle($mh, $conn[$k]);
    }
    // Drive the transfers without spinning the CPU in a busy loop
    // (this is the commonly recommended pattern).
    do {
        $mrc = curl_multi_exec($mh, $active); // $active stays true while transfers are still running
    } while ($mrc == CURLM_CALL_MULTI_PERFORM);
    while ($active && $mrc == CURLM_OK) { // still running and no error
        if (curl_multi_select($mh) != -1) { // wait until at least one handle has activity
            do {
                $mrc = curl_multi_exec($mh, $active);
            } while ($mrc == CURLM_CALL_MULTI_PERFORM);
        }
    }

    foreach ($array as $k => $url) {
        $error = curl_error($conn[$k]); // error message, if any (not returned here)
        $res[$k] = curl_multi_getcontent($conn[$k]); // response body
        $header[$k] = curl_getinfo($conn[$k]); // transfer/header information
        curl_multi_remove_handle($mh, $conn[$k]); // detach from the multi handle first...
        curl_close($conn[$k]); // ...then close the easy handle
    }

    curl_multi_close($mh);
    $endtime = getmicrotime();
    $diff_time = $endtime - $startime;

    return array(
        'diff_time' => $diff_time,
        'return'    => $res,
        'header'    => $header
    );
}

// current time as a float (seconds.microseconds)
function getmicrotime() {
    list($usec, $sec) = explode(" ", microtime());
    return ((float)$usec + (float)$sec);
}

// Quick test: fetch three sites concurrently
$array = array(
    "http://www.weibo.com/",
    "http://www.renren.com/",
    "http://www.qq.com/"
);
$data = Curl_http($array, 10); // call the function
var_dump($data); // dump the result
// Note: if a POST body is larger than 1024 bytes, cURL does not send the request in one step;
// it first sends a header containing "Expect: 100-continue". To suppress this, set:
// curl_setopt($ch, CURLOPT_HTTPHEADER, array("Expect:"));
?>
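As noted in the comments above, cURL adds an "Expect: 100-continue" header when a POST body exceeds 1024 bytes, and some servers handle that poorly. A minimal sketch of suppressing it on a single handle (the URL and payload below are placeholders for illustration):

<?php
$ch = curl_init('http://www.example.com/post-endpoint'); // placeholder URL
curl_setopt($ch, CURLOPT_POST, 1);
curl_setopt($ch, CURLOPT_POSTFIELDS, str_repeat('a', 2048)); // body larger than 1024 bytes
curl_setopt($ch, CURLOPT_RETURNTRANSFER, 1);
curl_setopt($ch, CURLOPT_HTTPHEADER, array("Expect:")); // send an empty Expect header
$response = curl_exec($ch);
curl_close($ch);
?>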

Let’s look at a few more examples

(1) The following code fetches multiple URLs and writes the page source of each into a single file:

$urls = array(
    'http://www.jb51.net/',
    'http://www.google.com/',
    'http://www.example.com/'
); // pages to fetch
$save_to = '/test.txt'; // file that the fetched source is appended to
$st = fopen($save_to, "a");
$mh = curl_multi_init();
foreach ($urls as $i => $url) {
    $conn[$i] = curl_init($url);
    curl_setopt($conn[$i], CURLOPT_USERAGENT, "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)");
    curl_setopt($conn[$i], CURLOPT_HEADER, 0);
    curl_setopt($conn[$i], CURLOPT_CONNECTTIMEOUT, 60);
    curl_setopt($conn[$i], CURLOPT_FILE, $st); // write the fetched source directly into the shared file
    curl_multi_add_handle($mh, $conn[$i]);
} // initialization
do {
    curl_multi_exec($mh, $active);
} while ($active); // run the transfers (busy loop; see the note below)
foreach ($urls as $i => $url) {
    curl_multi_remove_handle($mh, $conn[$i]);
    curl_close($conn[$i]);
} // clean up
curl_multi_close($mh);
fclose($st);
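The do { ... } while ($active); loop above keeps calling curl_multi_exec() as fast as it can, which wastes CPU while the downloads are in flight. A sketch of a gentler run loop, using curl_multi_select() the same way the first function does, that could replace it:

// Replacement for: do { curl_multi_exec($mh, $active); } while ($active);
$active = null;
do {
    $mrc = curl_multi_exec($mh, $active);
} while ($mrc == CURLM_CALL_MULTI_PERFORM);
while ($active && $mrc == CURLM_OK) {
    if (curl_multi_select($mh, 1.0) != -1) { // sleep until a handle has activity (or 1s passes)
        do {
            $mrc = curl_multi_exec($mh, $active);
        } while ($mrc == CURLM_CALL_MULTI_PERFORM);
    }
}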

(2) The following code is almost the same as the above, except that it first stores the fetched source in a variable and then writes that content to the file:

$urls = array(
    'http://www.jb51.net/',
    'http://www.google.com/',
    'http://www.example.com/'
);
$save_to = '/test.txt'; // file that the fetched source is written to
$st = fopen($save_to, "a");
$mh = curl_multi_init();
foreach ($urls as $i => $url) {
    $conn[$i] = curl_init($url);
    curl_setopt($conn[$i], CURLOPT_USERAGENT, "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)");
    curl_setopt($conn[$i], CURLOPT_HEADER, 0);
    curl_setopt($conn[$i], CURLOPT_CONNECTTIMEOUT, 60);
    curl_setopt($conn[$i], CURLOPT_RETURNTRANSFER, true); // return the source as a string instead of sending it to output
    curl_multi_add_handle($mh, $conn[$i]);
}
do {
    curl_multi_exec($mh, $active);
} while ($active);
foreach ($urls as $i => $url) {
    $data = curl_multi_getcontent($conn[$i]); // fetched source as a string
    fwrite($st, $data); // write the string to the file
} // collect the data and write it to the file
foreach ($urls as $i => $url) {
    curl_multi_remove_handle($mh, $conn[$i]);
    curl_close($conn[$i]);
}
curl_multi_close($mh);
fclose($st);
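Neither of the examples above checks whether the individual requests succeeded. As a sketch (not part of the original code), the content-collecting loop in example (2) could also record the error message and HTTP status code of each handle before deciding what to write:

foreach ($urls as $i => $url) {
    $info = curl_getinfo($conn[$i]); // transfer details for this handle
    $err  = curl_error($conn[$i]);   // non-empty string on failure
    if ($err !== '' || $info['http_code'] >= 400) {
        echo "failed: $url ({$info['http_code']}) $err\n";
        continue; // skip writing this page
    }
    fwrite($st, curl_multi_getcontent($conn[$i]));
}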

(3) The following code uses the PHP cURL multi functions to download files concurrently:

$urls = array(
    'http://www.jb51.net/5w.zip',
    'http://www.jb51.net/5w.zip',
    'http://www.jb51.net/5w.zip'
);
$save_to = './home/'; // directory to save the downloads into
$mh = curl_multi_init();
$conn = array();
$fp = array();
foreach ($urls as $i => $url) {
    $g = $save_to . basename($url);
    if (!is_file($g)) { // skip files that already exist locally
        $conn[$i] = curl_init($url);
        $fp[$i] = fopen($g, "w");
        curl_setopt($conn[$i], CURLOPT_USERAGENT, "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)");
        curl_setopt($conn[$i], CURLOPT_FILE, $fp[$i]); // stream each download into its own file
        curl_setopt($conn[$i], CURLOPT_HEADER, 0);
        curl_setopt($conn[$i], CURLOPT_CONNECTTIMEOUT, 60);
        curl_multi_add_handle($mh, $conn[$i]);
    }
}
do {
    $n = curl_multi_exec($mh, $active);
} while ($active);
foreach ($conn as $i => $handle) { // only the handles that were actually created
    curl_multi_remove_handle($mh, $handle);
    curl_close($handle);
    fclose($fp[$i]);
}
curl_multi_close($mh);
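The downloads above are written to disk even when a request fails, which can leave empty or truncated files behind. A sketch (not from the original article) of a cleanup loop that discards failed downloads, replacing the cleanup section above:

foreach ($conn as $i => $handle) {
    $ok = curl_error($handle) === '' && curl_getinfo($handle, CURLINFO_HTTP_CODE) == 200;
    curl_multi_remove_handle($mh, $handle);
    curl_close($handle);
    fclose($fp[$i]);
    if (!$ok) {
        unlink($save_to . basename($urls[$i])); // remove the incomplete file
    }
}
curl_multi_close($mh);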

Summary: that is all for this article; I hope it helps with your learning.
