Python通过代理多线程抓取图片
Python作为一门功能强大的脚本语言,经常被用来写爬虫程序。下面是Python通过代理多线程抓取图片的代码。Python爬虫多线程抓取代理服务器可参考:http://www.linuxeye.com/program/1763.html
说明:
1. 多线程方式抓取代理服务器,并多线程验证代理服务器
ps 代理服务器是从http://www.cnproxy.com/ (测试只选择了8个页面)抓取
2. 抓取一个网站的图片地址,多线程随机取一个代理服务器下载图片
ps 图片网站地址:http://www.ivsky.com(测试只选择了有限的页面数)
#!/usr/bin/env python#BLOG:blog.linuxeye.com#coding:utf-8import urllib2import reimport threadingimport timeimport randomrawProxyList = []checkedProxyList = []imgurl_list = []#抓取代理网站portdicts ={'v':"3",'m':"4",'a':"2",'l':"9",'q':"0",'b':"5",'i':"7",'w':"6",'r':"8",'c':"1"}targets = []for i in xrange(1,9): target = r"http://www.cnproxy.com/proxy%d.html" % i targets.append(target)#print targets#抓取代理服务器正则p = re.compile(r'''<tr><td>(.+?)<SCRIPT type=text/javascript>document.write\(":"\+(.+?)\)</SCRIPT></td><td>(.+?)</td><td>.+?</td><td>(.+?)</td></tr>''')#获取代理的类class ProxyGet(threading.Thread): def __init__(self,target): threading.Thread.__init__(self) self.target = target def getProxy(self): print "代理服务器目标网站: " + self.target req = urllib2.urlopen(self.target) result = req.read() #print chardet.detect(result) matchs = p.findall(result) for row in matchs: ip=row port =row port = map(lambda x:portdicts,port.split('+')) port = ''.join(port) agent = row addr = row.decode("cp936").encode("utf-8") proxy = #print proxy rawProxyList.append(proxy) def run(self): self.getProxy()#检验代理的类class ProxyCheck(threading.Thread): def __init__(self,proxyList): threading.Thread.__init__(self) self.proxyList = proxyList self.timeout = 5 self.testUrl = "http://www.baidu.com/" self.testStr = "030173" def checkProxy(self): cookies = urllib2.HTTPCookieProcessor() for proxy in self.proxyList: proxyHandler = urllib2.ProxyHandler({"http" : r'http://%s:%s' %(proxy,proxy)}) #print r'http://%s:%s' %(proxy,proxy) opener = urllib2.build_opener(cookies,proxyHandler) opener.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:22.0) Gecko/20100101 Firefox/22.0')] #urllib2.install_opener(opener) t1 = time.time() try: #req = urllib2.urlopen("http://www.baidu.com", timeout=self.timeout) req = opener.open(self.testUrl, timeout=self.timeout) #print "urlopen is ok...." result = req.read() #print "read html...." 
timeused = time.time() - t1 pos = result.find(self.testStr) #print "pos is %s" %pos if pos > 1: checkedProxyList.append((proxy,proxy,proxy,timeused)) #print "ok ip: %s %s %s %s" %(proxy,proxy,proxy,timeused) else: continue except Exception,e: #print e.message continue def run(self): self.checkProxy()#获取图片地址函数def imgurlList(url_home): global imgurl_list home_page = urllib2.urlopen(url_home) url_re = re.compile(r'<li><a href="(.+?)" target="_blank" rel="nofollow">') pic_re = re.compile(r'<img src="(.*?\.\w{3,4})"') url_list = re.findall(url_re,home_page.read()) for url in url_list: #print url_home+url url_page = urllib2.urlopen(url_home+url) for imgurlList in re.findall(pic_re,url_page.read()): imgurl_list.append(imgurlList)#下载图片的类class getPic(threading.Thread): def __init__(self,imgurl_list): threading.Thread.__init__(self) self.imgurl_list = imgurl_list self.timeout = 5 def downloadimg(self): for imgurl in self.imgurl_list: pic_suffix = imgurl.split('.')[-1] #获取图片后缀 pic_name = str(random.randint(0,10000000000))+'.'+pic_suffix cookies = urllib2.HTTPCookieProcessor() randomCheckedProxy = random.choice(checkedProxyList) #随机取一组代理服务器 proxyHandler = urllib2.ProxyHandler({"http" : r'http://%s:%s' %(randomCheckedProxy,randomCheckedProxy)}) opener = urllib2.build_opener(cookies,proxyHandler) opener.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:22.0) Gecko/20100101 Firefox/22.0')] urllib2.install_opener(opener) try: data_img = opener.open(imgurl,timeout=self.timeout) f = open (pic_name,'wb') f.write(data_img.read()) f.close() except: continue def run(self): self.downloadimg()if __name__ == "__main__": getThreads = [] checkThreads = [] imgurlList('http://www.ivsky.com') getPicThreads = []#对每个目标网站开启一个线程负责抓取代理for i in range(len(targets)): t = ProxyGet(targets) getThreads.append(t)for i in range(len(getThreads)): getThreads.start()for i in range(len(getThreads)): getThreads.join()print '.'*10+"总共抓取了%s个代理" %len(rawProxyList) 
+'.'*10#开启20个线程负责校验,将抓取到的代理分成20份,每个线程校验一份for i in range(20): t = ProxyCheck(rawProxyList[((len(rawProxyList)+19)/20) * i:((len(rawProxyList)+19)/20) * (i+1)]) checkThreads.append(t)for i in range(len(checkThreads)): checkThreads.start()for i in range(len(checkThreads)): checkThreads.join()print '.'*10+"总共有%s个代理通过校验" %len(checkedProxyList) +'.'*10#开启20个线程随机取一个代理下载图片for i in range(20): t = getPic(imgurl_list[((len(imgurl_list)+19)/20) * i:((len(imgurl_list)+19)/20) * (i+1)]) getPicThreads.append(t)for i in range(len(getPicThreads)): getPicThreads.start()for i in range(len(getPicThreads)): getPicThreads.join()print '.'*10+"总共有%s个图片下载" %len(imgurl_list) +'.'*10#代理排序持久化f= open("proxy_list.txt",'w+')for proxy in sorted(checkedProxyList,cmp=lambda x,y:cmp(x,y)): #print "checked proxy is: %s:%s\t%s\t%s" %(proxy,proxy,proxy,proxy) f.write("%s:%s\t%s\t%s\n"%(proxy,proxy,proxy,proxy))f.close()测试结果:# lsproxy_getpic.py# python proxy_getpic.py代理服务器目标网站: http://www.cnproxy.com/proxy1.html代理服务器目标网站: http://www.cnproxy.com/proxy2.html代理服务器目标网站: http://www.cnproxy.com/proxy3.html代理服务器目标网站: http://www.cnproxy.com/proxy4.html代理服务器目标网站: http://www.cnproxy.com/proxy5.html代理服务器目标网站: http://www.cnproxy.com/proxy6.html代理服务器目标网站: http://www.cnproxy.com/proxy7.html代理服务器目标网站: http://www.cnproxy.com/proxy8.html..........总共抓取了800个代理....................总共有458个代理通过校验....................总共有154个图片下载..........# cat proxy_list.txt | more173.213.113.111:3128 United States 0.432188987732173.213.113.111:8089 United States 0.441318035126173.213.113.111:7808 United States 0.444597005844110.4.24.170:80 香港 香港移动通讯有限公司 0.489440202713211.142.236.135:8080 湖南省株洲市 移动 0.490673780441211.142.236.135:8081 湖南省株洲市 移动 0.518096923828211.142.236.135:8000 湖南省株洲市 移动 0.51860499382211.142.236.135:8082 湖南省株洲市 移动 0.520448207855# 
ls
1001117689.jpg  3097883176.jpg  5234319709.jpg  7012274766.jpg  8504924248.jpg
1076458640.jpg  3144369522.jpg  5387877704.jpg  7106183143.jpg  867723868.jpg
1198548712.jpg  3161307031.jpg  5572092752.jpg  7361254661.jpg  8746315373.jpg
165738192.jpg   3228008315.jpg  5575388077.jpg  7389537793.jpg  8848973192.jpg
1704512138.jpg  3306931164.jpg  5610740708.jpg  7407358698.jpg  8973834958.jpg
1742167711.jpg  3320152673.jpg  5717429022.jpg  7561176207.jpg  8976862152.jpg
...............
Fri Jul 12 11:34:12 CST 2013
摘自:http://blog.linuxeye.com/340.html
页:
[1]