Python Crawler: Fetching Proxy IPs from Xici (xicidaili.com)
First, a word on why crawlers need proxy IPs.
While crawling, we frequently run into sites that deploy anti-scraping measures, or we simply hit the target server too hard with the volume and speed of our requests.
If you keep fetching a site from the same IP, that IP is very likely to get blocked, so practically nobody writing crawlers gets around the IP problem.
That is why we use proxy IPs while crawling. This article covers one way to get them: scraping the free proxy list on the Xici (xicidaili.com) site.
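As a quick illustration of where these proxies end up, here is how requests routes a request through one. This is a minimal sketch: the proxy address is a placeholder rather than a working proxy, and httpbin.org is just a public echo service used for the demonstration.

import requests

# Placeholder proxy address; substitute one scraped by the steps below
proxies = {
    "http": "http://1.2.3.4:8080",
    "https": "https://1.2.3.4:8080",
}
# The request leaves through the proxy instead of your own IP
r = requests.get("http://httpbin.org/ip", proxies=proxies, timeout=5)
print(r.text)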
The concrete steps are as follows.
Usage: pass the number of pages to fetch to the main function.
We use requests to download each page and BeautifulSoup to extract its content, then save the extracted proxies to a txt file, grouped by type.
1. Imports
import requests
import chardet
import random
import time
from bs4 import BeautifulSoup
from telnetlib import Telnet
import progressbar
2. Prepare the browser headers
One note: when fetching the Xici pages with requests you must set a browser User-Agent header, otherwise you get no page content back. Each request picks one at random from the list below (a quick check follows the list).
user_agent = ["Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)","Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)","Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)","Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)","Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)","Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)","Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)","Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)","Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)","Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6","Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1","Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0","Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5","Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6","Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11","Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20","Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52"
]
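To confirm the header is actually being sent, here is a quick check against httpbin.org (an external echo service used only for illustration; this reuses the imports and the user_agent list above):

# Echo back the headers the server sees; the User-Agent field
# should match one of the strings from the list above
r = requests.get("http://httpbin.org/headers",
                 headers={"User-Agent": random.choice(user_agent)})
print(r.text)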
3. Define a function to fetch the page source
_times_count = 0  # retry counter; it must be initialized here or the except branch raises NameError

def getHtmlWithHeader(url):
    # Try to fetch the page; on failure, wait a second and retry.
    # After too many failures, give up and print a message.
    try:
        # Send the request with a randomly chosen User-Agent
        response = requests.get(url, headers={"User-Agent": random.choice(user_agent)})
        # Detect the page encoding
        code = chardet.detect(response.content)["encoding"]
        # Apply the detected encoding
        response.encoding = code
        # print(response.text)
        return response.text
    except:
        time.sleep(1)
        global _times_count
        _times_count += 1
        if _times_count > 5:
            print("Failed to fetch the page; please try again later")
            return
        print("Retry attempt", _times_count)
        return getHtmlWithHeader(url)
4. Extract the proxy IPs from the fetched page source
def getIP(num):
    # List that collects the scraped records
    datalist = []
    # Build the URL for each page
    for num1 in range(num):
        url = 'http://www.xicidaili.com/nn/' + str(num1 + 1)
        # Fetch the page source
        html = getHtmlWithHeader(url)
        soup = BeautifulSoup(html, 'html.parser')
        parent = soup.find(id="ip_list")
        lis = parent.find_all('tr')
        # Drop the first row (the table header)
        lis.pop(0)
        print("Scraping IP addresses and related fields")
        for i in lis:
            tds = i.find_all('td')
            ip = tds[1].get_text()  # IP address
            dk = tds[2].get_text()  # port
            nm = tds[4].get_text()  # anonymity level
            ty = tds[5].get_text()  # type (HTTP/HTTPS)
            tm = tds[8].get_text()  # lifetime
            datalist.append((ip, dk, nm, ty, tm))
    print("Scraped", len(datalist), "records in total\n")
    # Return the scraped data
    return datalist
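Since the live page may be unfamiliar (or unreachable these days), here is a minimal offline sketch of the parsing logic above. The two-row fragment is made up to mimic the shape of Xici's ip_list table; the column names are guesses, and only positions 1, 2, 4, 5 and 8, the ones getIP reads, matter:

from bs4 import BeautifulSoup

sample = """
<table id="ip_list">
  <tr><th>flag</th><th>IP</th><th>port</th><th>location</th><th>anonymity</th>
      <th>type</th><th>speed</th><th>latency</th><th>lifetime</th></tr>
  <tr><td></td><td>1.2.3.4</td><td>8080</td><td></td><td>高匿</td>
      <td>HTTP</td><td></td><td></td><td>5天</td></tr>
</table>
"""
soup = BeautifulSoup(sample, 'html.parser')
rows = soup.find(id="ip_list").find_all('tr')
rows.pop(0)  # drop the header row, exactly as getIP does
for r in rows:
    tds = r.find_all('td')
    print(tds[1].get_text(), tds[2].get_text(), tds[5].get_text(), tds[8].get_text())
# -> 1.2.3.4 8080 HTTP 5天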
5. Filter the proxies: drop short-lived and unreachable IPs
def filtrateIP(datalist):
    datalist1 = []
    # Drop records whose lifetime is too short
    print('Filtering out short-lived proxies\n')
    for i in datalist:
        # "分钟" means "minutes" on the Xici page; keep only proxies
        # that have already survived longer than a few minutes
        if "分钟" not in i[4]:
            datalist1.append(i)
            # print(i)
    print("Filtered out", len(datalist) - len(datalist1), "short-lived records")
    print(len(datalist1), "records left\n")
    # Probe the remaining proxies to see whether they are reachable
    print('Testing the IPs and filtering out the unreachable ones')
    datalist.clear()
    v = 1
    p = progressbar.ProgressBar()
    for i in p(datalist1):
        # print("Checking record " + str(v))
        v += 1
        try:
            # Try opening a TCP connection to the proxy
            Telnet(i[0], i[1], timeout=1)
        except:
            pass
        else:
            datalist.append(i)
    print('Unreachable IPs filtered out')
    print("Filtered out", len(datalist1) - len(datalist), "unreachable records")
    print(len(datalist), "records left")
    # Return the filtered data
    return datalist
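One caveat: telnetlib is deprecated and was removed in Python 3.13 (PEP 594). If it is unavailable, a bare TCP connection performs the same reachability probe. A minimal sketch, with a hypothetical helper named is_reachable:

import socket

def is_reachable(ip, port, timeout=1):
    # Open and immediately close a TCP connection,
    # the same probe Telnet performs under the hood
    try:
        with socket.create_connection((ip, int(port)), timeout=timeout):
            return True
    except OSError:
        return False

# Drop-in replacement for the Telnet(...) line above:
#     if is_reachable(i[0], i[1]):
#         datalist.append(i)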
6. Split the filtered IPs into HTTP/HTTPS and save them
def saveIP(datalist):
    # Split the records into http/https groups
    httplist = []
    httpslist = []
    for i in datalist:
        if i[3] == 'HTTP':
            httplist.append('http://' + i[0] + ':' + i[1])
        else:
            httpslist.append('https://' + i[0] + ':' + i[1])
    # Show the results on screen
    print("HTTP: " + str(len(httplist)) + " records")
    print(httplist)
    print("")
    print("HTTPS: " + str(len(httpslist)) + " records")
    print(httpslist)
    print("")
    print("Writing to file")
    # Open the file ('ip地址' means "IP addresses")
    f = open('ip地址.txt', 'w', encoding="utf-8")
    # Write the data
    f.write("HTTP\n")
    f.write(str(httplist) + "\n\n")
    f.write("HTTPS\n")
    f.write(str(httpslist))
    # Close the file
    f.close()
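Because the lists are written out with str(), they can be parsed back with ast.literal_eval. A sketch that assumes exactly the two-section layout saveIP writes:

import ast

with open('ip地址.txt', encoding='utf-8') as f:
    content = f.read()
# File layout: "HTTP\n[...]\n\nHTTPS\n[...]"
http_part, https_part = content.split("HTTPS\n")
httplist = ast.literal_eval(http_part.split("HTTP\n")[1].strip())
httpslist = ast.literal_eval(https_part.strip())
print(len(httplist), "HTTP proxies,", len(httpslist), "HTTPS proxies")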
7. Finally, call the functions
# num is the number of pages to crawl
def main(num):
    datalist = getIP(num)
    IPlist = filtrateIP(datalist)
    saveIP(IPlist)

if __name__ == '__main__':
    main(1)
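To close the loop, this is roughly how a crawler could rotate through the saved pool. A sketch with a hypothetical pool and target URL, reusing the imports and user_agent list defined above:

# Hypothetical pool, in the format saveIP writes for HTTP entries
httplist = ['http://1.2.3.4:8080', 'http://5.6.7.8:3128']

# Pick a different proxy per request so no single IP absorbs all the traffic
proxy = random.choice(httplist)
r = requests.get('http://example.com',  # hypothetical target
                 proxies={'http': proxy},
                 headers={'User-Agent': random.choice(user_agent)},
                 timeout=5)
print(r.status_code)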
