New to Python! Scraping Boss直聘 job listings and saving them to a CSV file
My girlfriend has been planning to change jobs, but she doesn't have time to page through every listing one by one. So, after a month and a half of teaching myself Python, I overconfidently decided to scrape some of the listings off Boss直聘. Without further ado, here's the code.
Since I'm a beginner and this was just practice, I didn't overthink the design: the script simply runs top to bottom.
Step 1: the usual routine, import the libraries. To keep things simple I pulled in the xpinyin library so the output filename can be generated directly from the search term's pinyin.
import requests
from lxml import etree
from urllib.parse import urlencode
import ssl
import csv
import time
from xpinyin import Pinyin

pin = Pinyin()
"""搜索的职业名称"""
position_type = '幼教培训师'"""输出文件名"""
csv_name = pin.get_pinyin(position_type)+'.csv'
current_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
# write the header row to the csv file
with open(csv_name, 'a', newline='', encoding='utf-8') as fp:
    writer = csv.writer(fp)
    # columns: job title, company profile, salary, requirements, benefits,
    # responsibilities, address, url, scrape time
    writer.writerow(['职业名称', '公司简介', '薪资', '要求', '福利', '岗位职责及要求', '地址', '网址', '获取时间'])

HEADERS = {
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36',
    'referer': 'you can figure this one out',
    'cookie': 'paste a valid cookie here'
}
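A quick aside on the filename: xpinyin's get_pinyin joins syllables with a hyphen by default, so (if I have the library's default separator right) the file for this search should come out as you-jiao-pei-xun-shi.csv. A tiny standalone check:

from xpinyin import Pinyin

pin = Pinyin()
print(pin.get_pinyin('幼教培训师'))       # expected: you-jiao-pei-xun-shi
print(pin.get_pinyin('幼教培训师', ''))   # the second argument sets the separator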
Step 2: hit a fixed URL first to coax the total page count out of the site.
# first, use a fixed url to get the total number of pages
# 1. build the fixed query parameters
params = {
    'query': position_type,
    'page': 1,
    'ka': 'page-1'
}
Base_url = 'https://www.zhipin.com/c101200100/?'
url = Base_url + urlencode(params)
print(url)

# 2. send the request and grab the page count
resp = requests.get(url, headers=HEADERS)
resp.encoding = 'utf-8'
text = resp.text
#print(text)
html = etree.HTML(text)
# total_page comes from here; we need to handle the case where there is no page 2
try:
    total_page = html.xpath("//div[@class='page']/a[last()-1]/text()")[0]
    print("Total pages: " + str(total_page) + "!")
except IndexError:
    total_page = None
    print("There is no page 2!")
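Why a[last()-1]? My reading of the pager markup at the time was that the last link is the "next page" arrow, so the one before it carries the highest page number. And if you'd rather not carry a None around, a small variation (my own sketch, not how the script below proceeds) normalizes the result to an integer:

# sketch: fall back to 1 page instead of None, so later loops need no special case
page_links = html.xpath("//div[@class='page']/a[last()-1]/text()")
total_page_count = int(page_links[0]) if page_links else 1

Back to the script.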
# 3. collect the url of every detail page on this listing page
href = html.xpath("//div[@class='info-primary']//a/@href")
for h in href:
    detail_url = 'https://www.zhipin.com' + h
    print(detail_url)
    print("\nCurrent time: " + str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
    time.sleep(5)
    # 4. fetch the content from the detail page itself, not the listing url above
    resp = requests.get(detail_url, headers=HEADERS)
    resp.encoding = 'utf-8'
    text = resp.text
    html = etree.HTML(text)
    title = html.xpath("//h1/text()")[0]
    try:
        company_profile = html.xpath("//div[@class='job-sec company-info']/div[@class='text']/text()")[0].strip() + "..."
    except IndexError:
        company_profile = 'No company profile!'
    print(company_profile)
    salary = html.xpath("//span[@class='salary']/text()")[0]
    req = html.xpath("//div[@class='info-primary']//p/text()")[:3]
    job_tag = html.xpath("//div[@class='tag-container']//div[@class='tag-all job-tags']//span/text()")
    positions = html.xpath("//div[@class='job-sec']/div[@class='text']/text()")
    position_description = []
    for posi in positions:
        position_description.append(posi.strip())
    address = html.xpath("//div[@class='job-location']/div[@class='location-address']/text()")[0].strip()
    with open(csv_name, 'a', newline='', encoding='utf-8') as fp:
        writer = csv.writer(fp)
        writer.writerow([title, company_profile, salary, req, job_tag, position_description, address, detail_url, current_time])
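One thing worth knowing before you open the file: req, job_tag, and position_description are Python lists, and csv.writer renders a list cell as its repr, brackets and quotes included. If you want cleaner cells, joining them first works; a minimal standalone demo (the sample values are made up):

import csv
import io

req = ['city', '1-3 years', 'college']   # hypothetical values for illustration
buf = io.StringIO()
writer = csv.writer(buf)
writer.writerow([req])               # cell looks like "['city', '1-3 years', 'college']"
writer.writerow([' | '.join(req)])   # cell looks like: city | 1-3 years | college
print(buf.getvalue())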
Step 3: a function that rewrites the query parameters inside a loop, so we can fetch page 2 onward.
def get_detail_urls(url):
    resp = requests.get(url, headers=HEADERS)
    resp.encoding = 'utf-8'
    text = resp.text
    html = etree.HTML(text)
    href = html.xpath("//div[@class='info-primary']//a/@href")
    for h in href:
        detail_url = 'https://www.zhipin.com' + h
        print(detail_url)
        print("\nCurrent time: " + str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
        time.sleep(5)
        get_detail_content(detail_url)

# fetch the content of a single detail page
def get_detail_content(url):
    resp = requests.get(url, headers=HEADERS)
    resp.encoding = 'utf-8'
    text = resp.text
    html = etree.HTML(text)
    title = html.xpath("//h1/text()")[0]
    try:
        company_profile = html.xpath("//div[@class='job-sec company-info']/div[@class='text']/text()")[0].strip() + "..."
    except IndexError:
        company_profile = 'No company profile!'
    salary = html.xpath("//span[@class='salary']/text()")[0]
    req = html.xpath("//div[@class='info-primary']//p/text()")[:3]
    job_tag = html.xpath("//div[@class='tag-container']//div[@class='tag-all job-tags']//span/text()")
    positions = html.xpath("//div[@class='job-sec']/div[@class='text']/text()")
    position_description = []
    for posi in positions:
        position_description.append(posi.strip())
    address = html.xpath("//div[@class='job-location']/div[@class='location-address']/text()")[0].strip()
    with open(csv_name, 'a', newline='', encoding='utf-8') as fp:
        writer = csv.writer(fp)
        # note: write the url parameter here, not the leftover global detail_url from step 2
        writer.writerow([title, company_profile, salary, req, job_tag, position_description, address, url, current_time])

# main entry point: walk every remaining listing page
def spider():
    if total_page is not None:
        for i in range(2, int(total_page) + 1):
            print("Now on page " + str(i) + "!")
            ka = 'page-' + str(i)
            params = {
                'query': position_type,
                'page': i,
                'ka': ka
            }
            Base_url = 'https://www.zhipin.com/c101200100/?'
            url = Base_url + urlencode(params)
            get_detail_urls(url)

if __name__ == '__main__':
    spider()
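Looking back, the inline loop in Step 2 duplicates what get_detail_urls and get_detail_content already do. If I were tidying this up, every page could go through the same two functions; here's a sketch reusing the names defined above (untested against the live site):

def build_url(page):
    # same fixed city prefix as above; only page and ka vary
    params = {'query': position_type, 'page': page, 'ka': 'page-' + str(page)}
    return 'https://www.zhipin.com/c101200100/?' + urlencode(params)

def spider():
    # page 1 now takes the same code path as every other page;
    # total_page may be None (no page 2), in which case we default to 1
    for i in range(1, int(total_page or 1) + 1):
        print("Now on page " + str(i) + "!")
        get_detail_urls(build_url(i))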
Since I'm only scraping a modest amount of company data to make browsing easier, the crawl speed I set isn't fast (note the time.sleep(5) between requests). OK, that's it!
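One last note on the browsing part: the CSV opens in Excel (if the Chinese headers come out garbled there, writing with encoding='utf-8-sig' instead of 'utf-8' fixes it), or you can skim it with pandas, assuming it's installed:

import pandas as pd

df = pd.read_csv('you-jiao-pei-xun-shi.csv')   # the pinyin filename generated above
print(df[['职业名称', '薪资', '地址']].head())   # title, salary, address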
