Scraping 51job (前程无忧) job postings with Scrapy and storing them in a database
- The spider code, in `spiders/`:
```python
import scrapy
from scrapy import Request
from QianCheng.items import QianchengItem
import re


class ExampleSpider(scrapy.Spider):
    name = '51job'

    def start_requests(self):
        url_str = 'https://www.51job.com/zhengzhou/'
        yield Request(url=url_str, callback=self.parse,
                      dont_filter=True, meta={'page': '0'})

    def parse(self, response):
        contents = response.xpath('//div[@class="el"]')
        for i in contents:
            urls = i.xpath('p/span[1]/a[@href]/@href').extract()
            for urll in urls:
                yield Request(url=urll, callback=self.parse_dail, meta={'page': '1'})
        if re.search(r'search', response.url):
            # Tag the request with page=2 so the middleware knows
            # to perform the "next page" click for this URL
            yield Request(url=response.url, callback=self.parse, meta={'page': '2'})

    def parse_dail(self, response):
        job_name = response.xpath('//h1[@title]/@title').extract()
        company = response.xpath('//p[@class="cname"]/a[@title]/@title').extract()
        saray = response.xpath('//div[@class="cn"]/strong/text()').extract()
        company_desc = response.xpath('//div[@class="tmsg inbox"]/text()').extract()
        qianchengs = QianchengItem()
        qianchengs['job_name'] = ''.join(job_name)
        qianchengs['company'] = ''.join(company)
        qianchengs['saray'] = ''.join(saray)
        qianchengs['company_desc'] = ''.join(company_desc).strip()
        yield qianchengs
```
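Since 51job's markup changes over time, it is worth sanity-checking the list-page selectors offline before launching a full crawl. A minimal sketch, using a fabricated HTML fragment shaped like the rows the spider expects (the fragment and URLs are stand-ins, not real 51job markup):

```python
from scrapy.http import HtmlResponse

# Made-up fragment mirroring the structure the parse() XPaths assume
html = b'''<div class="el">
  <p><span><a href="https://jobs.51job.com/zhengzhou/12345.html">demo job</a></span></p>
</div>'''

response = HtmlResponse(url='https://search.51job.com/', body=html, encoding='utf-8')
for row in response.xpath('//div[@class="el"]'):
    print(row.xpath('p/span[1]/a[@href]/@href').extract())
# ['https://jobs.51job.com/zhengzhou/12345.html']
```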
- The item definition, in `items.py`:
```python
import scrapy


class QianchengItem(scrapy.Item):
    job_name = scrapy.Field()
    company = scrapy.Field()
    saray = scrapy.Field()
    company_desc = scrapy.Field()
```
- The pipeline, in `pipelines.py`, receives the items and stores them:
```python
import sqlite3


class QianchengPipeline(object):
    def __init__(self):
        self.conn = sqlite3.connect("qiancheng.db")
        self.cursor = self.conn.cursor()
        self.cursor.execute(
            "create table IF NOT EXISTS zhaopin("
            "job_name varchar(200), company varchar(500), "
            "saray varchar(100), company_desc varchar(100))"
        )

    def process_item(self, item, spider):
        # Parameterized query: interpolating the scraped strings directly
        # would break on embedded quotes (and invites SQL injection)
        self.cursor.execute(
            "insert into zhaopin values(?, ?, ?, ?)",
            (item["job_name"], item["company"], item["saray"], item["company_desc"]),
        )
        self.conn.commit()
        return item
```
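The pipeline can be exercised without running a crawl at all, since `process_item` only needs something indexable. A quick offline test along these lines, run from the project root so `QianCheng` is importable (the item contents here are made up):

```python
from QianCheng.pipelines import QianchengPipeline

pipeline = QianchengPipeline()
fake_item = {"job_name": "python工程师", "company": "示例公司",
             "saray": "0.8-1万/月", "company_desc": "made-up row for testing"}
pipeline.process_item(fake_item, spider=None)
```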
- In the middleware (`middlewares.py`):
```python
from selenium import webdriver
from selenium.webdriver.firefox.options import Options as FOptions
import time
from scrapy.http import HtmlResponse


class SeleniumMiddlewares(object):
    def __init__(self):
        self.options = FOptions()
        # self.options.add_argument("-headless")
        self.browser = webdriver.Firefox(
            executable_path="/home/hello/Downloads/geckodriver",
            firefox_options=self.options)

    def process_request(self, request, spider):
        if int(request.meta['page']) == 0:
            # First request: open the city homepage, type the keyword, search
            self.browser.get(request.url)
            input_name = self.browser.find_element_by_xpath('//*[@id="kwdselectid"]')
            input_name.click()
            input_name.send_keys('python')
            btn_seacher = self.browser.find_element_by_xpath(
                '//*[@id="supp"]/div[1]/div/div[1]/button')
            btn_seacher.click()
            time.sleep(3)
        if int(request.meta['page']) == 1:
            # Detail pages: just load them
            self.browser.get(request.url)
            time.sleep(3)
        if int(request.meta['page']) == 2:
            # Result pages: click the "下一页" (next page) link
            self.browser.get(request.url)
            next_page = self.browser.find_element_by_xpath(
                '//a[contains(text(),"下一页")]')
            next_page.click()
        return HtmlResponse(url=self.browser.current_url,
                            body=self.browser.page_source,
                            encoding="utf-8", request=request)
```
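One thing this middleware never does is quit Firefox, so a browser process is left behind after every crawl. A minimal way to plug the leak is to hook Scrapy's `spider_closed` signal; a sketch of the extra methods, added to the same class:

```python
from scrapy import signals


class SeleniumMiddlewares(object):
    # ... __init__ and process_request as above ...

    @classmethod
    def from_crawler(cls, crawler):
        middleware = cls()
        # Quit the browser when the spider finishes
        crawler.signals.connect(middleware.spider_closed,
                                signal=signals.spider_closed)
        return middleware

    def spider_closed(self, spider):
        self.browser.quit()
```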
- In `settings.py`:
```python
BOT_NAME = 'QianCheng'

SPIDER_MODULES = ['QianCheng.spiders']
NEWSPIDER_MODULE = 'QianCheng.spiders'

ROBOTSTXT_OBEY = False

DOWNLOADER_MIDDLEWARES = {
    'QianCheng.middlewares.SeleniumMiddlewares': 543,
}

ITEM_PIPELINES = {
    'QianCheng.pipelines.QianchengPipeline': 300,
}
```
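With these settings in place, `scrapy crawl 51job` (run from the project root, matching the `name` defined in the spider) sends every request through the Selenium middleware and hands each scraped item to the SQLite pipeline.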
- Checking the stored results:
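Because everything lands in a plain SQLite file, a few lines are enough to confirm the rows arrived (a sketch assuming `qiancheng.db` is in the current directory):

```python
import sqlite3

conn = sqlite3.connect("qiancheng.db")
for row in conn.execute("select job_name, company, saray from zhaopin limit 5"):
    print(row)
conn.close()
```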
