lxml-bs (efficient data-cleaning tools for web scraping)

1 - The lxml parsing library

from lxml import etree
import requests

# Target URL
url = "https://movie.douban.com/subject/34973399/comments?status=P"
# Build the request headers
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'}
# Basic fetch
response = requests.get(url, headers=headers)
# Keep the page source (to be cleaned in the next steps)
html_source = response.text

# Extract the data we want with XPath
tree = etree.HTML(html_source)
# Get the title (two ways)
title1 = tree.xpath("//title/text()")[0]
title2 = tree.xpath("string(//title)")
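# Note the difference: text() yields a list of matching text nodes (hence
# the [0] above), while string(...) evaluates to a single concatenated string.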
# Get all the comment texts
comments = tree.xpath("//span[@class='short']/text()")
# Match on multiple attributes at once
home_page = tree.xpath("//span[@class='first' and @data-page='first']/text()")[0]
# Get the href attribute of every node that has one
all_attr = tree.xpath("//*/@href")
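# Attribute queries like //*/@href return the attribute values as plain
# strings, not element nodes.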
# Get an attribute of a parent node (.. steps up one level)
comment_parent = tree.xpath("//span[@class='short']/../@class")
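All of the queries above run once against the whole document. When related fields need to stay paired, it is often cleaner to grab the element nodes first and then run relative XPath on each one. Below is a minimal, self-contained sketch of that pattern; the inline HTML is made up for illustration, so nothing here depends on the Douban page.

from lxml import etree

# Hypothetical inline HTML, so the snippet runs without any network access
sample = """
<div>
  <span class="short">first comment</span>
  <span class="short">second comment</span>
</div>
"""
tree = etree.HTML(sample)
for span in tree.xpath("//span[@class='short']"):
    # Relative to the node itself, string(.) returns its full text content
    print(span.xpath("string(.)"))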

2 - The bs4 (BeautifulSoup) parsing library

import requests
from bs4 import BeautifulSoup
import re

# Target URL
url = "https://movie.douban.com/subject/34973399/comments?status=P"
# Build the request headers
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'}
# Basic fetch
response = requests.get(url, headers=headers)
# Keep the page source (to be cleaned in the next steps)
html_source = response.text

# Extract the data we want with bs4
bs = BeautifulSoup(html_source, "lxml")
# Get the title (.string works as an alternative to get_text(); both return the text content)
title3 = bs.find_all("title")[0].get_text()
# Get all the comments (find_all returns Tag objects, not plain text)
comments2 = bs.find_all(name="span", class_="short")
# Match on multiple attributes at once
home_page2 = bs.find_all(name="span", attrs={"class": "first", "data-page": "first"})[0].string
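# Caveat: .string returns None when a tag has more than one child node;
# get_text() is the safer choice if the markup ever changes.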
# Get text nodes containing a given substring (here, comments mentioning "男主")
text_attr = bs.find_all(name="span", class_="short", text=re.compile(".*男主.*"))
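As a closing note, BeautifulSoup also accepts CSS selectors via select(), which often reads more compactly than find_all(). Here is a minimal, self-contained sketch comparing the two; the inline HTML is made up for illustration and does not come from the Douban page.

from bs4 import BeautifulSoup

# Hypothetical inline HTML, so the snippet runs without any network access
sample = """
<div>
  <span class="short">first comment</span>
  <span class="short">second comment</span>
</div>
"""
bs = BeautifulSoup(sample, "lxml")
# find_all returns Tag objects; get_text() extracts the text from each
texts1 = [tag.get_text() for tag in bs.find_all("span", class_="short")]
# select() takes a CSS selector and returns the same tags
texts2 = [tag.get_text() for tag in bs.select("span.short")]
assert texts1 == texts2  # both approaches yield the same result
print(texts1)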

