#!/usr/bin/python3
# -*- coding:utf-8 -*-
from lxml import etree
import requests


def resouban(url):
    # TODO A simple function: fetch the hot-search board info - each entry's
    # name, url, rank and heat value (the heat value can be used to filter
    # out pinned entries).
    dic = {"User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Mobile Safari/537.36",
           'cookie': 'SINAGLOBAL=5702871423690.898.1595515471453; SCF=Ah2tNvhR8eWX01S-DmF8uwYWORUbgfA0U3GnciJplYvqE1sn2zJtPdkJ9ork9dAVV8G7m-9kbF-PwIHsf3jHsUw.; SUB=_2A25NDifYDeRhGeBK7lYS9ifFwjSIHXVu8UmQrDV8PUJbkNANLRmlkW1NR7rne18NXZNqVxsfD3DngazoVlT-Fvpf; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WhhI1TcfcjnxZJInnV-kd405NHD95QcSh-Xe0q41K.RWs4DqcjQi--ciK.RiKLsi--Ni-24i-iWi--Xi-z4iKyFi--fi-2XiKLhSKeEeBtt; wvr=6; _s_tentry=www.sogou.com; UOR=,,www.sogou.com; Apache=9073188868783.379.1611369496580; ULV=1611369496594:3:3:3:9073188868783.379.1611369496580:1611281802597; webim_unReadCount=%7B%22time%22%3A1611369649613%2C%22dm_pub_total%22%3A0%2C%22chat_group_client%22%3A0%2C%22chat_group_notice%22%3A0%2C%22allcountNum%22%3A63%2C%22msgbox%22%3A0%7D'}
    resp = requests.get(url, headers=dic)
    html = etree.HTML(resp.text)
    divs = html.xpath('/html/body/div/section/ul')
    resou_dic = {}
    resouxinxi_num = 0
    for i in range(1, 61):  # XPath li[] positions are 1-based, so start at 1
        for div in divs:
            text = div.xpath(f'./li[{i}]/a/span/text()')
            if len(text) == 0:
                continue
            text = text[0]
            num = div.xpath(f'./li[{i}]/a/strong/text()')
            hot_num = div.xpath(f'./li[{i}]/a/span/em/text()')
            son_url = div.xpath(f'./li[{i}]/a/@href')
            son_url = 'https://s.weibo.com' + son_url[0]
            # print(num, text, hot_num, son_url)
            resou_dic[f'{i}'] = [num, text, hot_num, son_url]
    with open('热搜数据.txt', 'a', encoding='utf-8') as f:
        for i in resou_dic:
            # print(i)
            f.write(str(resou_dic[i])[1:] + '\n')  # [1:] drops the list repr's leading '['
            resouxinxi_num += 1
        f.write('\n\n')
    print('Total hot-search entries: ' + str(resouxinxi_num))
    print('Enter the number of the hot-search entry you want')
    num = input()
    return resou_dic[num][1], resou_dic[num][3]


def weiboxinxi(url):
    # TODO This function fetches the posts under one hot-search topic; its URL
    # is spliced together from the hot-search URL returned by resouban().
    dic = {"User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Mobile Safari/537.36",
           'cookie': 'SINAGLOBAL=5702871423690.898.1595515471453; SCF=Ah2tNvhR8eWX01S-DmF8uwYWORUbgfA0U3GnciJplYvqE1sn2zJtPdkJ9ork9dAVV8G7m-9kbF-PwIHsf3jHsUw.; SUB=_2A25NDifYDeRhGeBK7lYS9ifFwjSIHXVu8UmQrDV8PUJbkNANLRmlkW1NR7rne18NXZNqVxsfD3DngazoVlT-Fvpf; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WhhI1TcfcjnxZJInnV-kd405NHD95QcSh-Xe0q41K.RWs4DqcjQi--ciK.RiKLsi--Ni-24i-iWi--Xi-z4iKyFi--fi-2XiKLhSKeEeBtt; wvr=6; _s_tentry=www.sogou.com; UOR=,,www.sogou.com; Apache=9073188868783.379.1611369496580; ULV=1611369496594:3:3:3:9073188868783.379.1611369496580:1611281802597; webim_unReadCount=%7B%22time%22%3A1611369649613%2C%22dm_pub_total%22%3A0%2C%22chat_group_client%22%3A0%2C%22chat_group_notice%22%3A0%2C%22allcountNum%22%3A63%2C%22msgbox%22%3A0%7D'}
    response = requests.get(url, headers=dic)
    response.encoding = "utf-8"
    data = response.json()
    # Topic lead-in:
    # print(data['data']['cards'][0]['desc'])
    with open('页面博文.txt', 'a', encoding='utf-8') as f:
        f.write(data['data']['cards'][0]['desc'] + '\n')
        f.write('热度分析页面:' + data['data']['cards'][0]['scheme'] + '\n\n')
    # Link to the heat-analysis page (if needed, the heat charts on that page
    # could be scraped as well):
    # print(data['data']['cards'][0]['scheme'])
    xinxi_num = 0
    for i in data['data']['cards']:
        if 'mblog' in i:
            with open('页面博文.txt', 'a', encoding='utf-8') as f:
                f.write('博主信息:' + '\n')
                f.write('博主id:' + str(i['mblog']['user']['id']) + '\n')
                f.write('博主博名:' + str(i['mblog']['user']['screen_name']) + '\n')
                f.write('博主粉丝数:' + str(i['mblog']['user']['followers_count']) + '\n')
                f.write('博文内容' + str(i['mblog']['text']) + '\n')
                f.write('博文id:' + str(i['mblog']['id']) + '\t' + '发博时间:' + str(i['mblog']['created_at']) + '\t'
                        + '评论数:' + str(i['mblog']['comments_count']) + '\t' + '点赞数:' + str(i['mblog']['attitudes_count'])
                        + '\t' + '转发数:' + str(i['mblog']['reposts_count']) + '\n')
                f.write('\n\n')
            xinxi_num += 1
    print('Total posts: ' + str(xinxi_num))
    # print(i['mblog']['text'])
    # The stored text is exactly what the page displays, so an over-long post is
    # cut off at the "全文" (full text) marker, and over-long URLs are shortened too.
    # TODO Some cards carry fields of their own; adapt the code per use case.
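
# weiboxinxi() above stores i['mblog']['text'] verbatim, and that field still
# contains HTML (links, emoji <img> tags). Since lxml is already imported, a small
# helper could reduce it to plain text. A sketch outside the original flow; the
# name strip_html is ours:
def strip_html(text):
    # string(.) concatenates all text nodes of the parsed fragment, dropping the tags
    return etree.HTML(f'<div>{text}</div>').xpath('string(.)') if text else ''
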
def get_conmmtens():
    # TODO Fetch the comments of one post: take the post id out of the saved post
    # info, splice the comment URL from it, then request the comments.
    # TODO Error handling is missing here (and everywhere else), so the accepted
    # input is limited: each page shows a different number of posts, and an index
    # that is too large raises a KeyError, because beyond that point the post may
    # be served under a different URL form, the splicing rule above no longer
    # applies, and the spliced URL carries no data.
    # TODO A fallback for posts with nothing worth fetching is written below in
    # commtents(); many other spots can still raise (index-out-of-range above all).
    # They are left as-is, since a Qt front end would rule such input out.
    with open('页面博文.txt', 'r', encoding='utf-8') as f:
        t = f.read()
    xinxi = t.split('\n\n')
    # TODO t is limited both in range and in what may be typed in
    print('Enter the number of the post whose comments you want')
    t = int(input())  # int() instead of eval(): never eval() raw user input
    # extract the post id
    bowen_id = xinxi[t].split('\n')[-1].split('\t')[0].split(':')[1]
    # url = 'https://m.weibo.cn/comments/hotflow?' + 'id=4687951096713999&mid=4687951096713999' + '&max_id_type=0'
    url = 'https://m.weibo.cn/comments/hotflow?' + f'id={bowen_id}&mid={bowen_id}' + '&max_id_type=0'
    commtents(url, bowen_id)  # TODO call the comment-fetching function
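
# hotflow returns only the first page of comments. Below is a minimal pagination
# sketch, assuming (not verified here) that each JSON response carries a
# data['max_id'] cursor that selects the next page when passed back as the max_id
# query parameter; the function name and the pages cap are ours:
def get_comments_paged(bowen_id, pages=3):
    max_id = None
    for _ in range(pages):
        url = 'https://m.weibo.cn/comments/hotflow?' + f'id={bowen_id}&mid={bowen_id}' + '&max_id_type=0'
        if max_id:
            url += f'&max_id={max_id}'
        data = requests.get(url).json()
        cards = data.get('data', {}).get('data')
        if not cards:
            break
        for c in cards:
            print(str(c['user']['screen_name']) + ':' + c['text'].split('<')[0])
        max_id = data['data'].get('max_id')  # assumed cursor field
        if not max_id:
            break
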
def commtents(url, id):
    commtents_info = {}
    response = requests.get(url)
    data = response.json()
    # pprint.pprint(data)
    try:
        # print(data)
        # print(url)
        cards = data['data']['data']
    except (KeyError, TypeError):
        with open('评论数据.txt', 'a', encoding='utf-8') as f:
            f.write('这个博文没有什么数据可以值得获取的' + f'https://m.weibo.cn/detail/{id}' + '\n\n')
            f.write('\n\n')
        return
    for c in cards:
        # A key can be missing here and raise a KeyError (ordinary comments all seem
        # to share one structure, though); c.get(key, None) would return None instead
        # of raising when the key is absent.
        text = c['text'].split('<')[0]
        commtents_info['text'] = text
        user = c['user']
        commtents_info['user_id'] = user['id']
        commtents_info['name'] = user['screen_name']
        commtents_info['time'] = c['created_at']
        commtents_info['like'] = c['like_count']
        with open('评论数据.txt', 'a', encoding='utf-8') as f:
            for i in commtents_info:
                f.write(str(i) + ":" + str(commtents_info[i]) + '\n')
            f.write('\n\n')
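
# Picking up the c.get(key, None) idea from the comment above: a defensive variant
# of the field extraction that never raises on an unusual card (the helper name
# safe_comment is ours):
def safe_comment(c):
    user = c.get('user') or {}
    return {'text': (c.get('text') or '').split('<')[0],
            'user_id': user.get('id'),
            'name': user.get('screen_name'),
            'time': c.get('created_at'),
            'like': c.get('like_count')}
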
# TODO Jot down the logic: first fetch the hot-search board, then splice each
# TODO entry's URL into the post-listing URL and fetch the post details; finally
# TODO take a post id, splice the comment URL from it, and fetch that post's comments.
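
# To make the splicing in main() concrete: for a hypothetical hot-search href such
# as 'https://s.weibo.com/weibo?q=%23SomeTopic%23&Refer=top', the slice between the
# first '=' and the last '&' is the URL-encoded topic query, which is embedded into
# the containerid parameter of the getIndex API (same expression as in main(), just
# given a name here):
def build_topic_url(son_url):
    query = son_url[son_url.find('=') + 1:son_url.rfind('&')]  # e.g. '%23SomeTopic%23'
    return ('https://m.weibo.cn/api/container/getIndex?containerid=231522type%3D1%26q%3D'
            + query + '&page_type=searchall')
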
def main():
    while 1:
        # TODO a GUI front end would still be more flexible than this menu
        print('Enter 1 to fetch the Weibo hot-search board')
        n = int(input())
        if n == 1:
            url = 'https://s.weibo.com/top/summary?cate=realtimehot'
            son_text, son_url = resouban(url)
            print(son_text)
            resou_url = ('https://m.weibo.cn/api/container/getIndex?containerid=231522type%3D1%26q%3D'
                         + son_url[son_url.find('=') + 1:son_url.rfind('&')] + '&page_type=searchall')
            weiboxinxi(resou_url)
            get_conmmtens()


main()