'''
Scrape the 2018 list of cities from the National Bureau of Statistics (国家统计局)
website with Python and save it to an Excel file.
'''
import os
import pandas as pd
import requests
import time
from bs4 import BeautifulSoup
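# Third-party dependencies: requests, beautifulsoup4 (with the lxml parser) and
# pandas; writing .xlsx output additionally needs openpyxl. A typical install
# (an assumption about the environment, not part of the original script):
#
#     pip install requests beautifulsoup4 lxml pandas openpyxl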
def ProvenceGet(url, header):
    '''Fetch the province index page and return {province name: relative url}.'''
    try:
        # headers must be passed as a keyword argument; as the second positional
        # argument it would be interpreted as the query string (params)
        response = requests.get(url, headers=header)
        print(response.status_code)
        print(response.apparent_encoding)
        response.encoding = 'GB2312'
        html = response.text
    except Exception as e:
        print('Network error:', e)
        return {}
    soup = BeautifulSoup(html, 'lxml')
    tb = soup.findAll('a')
    provence = []
    html_provence = []
    for city_html in tb:
        # each province link looks like <a href="11.html">北京市</a>;
        # read the tag's attributes instead of string-slicing str(tag)
        name = city_html.get_text(strip=True)
        href = city_html.get('href')
        if href and name != '京ICP备05034670号':  # skip the ICP record link in the footer
            provence.append(name)
            html_provence.append(href)
    provence_html_dict = dict(zip(provence, html_provence))
    return provence_html_dict
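# A minimal sketch of how the returned mapping can be inspected on its own.
# It pairs each province name with its relative page, assuming the 2018 index
# page links provinces as <a href="11.html">北京市</a> (the exact file names are
# an assumption about the site layout):
#
#     provence_dict = ProvenceGet(url=url, header=header)
#     # e.g. {'北京市': '11.html', '天津市': '12.html', ...}
#     for name, page in list(provence_dict.items())[:3]:
#         print(name, page)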
def city_get(url):
    '''Fetch one province page and return the list of city names on it.'''
    print(url)
    res = requests.get(url)
    res.encoding = 'GB2312'
    html = res.text
    soup_1 = BeautifulSoup(html, 'lxml')
    tb = soup_1.findAll('a')
    cities = []
    cit = []
    for city in tb:
        cities.append(city.get_text(strip=True))
    # each city row contributes two links (code, name): keep the names (odd
    # indices) and drop the district/county placeholders
    for j in range(len(cities)):
        if j % 2 == 1 and (cities[j] != '市辖区' and cities[j] != '县'):
            cit.append(cities[j])
    return cit

if __name__ == '__main__':
    url = r'http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2018/index.html'
    header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'}
    provence_dict = ProvenceGet(url=url, header=header)
    cities = []
    url_base = r'http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2018/'
    for key, value in provence_dict.items():
        url = url_base + value
        city = city_get(url)
        if city:
            cities.append(city)
        time.sleep(1)  # be polite to the server between requests
    print(cities)
    # flatten the per-province lists and drop the placeholder entry
    dat_col = []
    for i in range(len(cities)):
        for j in range(len(cities[i])):
            if cities[i][j] != '省直辖县级行政区划':
                dat_col.append(cities[i][j])
    x = pd.DataFrame(dat_col)
    os.makedirs('./data', exist_ok=True)  # the output directory must exist
    x.to_excel(r'./data/dat.xlsx', header=False, index=False)
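# Optional sanity check, a sketch assuming openpyxl is available for .xlsx I/O:
# read the sheet back and confirm that one city name was written per row.
#
#     check = pd.read_excel(r'./data/dat.xlsx', header=None)
#     print(check.shape)   # (number of cities, 1)
#     print(check.head())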