Python crawler: scraping the rental listings of every city on 58同城 (58.com), explained

Date: 2021-05-22

The code is as follows:

from fake_useragent import UserAgent
from lxml import etree
import requests, os
import time, re, datetime
import base64, json, pymysql
from fontTools.ttLib import TTFont

ua = UserAgent()


class CustomException(Exception):
    def __init__(self, status, msg):
        self.status = status
        self.msg = msg


class City_58:
    '''
    Spider class for 58同城 (58.com); only these two sections are covered for now.
    Rentals url:             https://cd.58.com/chuzu/       (cd is the abbreviation for Chengdu)
    Second-hand housing url: https://cd.58.com/ershoufang/
    '''
    # 58.com renders digits with an obfuscation font; each glyph name maps to the real digit
    font_dict = {
        "glyph00001": "0",
        "glyph00002": "1",
        "glyph00003": "2",
        "glyph00004": "3",
        "glyph00005": "4",
        "glyph00006": "5",
        "glyph00007": "6",
        "glyph00008": "7",
        "glyph00009": "8",
        "glyph00010": "9",
    }
    conn = None  # shared pymysql connection

    def __init__(self):
        self.session = requests.Session()
        self.session.headers = {
            "user-agent": ua.random
        }
        self.__init__all_city()

    def __init__all_city(self):
        '''Fetch the mapping between every city name and its url abbreviation.'''
        # Most of this method -- together with the methods that originally sat between it
        # and __get_title_info (spider_zufang, __response_to_xml, which presumably just
        # wraps etree.HTML(response.text), the font-decoding helpers and the MySQL code) --
        # is missing from the source listing. Only the fragments below survive.
        # api = "https:                                        (url truncated in the source)
        # ... //(.*?)/", a_xpath.xpath("@href")[0])[0])        (tail of the city-parsing loop)
        assert len(area_key_list) == len(area_value_list), "incomplete data"
        self.area_dict = {k: v for k, v in zip(area_key_list, area_value_list)}

    def __get_title_info(self, response):
        '''Get the listing categories on a city page, e.g. 个人房源 (private listings),
        合租房 (shared rentals), 经纪人 (agents), 热选房源 (hot picks)...'''
        # the categories live in the div with class "listTitle"
        xml = self.__response_to_xml(response)
        a_xpath_list = xml.xpath("//div[@class='listTitle']//a[not(@class)]")
        title_key_list = []
        title_value_list = []
        for a_xpath in a_xpath_list:
            title_key_list.append(a_xpath.xpath("span/text()")[0])
            title_value_list.append(a_xpath.xpath("@href")[0])
        assert len(title_key_list) == len(title_value_list), "incomplete data"
        return {k: v for k, v in zip(title_key_list, title_value_list)}


if __name__ == '__main__':
    city_58 = City_58()
    city_58.spider_zufang("重庆")  # crawl rental listings for Chongqing
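The font_dict table and the base64/TTFont imports point at the key anti-scraping step on 58.com: the digits in rents, room counts and dates are rendered with a per-request obfuscation font, and decoding them means mapping each character's codepoint to a glyph name and then, via font_dict, to the real digit. That decoding code falls inside the lost part of the listing, so the following is only a minimal sketch of how this step is commonly done with fontTools; the function names and the regex used to pull the base64 font out of the page are assumptions, not the author's original code.

import base64
import re
from io import BytesIO

from fontTools.ttLib import TTFont

# same glyph-name -> digit table as City_58.font_dict
FONT_DICT = {
    "glyph00001": "0", "glyph00002": "1", "glyph00003": "2", "glyph00004": "3",
    "glyph00005": "4", "glyph00006": "5", "glyph00007": "6", "glyph00008": "7",
    "glyph00009": "8", "glyph00010": "9",
}


def extract_base64_font(html):
    # hypothetical helper: 58.com embeds the font as a base64 blob in an @font-face
    # rule; the exact pattern may differ from what the original article matched
    match = re.search(r"base64,([A-Za-z0-9+/=]+)", html)
    return match.group(1) if match else None


def decode_obfuscated_text(base64_font, text):
    # parse the per-request font and translate every obfuscated character back to a digit
    font = TTFont(BytesIO(base64.b64decode(base64_font)))
    cmap = font["cmap"].getBestCmap()  # {codepoint: glyph name}
    decoded = []
    for ch in text:
        glyph_name = cmap.get(ord(ch))
        decoded.append(FONT_DICT.get(glyph_name, ch))  # non-obfuscated characters pass through
    return "".join(decoded)

Since a fresh font is served with each detail page, the spider would re-fetch the font and call decode_obfuscated_text on the price and room-count strings before saving them.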

The scraped results were written to a MySQL database; the original post attached a screenshot of the resulting table at this point.
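The listing imports pymysql and keeps a class-level conn, but the database code itself is in the lost portion of the article. As a rough sketch only: assuming a database named 58_city and a table named zufang_info with columns for city, area, title, price and url (all of these names are placeholders, not taken from the article), writing one scraped record would look roughly like this:

import pymysql

# connection parameters are placeholders; the article does not show the real ones
conn = pymysql.connect(host="localhost", user="root", password="******",
                       database="58_city", charset="utf8mb4")


def save_record(record):
    # record is a dict holding the fields scraped for one rental listing
    sql = ("INSERT INTO zufang_info (city, area, title, price, url) "
           "VALUES (%s, %s, %s, %s, %s)")
    with conn.cursor() as cursor:
        cursor.execute(sql, (record["city"], record["area"], record["title"],
                             record["price"], record["url"]))
    conn.commit()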

That concludes this article. I hope it is helpful for your study, and thank you for your support.
