Date: 2021-05-22
The full script is below. It fetches the Wenku page, detects the document type from the page source, and dispatches to a dedicated parser: text extraction for `doc` and `txt` documents, image download for everything else (ppt, pdf, xls).

```python
# -*- coding: utf-8 -*-
import requests
import re
import json
import os

session = requests.session()


def fetch_url(url):
    # Wenku pages are served as GBK-encoded HTML
    return session.get(url).content.decode('gbk')


def get_doc_id(url):
    # The document ID sits between "view/" and ".html" in the URL
    return re.findall('view/(.*).html', url)[0]


def parse_type(content):
    # Pull the docType field ('doc', 'txt', 'ppt', ...) out of the page source
    return re.findall(r"docType.*?\:.*?\'(.*?)\'\,", content)[0]


def parse_title(content):
    return re.findall(r"title.*?\:.*?\'(.*?)\'\,", content)[0]


def parse_doc(content):
    # Word documents: the page embeds a list of JSON URLs, one per page chunk
    result = ''
    url_list = re.findall('(https.*?0.json.*?)\\\\x22}', content)
    url_list = [addr.replace("\\\\\\/", "/") for addr in url_list]
    for url in url_list[:-5]:  # the last five URLs are extra pages, skipped
        content = fetch_url(url)
        y = 0
        txtlists = re.findall('"c":"(.*?)".*?"y":(.*?),', content)
        for item in txtlists:
            # a change in the y coordinate means a new line of text starts
            if not y == item[1]:
                y = item[1]
                n = '\n'
            else:
                n = ''
            result += n
            result += item[0].encode('utf-8').decode('unicode_escape', 'ignore')
    return result


def parse_txt(doc_id):
    content_url = 'https://wenku.baidu.com/api/doc/getdocinfo?callback=cb&doc_id=' + doc_id
    content = fetch_url(content_url)
    # the md5sum field is spliced into the URL as-is
    md5 = re.findall('"md5sum":"(.*?)"', content)[0]
    pn = re.findall('"totalPageNum":"(.*?)"', content)[0]
    rsign = re.findall('"rsign":"(.*?)"', content)[0]
    content_url = ('https://wkretype.bdimg.com/retype/text/' + doc_id
                   + '?rn=' + pn + '&type=txt' + md5 + '&rsign=' + rsign)
    content = json.loads(fetch_url(content_url))
    result = ''
    for item in content:
        for i in item['parags']:
            result += i['c'].replace('\\r', '\r').replace('\\n', '\n')
    return result


def parse_other(doc_id):
    # ppt/pdf/xls documents are only exposed as per-page images
    content_url = ('https://wenku.baidu.com/browse/getbcsurl?doc_id=' + doc_id
                   + '&pn=1&rn=99999&type=ppt')
    content = fetch_url(content_url)
    url_list = re.findall('{"zoom":"(.*?)","page"', content)
    url_list = [item.replace("\\", '') for item in url_list]
    if not os.path.exists(doc_id):
        os.mkdir(doc_id)
    for index, url in enumerate(url_list):
        content = session.get(url).content
        path = os.path.join(doc_id, str(index) + '.jpg')
        with open(path, 'wb') as f:
            f.write(content)
    print("Images saved in folder " + doc_id)


def save_file(filename, content):
    with open(filename, 'w', encoding='utf8') as f:
        f.write(content)
    print('Saved as: ' + filename)


# test_txt_url = 'https://wenku.baidu.com/view/cbb4af8b783e0912a3162a89.html?from=search'
# test_ppt_url = 'https://wenku.baidu.com/view/2b7046e3f78a6529657d5376.html?from=search'
# test_pdf_url = 'https://wenku.baidu.com/view/dd6e15c1227916888586d795.html?from=search'
# test_xls_url = 'https://wenku.baidu.com/view/eb4a5bb7312b3169a551a481.html?from=search'


def main():
    url = input('Enter the Wenku URL to download: ')
    content = fetch_url(url)
    doc_id = get_doc_id(url)
    doc_type = parse_type(content)  # renamed from "type" to avoid shadowing the builtin
    title = parse_title(content)
    if doc_type == 'doc':
        save_file(title + '.txt', parse_doc(content))
    elif doc_type == 'txt':
        save_file(title + '.txt', parse_txt(doc_id))
    else:
        parse_other(doc_id)


if __name__ == "__main__":
    main()
```
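Two pieces of this script can be sanity-checked without touching the network. The snippet below is a minimal sketch: it reuses `get_doc_id` with the test URL from the commented-out examples above, then demonstrates the `encode('utf-8').decode('unicode_escape')` round-trip that `parse_doc` relies on (the escape string here is made-up sample data, not real Wenku output).

```python
import re

def get_doc_id(url):
    # Same regex as the script: the ID sits between "view/" and ".html"
    return re.findall('view/(.*).html', url)[0]

test_txt_url = 'https://wenku.baidu.com/view/cbb4af8b783e0912a3162a89.html?from=search'
print(get_doc_id(test_txt_url))  # -> cbb4af8b783e0912a3162a89

# parse_doc receives text as literal "\uXXXX" sequences; encoding to bytes
# and decoding with unicode_escape turns them into real characters
raw = '\\u767e\\u5ea6\\u6587\\u5e93'  # hypothetical sample escape string
print(raw.encode('utf-8').decode('unicode_escape', 'ignore'))  # -> 百度文库
```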
Crawl result: (output screenshot)
That covers the details of crawling and downloading Baidu Wenku documents with Python. For more material on crawling Baidu Wenku with Python, see the other related articles!