python爬虫实现获取下一页代码

时间:2021-05-22

我们首先来看下实例代码:

from time import sleep
import faker
import requests
from lxml import etree

fake = faker.Faker()
base_url = "http://angelimg.spbeen.com"


def get_next_link(url):
    """Return the absolute URL of the 'next page' link found on *url*, or False if absent."""
    content = downloadHtml(url)
    html = etree.HTML(content)
    next_url = html.xpath("//a[@class='ch next']/@href")
    if next_url:
        return base_url + next_url[0]
    else:
        return False


def downloadHtml(url):
    """Fetch *url* with a randomized User-Agent and return the response body as text.

    Bug fix: the parameter was originally named ``ur`` while the body read
    ``url``, which silently relied on the module-level ``url`` global set in
    the ``__main__`` block and would raise NameError in any other context.
    """
    user_agent = fake.user_agent()
    headers = {'User-Agent': user_agent, "Referer": "http://angelimg.spbeen.com/"}
    response = requests.get(url, headers=headers)
    return response.text


def getImgUrl(content):
    """Extract ``(image_url, title)`` from an HTML page string.

    Raises IndexError if either XPath expression matches nothing.
    """
    html = etree.HTML(content)
    img_url = html.xpath('//*[@id="content"]/a/img/@src')
    # NOTE(review): the predicate ['@class=article'] is a string literal
    # (always truthy), not a class test; [@class='article'] was probably
    # intended — verify against the live page before changing the expression.
    title = html.xpath(".//div['@class=article']/h2/text()")
    return img_url[0], title[0]


def saveImg(title, img_url):
    """Download *img_url* and save it as ``txt/<title>.jpg``.

    Both arguments must be non-None; otherwise the call is a no-op.
    """
    if img_url is not None and title is not None:
        with open("txt/" + str(title) + ".jpg", 'wb') as f:
            user_agent = fake.user_agent()
            headers = {'User-Agent': user_agent, "Referer": "http://angelimg.spbeen.com/"}
            content = requests.get(img_url, headers=headers)
            # request_view(content)  # uncomment to preview the response in a browser
            f.write(content.content)
            # redundant explicit f.close() removed: ``with`` closes the file


def request_view(response):
    """Debug helper: dump *response* to tmp.html (with a <base> tag injected
    so relative links resolve) and open it in a new browser tab."""
    import webbrowser
    request_url = response.url
    base_url = '<head><base href="%s" rel="external nofollow" >' % (request_url)
    base_url = base_url.encode()
    content = response.content.replace(b"<head>", base_url)
    # use a context manager so the temp file is closed even if write() fails
    with open('tmp.html', 'wb') as tem_html:
        tem_html.write(content)
    webbrowser.open_new_tab('tmp.html')


def crawl_img(url):
    """Download and save the single image displayed on the gallery page *url*."""
    content = downloadHtml(url)
    img_url, title = getImgUrl(content)
    saveImg(title, img_url)


if __name__ == "__main__":
    url = "http://angelimg.spbeen.com/ang/4968/1"
    # follow the "next" link until get_next_link returns False
    while url:
        print(url)
        crawl_img(url)
        url = get_next_link(url)

python 爬虫如何执行自动下一页循环加载文字

from bs4 import BeautifulSoupimport requestsimport timefrom lxml import etreeimport os# 该demo执行的为如何利用bs去爬一些文字def start(): # 发起网络请求 html=requests.get('http://.cn/zjxw/politics/'+next_page get_html_from_etree(url_2) if __name__ == '__main__': start()

到此这篇关于python爬虫实现获取下一页代码的文章就介绍到这了。更多相关python爬虫获取下一页的内容，请搜索以前的文章，或继续浏览下面的相关文章。希望大家以后多多支持！

声明:本页内容来源网络,仅供用户参考;我单位不保证亦不表示资料全面及准确无误,也不保证亦不表示这些资料为最新信息,如因任何原因,本网内容或者用户因倚赖本网内容造成任何损失或损害,我单位将不会负任何法律责任。如涉及版权问题,请提交至online#300.cn邮箱联系删除。

相关文章