Getting Started with Scrapy: A Concise Scrapy Tutorial (Long Read)
Before reading on, make sure Scrapy is installed; the basic installation is pip install scrapy. We gave a first look at Scrapy in an earlier article, and this post builds on it.
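To confirm the installation worked, you can print the installed version from Python (the version on your machine will differ from the one in the comment):

import scrapy

print(scrapy.__version__)  # e.g. '2.11.2' -- whatever version pip installed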
In this tutorial you will:

1. Create a Scrapy project
2. Write a spider (also called a crawler; the terms are used interchangeably here) that crawls a site's pages and extracts data
3. Export the scraped data from the command line
4. Crawl subpages recursively
5. Learn about and use the arguments a spider supports
Creating a crawler project
To create a new project, run:

scrapy startproject tutorial

This generates a tutorial directory with the following contents:

scrapy.cfg            # deploy configuration file
tutorial/             # the project's Python module; you import your code from here
    __init__.py
    items.py          # item definitions
    middlewares.py    # middleware definitions
    pipelines.py      # pipeline definitions
    settings.py       # project settings
    spiders/          # directory where your spiders live
        __init__.py
Writing a spider class
Spiders are the classes you define and Scrapy uses to scrape a website. Save the code below under the project's spiders directory, e.g. as tutorial/spiders/quotes_spider.py:

import scrapy


class QuotesSpider(scrapy.Spider):
    name = "quotes"

    def start_requests(self):
        urls = [
            'http://quotes.toscrape.com/page/1/',
            'http://quotes.toscrape.com/page/2/',
        ]
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        page = response.url.split("/")[-2]
        filename = 'quotes-%s.html' % page
        with open(filename, 'wb') as f:
            f.write(response.body)
        self.log('Saved file %s' % filename)

Running the spider
From the project's top-level directory, run:

scrapy crawl quotes

...
2016-12-16 21:24:05 [scrapy.core.engine] INFO: Spider opened
2016-12-16 21:24:05 [scrapy.extensions.logstats] INFO: Crawled 0 pages (at 0 pages/min), scraped 0 items (at 0 items/min)
2016-12-16 21:24:05 [scrapy.extensions.telnet] DEBUG: Telnet console listening on 127.0.0.1:6023
2016-12-16 21:24:05 [scrapy.core.engine] DEBUG: Crawled (404) <GET http://quotes.toscrape.com/robots.txt> (referer: None)
2016-12-16 21:24:05 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://quotes.toscrape.com/page/1/> (referer: None)
2016-12-16 21:24:05 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://quotes.toscrape.com/page/2/> (referer: None)
2016-12-16 21:24:05 [quotes] DEBUG: Saved file quotes-1.html
2016-12-16 21:24:05 [quotes] DEBUG: Saved file quotes-2.html
2016-12-16 21:24:05 [scrapy.core.engine] INFO: Closing spider (finished)
...

Under the hood
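What happened here? Scrapy scheduled the scrapy.Request objects returned by the spider's start_requests() method; as each response arrived, it instantiated a Response object and called the callback associated with the request (here, parse), passing the response as the only argument.

You also don't have to drive Scrapy through the scrapy crawl command. A minimal sketch of running the same spider from a plain Python script via CrawlerProcess (the script name and import path are assumptions based on the file layout above):

# run_quotes.py (hypothetical script, placed in the project root)
from scrapy.crawler import CrawlerProcess

from tutorial.spiders.quotes_spider import QuotesSpider  # assumed module path

process = CrawlerProcess(settings={'LOG_LEVEL': 'INFO'})
process.crawl(QuotesSpider)  # schedule the spider
process.start()              # start the reactor; blocks until the crawl finishes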
A shorthand for start_requests
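The shorthand works because the base scrapy.Spider class already provides a default start_requests() that iterates over a start_urls class attribute. A simplified sketch of that default (illustrative, not the exact Scrapy source):

import scrapy

class SketchSpider(scrapy.Spider):
    name = "sketch"
    start_urls = []  # a real subclass supplies URLs here

    def start_requests(self):
        # Roughly what the base class does: one Request per start URL.
        # dont_filter=True lets start URLs bypass the duplicate filter,
        # and the callback defaults to self.parse when omitted.
        for url in self.start_urls:
            yield scrapy.Request(url, dont_filter=True)

So you can drop start_requests() entirely and just define start_urls: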
import scrapy


class QuotesSpider(scrapy.Spider):
    name = "quotes"
    start_urls = [
        'http://quotes.toscrape.com/page/1/',
        'http://quotes.toscrape.com/page/2/',
    ]

    def parse(self, response):
        page = response.url.split("/")[-2]
        filename = 'quotes-%s.html' % page
        with open(filename, 'wb') as f:
            f.write(response.body)

Extracting data
The best way to learn Scrapy's extraction API is the interactive shell:

scrapy shell 'http://quotes.toscrape.com/page/1/'

[ ... Scrapy log here ... ]
2016-09-19 12:09:27 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://quotes.toscrape.com/page/1/> (referer: None)
[s] Available Scrapy objects:
[s]   scrapy     scrapy module (contains scrapy.Request, scrapy.Selector, etc)
[s]   crawler    <scrapy.crawler.Crawler object at 0x7fa91d888c90>
[s]   item       {}
[s]   request    <GET http://quotes.toscrape.com/page/1/>
[s]   response   <200 http://quotes.toscrape.com/page/1/>
[s]   settings   <scrapy.settings.Settings object at 0x7fa91d888c10>
[s]   spider     <DefaultSpider 'default' at 0x7fa91c8af990>
[s] Useful shortcuts:
[s]   shelp()           Shell help (print this help)
[s]   fetch(req_or_url) Fetch request (or URL) and update local objects
[s]   view(response)    View response in a browser

Now try some CSS selectors against the response:

>>> response.css('title')
[<Selector xpath='descendant-or-self::title' data='<title>Quotes to Scrape</title>'>]
>>> response.css('title::text').getall()
['Quotes to Scrape']
>>> response.css('title').getall()
['<title>Quotes to Scrape</title>']
>>> response.css('title::text').get()
'Quotes to Scrape'
>>> response.css('title::text')[0].get()
'Quotes to Scrape'
>>> response.css('title::text').re(r'Quotes.*')
['Quotes to Scrape']
>>> response.css('title::text').re(r'Q\w+')
['Quotes']
>>> response.css('title::text').re(r'(\w+) to (\w+)')
['Quotes', 'Scrape']

A brief introduction to XPath
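Scrapy selectors also support XPath expressions. XPath is more powerful than CSS: it can walk the document in any direction and match on text content, not just structure; in fact, the shell output above shows that CSS selectors are translated to XPath under the hood. You can experiment outside the shell with a standalone scrapy.Selector; a tiny self-contained sketch (the HTML string is made up for illustration):

from scrapy.selector import Selector

doc = '<html><head><title>Quotes to Scrape</title></head></html>'
sel = Selector(text=doc)
print(sel.xpath('//title/text()').get())  # prints: Quotes to Scrape

Against the fetched response, the same expressions look like this: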
>>> response.xpath('//title')
[<Selector xpath='//title' data='<title>Quotes to Scrape</title>'>]
>>> response.xpath('//title/text()').get()
'Quotes to Scrape'

Extracting quotes and authors
Each quote on http://quotes.toscrape.com is represented by HTML like this:

<div class="quote">
    <span class="text">“The world as we have created it is a process of our thinking. It cannot be changed without changing our thinking.”</span>
    <span>
        by <small class="author">Albert Einstein</small>
        <a href="/author/Albert-Einstein">(about)</a>
    </span>
    <div class="tags">
        Tags:
        <a class="tag" href="/tag/change/page/1/">change</a>
        <a class="tag" href="/tag/deep-thoughts/page/1/">deep-thoughts</a>
        <a class="tag" href="/tag/thinking/page/1/">thinking</a>
        <a class="tag" href="/tag/world/page/1/">world</a>
    </div>
</div>

Open a shell and pick the quotes apart:

$ scrapy shell 'http://quotes.toscrape.com'
>>> response.css("div.quote")
[<Selector xpath="descendant-or-self::div[@class and contains(concat(' ', normalize-space(@class), ' '), ' quote ')]" data='<div class="quote" itemscope itemtype...'>,
 <Selector xpath="descendant-or-self::div[@class and contains(concat(' ', normalize-space(@class), ' '), ' quote ')]" data='<div class="quote" itemscope itemtype...'>,
 ...]
>>> quote = response.css("div.quote")[0]
>>> text = quote.css("span.text::text").get()
>>> text
'“The world as we have created it is a process of our thinking. It cannot be changed without changing our thinking.”'
>>> author = quote.css("small.author::text").get()
>>> author
'Albert Einstein'
>>> tags = quote.css("div.tags a.tag::text").getall()
>>> tags
['change', 'deep-thoughts', 'thinking', 'world']
>>> for quote in response.css("div.quote"):
...     text = quote.css("span.text::text").get()
...     author = quote.css("small.author::text").get()
...     tags = quote.css("div.tags a.tag::text").getall()
...     print(dict(text=text, author=author, tags=tags))
{'text': '“The world as we have created it is a process of our thinking. It cannot be changed without changing our thinking.”', 'author': 'Albert Einstein', 'tags': ['change', 'deep-thoughts', 'thinking', 'world']}
{'text': '“It is our choices, Harry, that show what we truly are, far more than our abilities.”', 'author': 'J.K. Rowling', 'tags': ['abilities', 'choices']}
...

Extracting data in spider code
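Inside a spider, the same selector logic runs in the parse() callback, and every dict the callback yields becomes a scraped item. In larger projects you would typically declare a typed Item class in items.py instead of yielding plain dicts; a minimal sketch (optional, not needed for this tutorial):

import scrapy

class QuoteItem(scrapy.Item):
    # One Field per attribute we scrape below.
    text = scrapy.Field()
    author = scrapy.Field()
    tags = scrapy.Field()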
import scrapy


class QuotesSpider(scrapy.Spider):
    name = "quotes"
    start_urls = [
        'http://quotes.toscrape.com/page/1/',
        'http://quotes.toscrape.com/page/2/',
    ]

    def parse(self, response):
        for quote in response.css('div.quote'):
            yield {
                'text': quote.css('span.text::text').get(),
                'author': quote.css('small.author::text').get(),
                'tags': quote.css('div.tags a.tag::text').getall(),
            }

Running this spider logs the extracted items:

2016-09-19 18:57:19 [scrapy.core.scraper] DEBUG: Scraped from <200 http://quotes.toscrape.com/page/1/>
{'tags': ['life', 'love'], 'author': 'André Gide', 'text': '“It is better to be hated for what you are than to be loved for what you are not.”'}
2016-09-19 18:57:19 [scrapy.core.scraper] DEBUG: Scraped from <200 http://quotes.toscrape.com/page/1/>
{'tags': ['edison', 'failure', 'inspirational', 'paraphrased'], 'author': 'Thomas A. Edison', 'text': "“I have not failed. I've just found 10,000 ways that won't work.”"}

Storing the data
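The simplest way to persist the scraped items is a feed export via the -o option, as the commands below show. quotes.json collects everything into a single JSON array, while quotes.jl uses JSON Lines: one JSON object per line, which is easy to stream and safe to append to. A sketch of post-processing the quotes.jl file that the second command below produces:

import json

with open('quotes.jl') as f:       # assumes the crawl below has already run
    for line in f:
        item = json.loads(line)    # each line is one scraped item
        print(item['author'])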
scrapy crawl quotes -o quotes.json
scrapy crawl quotes -o quotes.jl

Note that -o appends to an existing file, so running the JSON export twice leaves quotes.json malformed; the line-oriented quotes.jl does not have that problem.

Following links
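To scrape every page rather than just the first two, the spider needs to find the "Next" link in each response and resolve it against the current URL, because the href in the markup is relative. The resolution is ordinary URL joining; for intuition, the same operation in plain Python:

from urllib.parse import urljoin

print(urljoin('http://quotes.toscrape.com/page/1/', '/page/2/'))
# -> http://quotes.toscrape.com/page/2/

response.urljoin() performs exactly this relative-to-absolute conversion. First, the pager markup and how to select it: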
<ul class="pager">
    <li class="next">
        <a href="/page/2/">Next <span aria-hidden="true">→</span></a>
    </li>
</ul>

>>> response.css('li.next a').get()
'<a href="/page/2/">Next <span aria-hidden="true">→</span></a>'
>>> response.css('li.next a::attr(href)').get()
'/page/2/'
>>> response.css('li.next a').attrib['href']
'/page/2/'

A spider that follows the pagination recursively:

import scrapy
class QuotesSpider(scrapy.Spider):
    name = "quotes"
    start_urls = [
        'http://quotes.toscrape.com/page/1/',
    ]

    def parse(self, response):
        for quote in response.css('div.quote'):
            yield {
                'text': quote.css('span.text::text').get(),
                'author': quote.css('small.author::text').get(),
                'tags': quote.css('div.tags a.tag::text').getall(),
            }

        next_page = response.css('li.next a::attr(href)').get()
        if next_page is not None:
            next_page = response.urljoin(next_page)
            yield scrapy.Request(next_page, callback=self.parse)

A shortcut for creating Requests
Unlike scrapy.Request, response.follow accepts relative URLs directly, so the urljoin call can be dropped:

import scrapy


class QuotesSpider(scrapy.Spider):
    name = "quotes"
    start_urls = [
        'http://quotes.toscrape.com/page/1/',
    ]

    def parse(self, response):
        for quote in response.css('div.quote'):
            yield {
                'text': quote.css('span.text::text').get(),
                'author': quote.css('span small::text').get(),
                'tags': quote.css('div.tags a.tag::text').getall(),
            }

        next_page = response.css('li.next a::attr(href)').get()
        if next_page is not None:
            yield response.follow(next_page, callback=self.parse)

response.follow also accepts selectors instead of strings, and response.follow_all handles a whole list of links at once:

for href in response.css('ul.pager a::attr(href)'):
    yield response.follow(href, callback=self.parse)

for a in response.css('ul.pager a'):
    yield response.follow(a, callback=self.parse)

# anchors holds a list of <a> selectors
anchors = response.css('ul.pager a')
yield from response.follow_all(anchors, callback=self.parse)

yield from response.follow_all(css='ul.pager a', callback=self.parse)

More examples
The following spider starts from the home page, follows every link to an author page (handled by parse_author), and follows pagination links with the parse callback, as before:

import scrapy


class AuthorSpider(scrapy.Spider):
    name = 'author'

    start_urls = ['http://quotes.toscrape.com/']

    def parse(self, response):
        author_page_links = response.css('.author + a')
        yield from response.follow_all(author_page_links, self.parse_author)

        pagination_links = response.css('li.next a')
        yield from response.follow_all(pagination_links, self.parse)

    def parse_author(self, response):
        def extract_with_css(query):
            return response.css(query).get(default='').strip()

        yield {
            'name': extract_with_css('h3.author-title::text'),
            'birthdate': extract_with_css('.author-born-date::text'),
            'bio': extract_with_css('.author-description::text'),
        }

Even when many quotes share an author, the same author page is fetched only once: Scrapy filters out duplicate requests by default.

Using spider arguments
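Arguments passed with the -a option of scrapy crawl are forwarded to the spider's __init__; the default implementation copies them onto the instance as attributes, which is why the getattr(self, 'tag', None) call in the spider below works. A sketch of an explicit equivalent (functionally the same as relying on the default):

import scrapy

class QuotesSpider(scrapy.Spider):
    name = "quotes"

    def __init__(self, tag=None, *args, **kwargs):
        # The base class's default __init__ would have stored tag as an
        # attribute anyway; spelling it out makes the mechanism visible.
        super().__init__(*args, **kwargs)
        self.tag = tag

For example, to scrape only quotes tagged humor: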
scrapy crawl quotes -o quotes-humor.json -a tag=humor

The spider below reads the tag attribute and uses it to build its start URL:

import scrapy
class QuotesSpider(scrapy.Spider):
    name = "quotes"

    def start_requests(self):
        url = 'http://quotes.toscrape.com/'
        tag = getattr(self, 'tag', None)
        if tag is not None:
            url = url + 'tag/' + tag
        yield scrapy.Request(url, self.parse)

    def parse(self, response):
        for quote in response.css('div.quote'):
            yield {
                'text': quote.css('span.text::text').get(),
                'author': quote.css('small.author::text').get(),
            }

        next_page = response.css('li.next a::attr(href)').get()
        if next_page is not None:
            yield response.follow(next_page, self.parse)

Conclusion

This tutorial covered the full basic workflow: creating a project, writing a spider, extracting data with CSS and XPath selectors, exporting it from the command line, following links recursively, and parameterizing a crawl with spider arguments. The official Scrapy documentation is the natural next stop for deeper topics such as the item pipelines and middlewares we saw in the project layout.