1
0
mirror of https://github.com/scrapy/scrapy.git synced 2025-02-23 01:23:47 +00:00
scrapy/tests/CrawlerProcess/asyncio_enabled_reactor.py

23 lines
399 B
Python

import asyncio

from twisted.internet import asyncioreactor

# Install the asyncio-backed Twisted reactor BEFORE any scrapy import: the
# original code performs the install first as well, since importing scrapy
# can pull in Twisted's default reactor, after which install() would fail.
#
# Fix: asyncio.get_event_loop() is deprecated when no loop is running
# (Python 3.10+). Create a fresh loop explicitly and register it as the
# current one so later get_event_loop() callers see the same loop the
# reactor was installed with.
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
asyncioreactor.install(loop)

import scrapy
from scrapy.crawler import CrawlerProcess
class NoRequestsSpider(scrapy.Spider):
    """A spider that deliberately produces no requests.

    Presumably used to exercise CrawlerProcess startup and shutdown with the
    asyncio reactor without performing any network activity — TODO confirm
    against the test that launches this script.
    """

    name = 'no_request'

    def start_requests(self):
        # An empty list means the crawl has nothing to do and ends at once.
        return []
# Build the crawler settings separately so the intent reads clearly.
# NOTE(review): 'ASYNCIO_REACTOR' looks like a boolean setting from an older
# scrapy revision (current scrapy uses 'TWISTED_REACTOR' with a reactor path)
# — confirm against the scrapy version this script targets before renaming.
crawler_settings = {
    'ASYNCIO_REACTOR': True,
}

process = CrawlerProcess(settings=crawler_settings)
process.crawl(NoRequestsSpider)
process.start()  # blocks until the (empty) crawl finishes and the reactor stops