
renamed setting REQUESTS_PER_DOMAIN to REQUESTS_PER_SPIDER

Pablo Hoffman 2009-11-06 15:42:11 -02:00
parent 580d82468e
commit d604dca96d
3 changed files with 4 additions and 4 deletions


@@ -764,9 +764,9 @@ A dict containing the request download handlers enabled by default in Scrapy.
 You should never modify this setting in your project, modify
 :setting:`REQUEST_HANDLERS` instead.
 
-.. setting:: REQUESTS_PER_DOMAIN
+.. setting:: REQUESTS_PER_SPIDER
 
-REQUESTS_PER_DOMAIN
+REQUESTS_PER_SPIDER
 -------------------
 
 Default: ``8``


@@ -136,7 +136,7 @@ REQUEST_HANDLERS_BASE = {
 }
 
 REQUESTS_QUEUE_SIZE = 0
-REQUESTS_PER_DOMAIN = 8 # max simultaneous requests per domain
+REQUESTS_PER_SPIDER = 8 # max simultaneous requests per domain
 
 # contrib.middleware.retry.RetryMiddleware default settings
 RETRY_TIMES = 2 # initial response + 2 retries = 3 requests
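
For projects that override this cap in their own settings module, the key to set after this commit is the renamed one. A minimal sketch, assuming a hypothetical project settings.py (the module and the value below are illustrative, not part of this change):

    # myproject/settings.py -- hypothetical project settings module
    # After this commit the downloader reads REQUESTS_PER_SPIDER; the old
    # REQUESTS_PER_DOMAIN key is no longer consulted.
    REQUESTS_PER_SPIDER = 16  # raise the default cap of 8 simultaneous requests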


@@ -27,7 +27,7 @@ class SiteInfo(object):
         if download_delay:
             self.max_concurrent_requests = 1
         elif max_concurrent_requests is None:
-            self.max_concurrent_requests = settings.getint('REQUESTS_PER_DOMAIN')
+            self.max_concurrent_requests = settings.getint('REQUESTS_PER_SPIDER')
         else:
             self.max_concurrent_requests = max_concurrent_requests
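
A minimal, self-contained sketch of the fallback logic the SiteInfo hunk above implements (the standalone function and its name are illustrative, not Scrapy API; the real code sits in SiteInfo.__init__ and reads the settings object shown in the diff):

    # Sketch of the concurrency decision made in SiteInfo.__init__ above.
    # Assumption: 'settings' is any object exposing getint(), like Scrapy's.
    def resolve_max_concurrent_requests(settings, download_delay=None,
                                        max_concurrent_requests=None):
        if download_delay:
            # a download delay forces strictly serial requests for the site
            return 1
        elif max_concurrent_requests is None:
            # fall back to the (renamed) project-wide setting, default 8
            return settings.getint('REQUESTS_PER_SPIDER')
        else:
            # an explicit per-site value wins over the global setting
            return max_concurrent_requests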