mirror of https://github.com/scrapy/scrapy.git synced 2025-02-06 17:11:38 +00:00

Remove unused imports

Adrián Chaves 2019-11-21 14:18:49 +01:00
parent f1fd7ec318
commit a2bf340bab
36 changed files with 84 additions and 108 deletions
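
The cleanup follows two patterns: imports that are genuinely unused are deleted, while imports that are intentional (top-level re-exports and optional-dependency probes) are kept and annotated with "# noqa: F401" so flake8 stops reporting them, which in turn lets the corresponding F401 entries be dropped from the flake8-ignore list below. A minimal sketch of the re-export pattern, using a hypothetical module and name purely for illustration:

# utils.py (hypothetical module, for illustration only)
# The import exists solely to re-export OrderedDict to callers of this module;
# without the noqa marker, flake8 reports F401 ("imported but unused") for it.
from collections import OrderedDict  # noqa: F401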

View File

@@ -21,12 +21,17 @@ addopts =
--ignore=docs/utils
twisted = 1
flake8-ignore =
# Files that are only meant to provide top-level imports are expected not
# to use any of their imports:
scrapy/core/downloader/handlers/http.py F401
scrapy/http/__init__.py F401
# Issues pending a review:
# extras
extras/qps-bench-server.py E261 E501
extras/qpsclient.py E501 E261 E501
# scrapy/commands
scrapy/commands/__init__.py E128 E501
scrapy/commands/check.py F401 E501
scrapy/commands/check.py E501
scrapy/commands/crawl.py E501
scrapy/commands/edit.py E501
scrapy/commands/fetch.py E401 E501 E128 E502 E731
@@ -37,7 +42,6 @@ flake8-ignore =
scrapy/commands/shell.py E128 E501 E502
scrapy/commands/startproject.py E502 E127 E501 E128
scrapy/commands/version.py E501 E128
scrapy/commands/view.py F401
# scrapy/contracts
scrapy/contracts/__init__.py E501 W504
scrapy/contracts/default.py E502 E128
@@ -46,17 +50,16 @@ flake8-ignore =
scrapy/core/scheduler.py E501
scrapy/core/scraper.py E501 E306 E261 E128 W504
scrapy/core/spidermw.py E501 E731 E502 E126 E226
scrapy/core/downloader/__init__.py F401 E501
scrapy/core/downloader/__init__.py E501
scrapy/core/downloader/contextfactory.py E501 E128 E126
scrapy/core/downloader/middleware.py E501 E502
scrapy/core/downloader/tls.py E501 E305 E241
scrapy/core/downloader/webclient.py E731 E501 E261 E502 E128 E126 E226
scrapy/core/downloader/handlers/__init__.py E501
scrapy/core/downloader/handlers/ftp.py E501 E305 E128 E127
scrapy/core/downloader/handlers/http.py F401
scrapy/core/downloader/handlers/http10.py E501
scrapy/core/downloader/handlers/http11.py E501
scrapy/core/downloader/handlers/s3.py E501 F401 E502 E128 E126
scrapy/core/downloader/handlers/s3.py E501 E502 E128 E126
# scrapy/downloadermiddlewares
scrapy/downloadermiddlewares/ajaxcrawl.py E501 E226
scrapy/downloadermiddlewares/decompression.py E501
@@ -66,19 +69,18 @@ flake8-ignore =
scrapy/downloadermiddlewares/httpproxy.py E501
scrapy/downloadermiddlewares/redirect.py E501 W504
scrapy/downloadermiddlewares/retry.py E501 E126
scrapy/downloadermiddlewares/robotstxt.py F401 E501
scrapy/downloadermiddlewares/robotstxt.py E501
scrapy/downloadermiddlewares/stats.py E501
# scrapy/extensions
scrapy/extensions/closespider.py E501 E502 E128 E123
scrapy/extensions/corestats.py E501
scrapy/extensions/feedexport.py E128 E501
scrapy/extensions/httpcache.py E128 E501 E303 F401
scrapy/extensions/httpcache.py E128 E501 E303
scrapy/extensions/memdebug.py E501
scrapy/extensions/spiderstate.py E501
scrapy/extensions/telnet.py E501 W504
scrapy/extensions/throttle.py E501
# scrapy/http
scrapy/http/__init__.py F401
scrapy/http/common.py E501
scrapy/http/cookies.py E501
scrapy/http/request/__init__.py E501
@@ -87,7 +89,7 @@ flake8-ignore =
scrapy/http/response/__init__.py E501 E128 W293 W291
scrapy/http/response/text.py E501 W293 E128 E124
# scrapy/linkextractors
scrapy/linkextractors/__init__.py E731 E502 E501 E402 F401
scrapy/linkextractors/__init__.py E731 E502 E501 E402
scrapy/linkextractors/lxmlhtml.py E501 E731 E226
# scrapy/loader
scrapy/loader/__init__.py E501 E502 E128
@@ -97,8 +99,8 @@ flake8-ignore =
scrapy/pipelines/images.py E265 E501
scrapy/pipelines/media.py E125 E501 E266
# scrapy/selector
scrapy/selector/__init__.py F403 F401
scrapy/selector/unified.py F401 E501 E111
scrapy/selector/__init__.py F403
scrapy/selector/unified.py E501 E111
# scrapy/settings
scrapy/settings/__init__.py E501
scrapy/settings/default_settings.py E501 E261 E114 E116 E226
@@ -106,32 +108,30 @@ flake8-ignore =
# scrapy/spidermiddlewares
scrapy/spidermiddlewares/httperror.py E501
scrapy/spidermiddlewares/offsite.py E501
scrapy/spidermiddlewares/referer.py F401 E501 E129 W503 W504
scrapy/spidermiddlewares/referer.py E501 E129 W503 W504
scrapy/spidermiddlewares/urllength.py E501
# scrapy/spiders
scrapy/spiders/__init__.py F401 E501 E402
scrapy/spiders/__init__.py E501 E402
scrapy/spiders/crawl.py E501
scrapy/spiders/feed.py E501 E261
scrapy/spiders/sitemap.py E501
# scrapy/utils
scrapy/utils/benchserver.py E501
scrapy/utils/boto.py F401
scrapy/utils/conf.py E402 E502 E501
scrapy/utils/console.py E261 F401 E306 E305
scrapy/utils/curl.py F401
scrapy/utils/console.py E261 E306 E305
scrapy/utils/datatypes.py E501 E226
scrapy/utils/decorators.py E501
scrapy/utils/defer.py E501 E128
scrapy/utils/deprecate.py E128 E501 E127 E502
scrapy/utils/engine.py F401 E261
scrapy/utils/engine.py E261
scrapy/utils/gz.py E305 E501 W504
scrapy/utils/http.py F403 F401 E226
scrapy/utils/http.py F403 E226
scrapy/utils/httpobj.py E501
scrapy/utils/iterators.py E501 E701
scrapy/utils/log.py E128 W503
scrapy/utils/markup.py F403 F401 W292
scrapy/utils/markup.py F403 W292
scrapy/utils/misc.py E501 E226
scrapy/utils/multipart.py F403 F401 W292
scrapy/utils/multipart.py F403 W292
scrapy/utils/project.py E501
scrapy/utils/python.py E501
scrapy/utils/reactor.py E226
@@ -143,7 +143,7 @@ flake8-ignore =
scrapy/utils/spider.py E271 E501
scrapy/utils/ssl.py E501
scrapy/utils/test.py E501
scrapy/utils/url.py E501 F403 F401 E128 F405
scrapy/utils/url.py E501 F403 E128 F405
# scrapy
scrapy/__init__.py E402 E501
scrapy/_monkeypatches.py W293
@@ -167,29 +167,29 @@ flake8-ignore =
scrapy/squeues.py E128
scrapy/statscollectors.py E501
# tests
tests/__init__.py F401 E402 E501
tests/mockserver.py E401 E501 E126 E123 F401
tests/__init__.py E402 E501
tests/mockserver.py E401 E501 E126 E123
tests/pipelines.py F841 E226
tests/spiders.py E501 E127
tests/test_closespider.py E501 E127
tests/test_command_fetch.py E501 E261
tests/test_command_parse.py F401 E501 E128 E303 E226
tests/test_command_parse.py E501 E128 E303 E226
tests/test_command_shell.py E501 E128
tests/test_commands.py F401 E128 E501
tests/test_commands.py E128 E501
tests/test_contracts.py E501 E128 W293
tests/test_crawl.py E501 E741 E265
tests/test_crawler.py F841 E306 E501
tests/test_dependencies.py F841 E501 E305
tests/test_downloader_handlers.py E124 E127 E128 E225 E261 E265 F401 E501 E502 E701 E126 E226 E123
tests/test_downloader_handlers.py E124 E127 E128 E225 E261 E265 E501 E502 E701 E126 E226 E123
tests/test_downloadermiddleware.py E501
tests/test_downloadermiddleware_ajaxcrawlable.py E501
tests/test_downloadermiddleware_cookies.py E731 E741 E501 E128 E303 E265 E126
tests/test_downloadermiddleware_decompression.py E127
tests/test_downloadermiddleware_defaultheaders.py E501
tests/test_downloadermiddleware_downloadtimeout.py E501
tests/test_downloadermiddleware_httpcache.py E501 E305 F401
tests/test_downloadermiddleware_httpcompression.py E501 F401 E251 E126 E123
tests/test_downloadermiddleware_httpproxy.py F401 E501 E128
tests/test_downloadermiddleware_httpcache.py E501 E305
tests/test_downloadermiddleware_httpcompression.py E501 E251 E126 E123
tests/test_downloadermiddleware_httpproxy.py E501 E128
tests/test_downloadermiddleware_redirect.py E501 E303 E128 E306 E127 E305
tests/test_downloadermiddleware_retry.py E501 E128 W293 E251 E502 E303 E126
tests/test_downloadermiddleware_robotstxt.py E501
@@ -197,11 +197,11 @@ flake8-ignore =
tests/test_dupefilters.py E221 E501 E741 W293 W291 E128 E124
tests/test_engine.py E401 E501 E502 E128 E261
tests/test_exporters.py E501 E731 E306 E128 E124
tests/test_extension_telnet.py F401 F841
tests/test_feedexport.py E501 F401 F841 E241
tests/test_extension_telnet.py F841
tests/test_feedexport.py E501 F841 E241
tests/test_http_cookies.py E501
tests/test_http_headers.py E501
tests/test_http_request.py F401 E402 E501 E261 E127 E128 W293 E502 E128 E502 E126 E123
tests/test_http_request.py E402 E501 E261 E127 E128 W293 E502 E128 E502 E126 E123
tests/test_http_response.py E501 E301 E502 E128 E265
tests/test_item.py E701 E128 F841 E306
tests/test_link.py E501
@@ -211,20 +211,20 @@ flake8-ignore =
tests/test_mail.py E128 E501 E305
tests/test_middleware.py E501 E128
tests/test_pipeline_crawl.py E131 E501 E128 E126
tests/test_pipeline_files.py F401 E501 W293 E303 E272 E226
tests/test_pipeline_images.py F401 F841 E501 E303
tests/test_pipeline_files.py E501 W293 E303 E272 E226
tests/test_pipeline_images.py F841 E501 E303
tests/test_pipeline_media.py E501 E741 E731 E128 E261 E306 E502
tests/test_request_cb_kwargs.py E501
tests/test_responsetypes.py E501 E305
tests/test_robotstxt_interface.py F401 E501 W291 E501
tests/test_robotstxt_interface.py E501 W291 E501
tests/test_scheduler.py E501 E126 E123
tests/test_selector.py F401 E501 E127
tests/test_spider.py E501 F401
tests/test_selector.py E501 E127
tests/test_spider.py E501
tests/test_spidermiddleware.py E501 E226
tests/test_spidermiddleware_httperror.py E128 E501 E127 E121
tests/test_spidermiddleware_offsite.py E501 E128 E111 W293
tests/test_spidermiddleware_output_chain.py F401 E501 W293 E226
tests/test_spidermiddleware_referer.py F401 E501 F841 E125 E201 E261 E124 E501 E241 E121
tests/test_spidermiddleware_output_chain.py E501 W293 E226
tests/test_spidermiddleware_referer.py E501 F841 E125 E201 E261 E124 E501 E241 E121
tests/test_squeues.py E501 E701 E741
tests/test_utils_conf.py E501 E303 E128
tests/test_utils_curl.py E501
@@ -235,16 +235,16 @@ flake8-ignore =
tests/test_utils_iterators.py E501 E128 E129 E303 E241
tests/test_utils_log.py E741 E226
tests/test_utils_python.py E501 E303 E731 E701 E305
tests/test_utils_reqser.py F401 E501 E128
tests/test_utils_reqser.py E501 E128
tests/test_utils_request.py E501 E128 E305
tests/test_utils_response.py E501
tests/test_utils_signal.py E741 F841 E731 E226
tests/test_utils_sitemap.py E128 E501 E124
tests/test_utils_spider.py E261 E305
tests/test_utils_template.py E305
tests/test_utils_url.py F401 E501 E127 E305 E211 E125 E501 E226 E241 E126 E123
tests/test_utils_url.py E501 E127 E305 E211 E125 E501 E226 E241 E126 E123
tests/test_webclient.py E501 E128 E122 E303 E402 E306 E226 E241 E123 E126
tests/test_cmdline/__init__.py E502 E501
tests/test_settings/__init__.py F401 E501 E128
tests/test_settings/__init__.py E501 E128
tests/test_spiderloader/__init__.py E128 E501
tests/test_utils_misc/__init__.py E501

View File

@@ -1,6 +1,4 @@
from __future__ import print_function
import time
import sys
from collections import defaultdict
from unittest import TextTestRunner, TextTestResult as _TextTestResult

View File

@@ -1,4 +1,4 @@
from scrapy.commands import fetch, ScrapyCommand
from scrapy.commands import fetch
from scrapy.utils.response import open_in_browser

View File

@@ -1,6 +1,4 @@
from __future__ import absolute_import
import random
import warnings
from time import time
from datetime import datetime
from collections import deque

View File

@@ -1,3 +1,2 @@
from __future__ import absolute_import
from .http10 import HTTP10DownloadHandler
from .http11 import HTTP11DownloadHandler as HTTPDownloadHandler

View File

@@ -21,7 +21,7 @@ def _get_boto_connection():
return http_request.headers
try:
import boto.auth
import boto.auth # noqa: F401
except ImportError:
_S3Connection = _v19_S3Connection
else:

View File

@@ -6,18 +6,16 @@ import os
from email.utils import mktime_tz, parsedate_tz
from importlib import import_module
from time import time
from warnings import warn
from weakref import WeakKeyDictionary
from six.moves import cPickle as pickle
from w3lib.http import headers_raw_to_dict, headers_dict_to_raw
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.http import Headers, Response
from scrapy.responsetypes import responsetypes
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.project import data_path
from scrapy.utils.python import to_bytes, to_unicode, garbage_collect
from scrapy.utils.python import to_bytes, to_unicode
from scrapy.utils.request import request_fingerprint

View File

@@ -118,4 +118,4 @@ class FilteringLinkExtractor(object):
# Top-level imports
from .lxmlhtml import LxmlLinkExtractor as LinkExtractor
from .lxmlhtml import LxmlLinkExtractor as LinkExtractor # noqa: F401

View File

@@ -1,4 +1,4 @@
"""
Selectors
"""
from scrapy.selector.unified import *
from scrapy.selector.unified import * # noqa: F401

View File

@@ -2,12 +2,10 @@
XPath selectors based on lxml
"""
import warnings
from parsel import Selector as _ParselSelector
from scrapy.utils.trackref import object_ref
from scrapy.utils.python import to_bytes
from scrapy.http import HtmlResponse, XmlResponse
from scrapy.utils.decorators import deprecated
__all__ = ['Selector', 'SelectorList']

View File

@@ -10,7 +10,6 @@ from scrapy import signals
from scrapy.http import Request
from scrapy.utils.trackref import object_ref
from scrapy.utils.url import url_is_from_spider
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.deprecate import method_is_overridden
@@ -100,6 +99,6 @@ class Spider(object_ref):
# Top-level imports
from scrapy.spiders.crawl import CrawlSpider, Rule
from scrapy.spiders.feed import XMLFeedSpider, CSVFeedSpider
from scrapy.spiders.sitemap import SitemapSpider
from scrapy.spiders.crawl import CrawlSpider, Rule # noqa: F401
from scrapy.spiders.feed import XMLFeedSpider, CSVFeedSpider # noqa: F401
from scrapy.spiders.sitemap import SitemapSpider # noqa: F401

View File

@@ -7,7 +7,7 @@ from scrapy.exceptions import NotConfigured
def is_botocore():
try:
import botocore
import botocore # noqa: F401
return True
except ImportError:
raise NotConfigured('missing botocore library')

View File

@@ -52,7 +52,7 @@ def _embed_standard_shell(namespace={}, banner=''):
except ImportError:
pass
else:
import rlcompleter
import rlcompleter # noqa: F401
readline.parse_and_bind("tab:complete")
@wraps(_embed_standard_shell)
def wrapper(namespace=namespace, banner=''):

View File

@@ -4,7 +4,7 @@ from shlex import split
from six.moves.http_cookies import SimpleCookie
from six.moves.urllib.parse import urlparse
from six import string_types, iteritems
from six import iteritems
from w3lib.http import basic_auth_header

View File

@@ -1,7 +1,7 @@
"""Some debugging functions for working with the Scrapy engine"""
from __future__ import print_function
from time import time # used in global tests code
# used in global tests code
from time import time # noqa: F401
def get_engine_status(engine):

View File

@@ -8,7 +8,7 @@ import warnings
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.decorators import deprecated
from w3lib.http import *
from w3lib.http import * # noqa: F401
warnings.warn("Module `scrapy.utils.http` is deprecated, "

View File

@@ -6,7 +6,7 @@ For new code, always import from w3lib.html instead of this module
import warnings
from scrapy.exceptions import ScrapyDeprecationWarning
from w3lib.html import *
from w3lib.html import * # noqa: F401
warnings.warn("Module `scrapy.utils.markup` is deprecated. "

View File

@@ -6,7 +6,7 @@ For new code, always import from w3lib.form instead of this module
import warnings
from scrapy.exceptions import ScrapyDeprecationWarning
from w3lib.form import *
from w3lib.form import * # noqa: F401
warnings.warn("Module `scrapy.utils.multipart` is deprecated. "

View File

@@ -12,7 +12,6 @@ from six.moves.urllib.parse import (ParseResult, urldefrag, urlparse, urlunparse
# scrapy.utils.url was moved to w3lib.url and import * ensures this
# move doesn't break old code
from w3lib.url import *
from w3lib.url import _safe_chars, _unquotepath
from scrapy.utils.python import to_unicode

View File

@@ -1,9 +1,11 @@
from __future__ import print_function
import sys, time, random, os, json
from six.moves.urllib.parse import urlencode
import json
import os
import random
import sys
from subprocess import Popen, PIPE
from OpenSSL import SSL
from six.moves.urllib.parse import urlencode
from twisted.web.server import Site, NOT_DONE_YET
from twisted.web.resource import Resource
from twisted.web.static import File

View File

@@ -543,7 +543,7 @@ class Https11InvalidDNSPattern(Https11TestCase):
def setUp(self):
try:
from service_identity.exceptions import CertificateError
from service_identity.exceptions import CertificateError # noqa: F401
except ImportError:
raise unittest.SkipTest("cryptography lib is too old")
self.tls_log_message = 'SSL connection certificate: issuer "/C=IE/O=Scrapy/CN=127.0.0.1", subject "/C=IE/O=Scrapy/CN=127.0.0.1"'
@@ -778,7 +778,7 @@ class S3TestCase(unittest.TestCase):
@contextlib.contextmanager
def _mocked_date(self, date):
try:
import botocore.auth
import botocore.auth # noqa: F401
except ImportError:
yield
else:
@@ -843,8 +843,10 @@ class S3TestCase(unittest.TestCase):
b'AWS 0PN5J17HBGZHT7JJ3X82:thdUi9VAkzhkniLj96JIrOPGi0g=')
def test_request_signing5(self):
try: import botocore
except ImportError: pass
try:
import botocore # noqa: F401
except ImportError:
pass
else:
raise unittest.SkipTest(
'botocore does not support overriding date with x-amz-date')

View File

@@ -1,12 +1,9 @@
from __future__ import print_function
import time
import tempfile
import shutil
import unittest
import email.utils
from contextlib import contextmanager
import pytest
import sys
from scrapy.http import Response, HtmlResponse, Request
from scrapy.spiders import Spider

View File

@@ -70,7 +70,7 @@ class HttpCompressionTest(TestCase):
def test_process_response_br(self):
try:
import brotli
import brotli # noqa: F401
except ImportError:
raise SkipTest("no brotli")
response = self._getresponse('br')

View File

@@ -1,11 +1,10 @@
import os
import sys
from functools import partial
from twisted.trial.unittest import TestCase, SkipTest
from twisted.trial.unittest import TestCase
from scrapy.downloadermiddlewares.httpproxy import HttpProxyMiddleware
from scrapy.exceptions import NotConfigured
from scrapy.http import Response, Request
from scrapy.http import Request
from scrapy.spiders import Spider
from scrapy.crawler import Crawler
from scrapy.settings import Settings

View File

@@ -3,7 +3,7 @@ from twisted.conch.telnet import ITelnetProtocol
from twisted.cred import credentials
from twisted.internet import defer
from scrapy.extensions.telnet import TelnetConsole, logger
from scrapy.extensions.telnet import TelnetConsole
from scrapy.utils.test import get_crawler

View File

@@ -167,7 +167,7 @@ class S3FeedStorageTest(unittest.TestCase):
create=True)
def test_parse_credentials(self):
try:
import boto
import boto # noqa: F401
except ImportError:
raise unittest.SkipTest("S3FeedStorage requires boto")
aws_credentials = {'AWS_ACCESS_KEY_ID': 'settings_key',
@@ -268,7 +268,7 @@ class S3FeedStorageTest(unittest.TestCase):
@defer.inlineCallbacks
def test_store_botocore_without_acl(self):
try:
import botocore
import botocore # noqa: F401
except ImportError:
raise unittest.SkipTest('botocore is required')
@@ -288,7 +288,7 @@ class S3FeedStorageTest(unittest.TestCase):
@defer.inlineCallbacks
def test_store_botocore_with_acl(self):
try:
import botocore
import botocore # noqa: F401
except ImportError:
raise unittest.SkipTest('botocore is required')

View File

@@ -1,5 +1,3 @@
# -*- coding: utf-8 -*-
import cgi
import unittest
import re
import json
@@ -8,7 +8,7 @@ from urllib.parse import unquote_to_bytes
import warnings
from six.moves import xmlrpc_client as xmlrpclib
from six.moves.urllib.parse import urlparse, parse_qs, unquote
from six.moves.urllib.parse import urlparse, parse_qs
from scrapy.http import Request, FormRequest, XmlRpcRequest, JsonRequest, Headers, HtmlResponse
from scrapy.utils.python import to_bytes, to_unicode

View File

@@ -1,7 +1,6 @@
import io
import hashlib
import random
import warnings
from tempfile import mkdtemp
from shutil import rmtree

View File

@@ -5,7 +5,7 @@ from twisted.trial import unittest
def reppy_available():
# check if reppy parser is installed
try:
from reppy.robots import Robots
from reppy.robots import Robots # noqa: F401
except ImportError:
return False
return True
@@ -14,7 +14,7 @@ def reppy_available():
def rerp_available():
# check if robotexclusionrulesparser is installed
try:
from robotexclusionrulesparser import RobotExclusionRulesParser
from robotexclusionrulesparser import RobotExclusionRulesParser # noqa: F401
except ImportError:
return False
return True
@@ -23,7 +23,7 @@ def rerp_available():
def protego_available():
# check if protego parser is installed
try:
from protego import Protego
from protego import Protego # noqa: F401
except ImportError:
return False
return True

View File

@@ -1,9 +1,9 @@
import warnings
import weakref
from twisted.trial import unittest
from scrapy.http import TextResponse, HtmlResponse, XmlResponse
from scrapy.selector import Selector
from lxml import etree
class SelectorTestCase(unittest.TestCase):

View File

@@ -15,7 +15,6 @@ from scrapy.spiders import Spider, CrawlSpider, Rule, XMLFeedSpider, \
CSVFeedSpider, SitemapSpider
from scrapy.linkextractors import LinkExtractor
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.trackref import object_ref
from scrapy.utils.test import get_crawler

View File

@@ -6,7 +6,6 @@ from twisted.internet import defer
from scrapy import Spider, Request
from scrapy.utils.test import get_crawler
from tests.mockserver import MockServer
from tests.spiders import MockServerSpider
class LogExceptionMiddleware:

View File

@@ -2,7 +2,6 @@ from six.moves.urllib.parse import urlparse
from unittest import TestCase
import warnings
from scrapy.exceptions import NotConfigured
from scrapy.http import Response, Request
from scrapy.settings import Settings
from scrapy.spiders import Spider

View File

@@ -1,8 +1,4 @@
# -*- coding: utf-8 -*-
import unittest
import sys
import six
from scrapy.http import Request, FormRequest
from scrapy.spiders import Spider

View File

@@ -1,13 +1,9 @@
# -*- coding: utf-8 -*-
import unittest
import six
from six.moves.urllib.parse import urlparse
from scrapy.spiders import Spider
from scrapy.utils.url import (url_is_from_any_domain, url_is_from_spider,
add_http_if_no_scheme, guess_scheme,
parse_url, strip_url)
add_http_if_no_scheme, guess_scheme, strip_url)
__doctests__ = ['scrapy.utils.url']

View File

@@ -73,6 +73,7 @@ commands =
basepython = python3.8
deps =
{[testenv]deps}
-r docs/requirements.txt
pytest-flake8
commands =
py.test --flake8 {posargs:docs scrapy tests}