Mirror of https://github.com/scrapy/scrapy.git, synced 2025-02-06 11:00:46 +00:00
Explicitly mark re-exports. (#6579)
This commit is contained in:
parent c330a399dc
commit 4d31277bc6
pyproject.toml
@@ -72,9 +72,9 @@ version = {file = "./scrapy/VERSION"}
 [tool.mypy]
 ignore_missing_imports = true
+implicit_reexport = false
 
 # Interface classes are hard to support
 
 [[tool.mypy.overrides]]
 module = "twisted.internet.interfaces"
 follow_imports = "skip"
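With implicit_reexport = false, mypy treats a name imported into a module as private to that module unless it is re-exported explicitly, either by listing it in __all__ or by re-importing it under a redundant alias. A minimal sketch with hypothetical module names:

# pkg/_impl.py
def parse(data: str) -> list[str]:
    return data.split(",")

# pkg/__init__.py
# Without the __all__ entry below, mypy under implicit_reexport = false would
# treat `parse` as private here and reject `from pkg import parse` downstream.
from pkg._impl import parse

__all__ = ["parse"]  # explicitly marks `parse` as a re-export

# client.py
from pkg import parse  # OK: `parse` is listed in pkg.__all__
print(parse("a,b"))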
pyproject.toml
@@ -92,6 +92,14 @@ follow_imports = "skip"
 module = "scrapy.settings.default_settings"
 ignore_errors = true
 
+[[tool.mypy.overrides]]
+module = "itemadapter"
+implicit_reexport = true
+
+[[tool.mypy.overrides]]
+module = "twisted"
+implicit_reexport = true
+
 [tool.bumpversion]
 current_version = "2.12.0"
 commit = true
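The two new overrides appear to restore the default behaviour for third-party packages whose public names are implicit re-exports, since their code cannot be changed from this repository. For illustration, an import like this stays clean under the override:

from itemadapter import ItemAdapter  # covered by the per-module override above

adapter = ItemAdapter({"name": "value"})  # wraps dicts, Items, dataclasses, ...
print(adapter.asdict())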
pyproject.toml
@@ -359,13 +367,9 @@ ignore = [
 ]
 
 [tool.ruff.lint.per-file-ignores]
-# Exclude files that are meant to provide top-level imports
-"scrapy/__init__.py" = ["E402"]
-"scrapy/core/downloader/handlers/http.py" = ["F401"]
-"scrapy/http/__init__.py" = ["F401"]
-"scrapy/linkextractors/__init__.py" = ["E402", "F401"]
-"scrapy/selector/__init__.py" = ["F401"]
-"scrapy/spiders/__init__.py" = ["E402", "F401"]
+# Circular import workarounds
+"scrapy/linkextractors/__init__.py" = ["E402"]
+"scrapy/spiders/__init__.py" = ["E402"]
 
 # Skip bandit in tests
 "tests/**" = ["S"]
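The F401 ignores can be dropped because ruff, like pyflakes, does not flag an apparently unused import whose name is listed in __all__: the __all__ blocks added below turn those imports into documented re-exports. A sketch with a hypothetical module:

# reexports.py
from json import dumps  # still F401: imported but unused and not re-exported
from json import loads

__all__ = ["loads"]  # `loads` now counts as a re-export, so no F401 for it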
scrapy/core/downloader/handlers/http.py
@@ -2,3 +2,8 @@ from scrapy.core.downloader.handlers.http10 import HTTP10DownloadHandler
 from scrapy.core.downloader.handlers.http11 import (
     HTTP11DownloadHandler as HTTPDownloadHandler,
 )
+
+__all__ = [
+    "HTTP10DownloadHandler",
+    "HTTPDownloadHandler",
+]
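Note that it is the alias, not the original class name, that gets exported here. A short check of how the re-export behaves, assuming a Scrapy installation:

from scrapy.core.downloader.handlers.http import HTTPDownloadHandler
from scrapy.core.downloader.handlers.http11 import HTTP11DownloadHandler

assert HTTPDownloadHandler is HTTP11DownloadHandler  # same class, two import paths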
scrapy/http/__init__.py
@@ -15,3 +15,16 @@ from scrapy.http.response.html import HtmlResponse
 from scrapy.http.response.json import JsonResponse
 from scrapy.http.response.text import TextResponse
 from scrapy.http.response.xml import XmlResponse
+
+__all__ = [
+    "FormRequest",
+    "Headers",
+    "HtmlResponse",
+    "JsonRequest",
+    "JsonResponse",
+    "Request",
+    "Response",
+    "TextResponse",
+    "XmlResponse",
+    "XmlRpcRequest",
+]
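Besides satisfying mypy, __all__ also defines what `from scrapy.http import *` binds: only the ten names listed. Ordinary explicit imports are unaffected; a quick sketch, assuming a Scrapy installation:

from scrapy.http import HtmlResponse, Request

req = Request(url="https://example.com")
resp = HtmlResponse(url=req.url, body=b"<html></html>", encoding="utf-8")
print(resp.css("html"))  # selectors work because HtmlResponse is a TextResponse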
scrapy/linkextractors/__init__.py
@@ -126,3 +126,8 @@ def _is_valid_url(url: str) -> bool:
 
 # Top-level imports
 from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor as LinkExtractor
+
+__all__ = [
+    "IGNORED_EXTENSIONS",
+    "LinkExtractor",
+]
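This import sits at the bottom of the module, which is why the E402 ignore for this file survives under the "Circular import workarounds" comment in pyproject.toml: the submodule being imported itself imports names from this package, so the re-export has to run after those names exist. A generic sketch with hypothetical names:

# pkg/__init__.py
IGNORED_SUFFIXES = [".jpg", ".png"]  # defined before the submodule import below

# pkg/extract.py does `from pkg import IGNORED_SUFFIXES`; importing it at the
# top of this file would re-enter a half-initialised pkg, so the import is
# deferred to the bottom of the module (hence the E402 suppression):
from pkg.extract import Extractor  # noqa: E402

__all__ = ["IGNORED_SUFFIXES", "Extractor"]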
scrapy/selector/__init__.py
@@ -4,3 +4,8 @@ Selectors
 
 # top-level imports
 from scrapy.selector.unified import Selector, SelectorList
+
+__all__ = [
+    "Selector",
+    "SelectorList",
+]
scrapy/spiders/__init__.py
@@ -117,3 +117,12 @@ class Spider(object_ref):
 from scrapy.spiders.crawl import CrawlSpider, Rule
 from scrapy.spiders.feed import CSVFeedSpider, XMLFeedSpider
 from scrapy.spiders.sitemap import SitemapSpider
+
+__all__ = [
+    "CSVFeedSpider",
+    "CrawlSpider",
+    "Rule",
+    "SitemapSpider",
+    "Spider",
+    "XMLFeedSpider",
+]
tests/test_item.py
@@ -1,7 +1,8 @@
 import unittest
+from abc import ABCMeta
 from unittest import mock
 
-from scrapy.item import ABCMeta, Field, Item, ItemMeta
+from scrapy.item import Field, Item, ItemMeta
 
 
 class ItemTest(unittest.TestCase):
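This is the consumer-side effect of the stricter setting: scrapy.item imports ABCMeta from abc for its own use but does not re-export it, so pulling it out of scrapy.item now fails type checking. The fix is to import the name from the module that defines it:

# from scrapy.item import ABCMeta  # attr-defined error under implicit_reexport = false
from abc import ABCMeta

class Base(metaclass=ABCMeta):  # minimal use of the directly-imported name
    pass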
tests/test_utils_url.py
@@ -6,7 +6,7 @@ import pytest
 from scrapy.linkextractors import IGNORED_EXTENSIONS
 from scrapy.spiders import Spider
 from scrapy.utils.misc import arg_to_iter
-from scrapy.utils.url import (
+from scrapy.utils.url import (  # type: ignore[attr-defined]
     _is_filesystem_path,
     _public_w3lib_objects,
     add_http_if_no_scheme,
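Here the import is left as-is at runtime and the new mypy error is silenced instead, presumably because some of these names reach scrapy.utils.url as implicit re-exports from w3lib (the _public_w3lib_objects import hints at that). The ignore is scoped to one error code on one statement:

# The comment on the opening line covers the whole multi-line import, and only
# for [attr-defined]; any other mistake here would still be reported.
from scrapy.utils.url import (  # type: ignore[attr-defined]
    add_http_if_no_scheme,
)

print(add_http_if_no_scheme("example.com"))  # -> "http://example.com"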