mirror of https://github.com/scrapy/scrapy.git synced 2025-02-06 11:00:46 +00:00

Explicitly mark re-exports. (#6579)

Andrey Rakhmatullin, 2025-01-03 02:48:14 +04:00, committed by GitHub
parent c330a399dc
commit 4d31277bc6
8 changed files with 52 additions and 10 deletions
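For context: the commit turns off mypy's `implicit_reexport`, so a name imported into a module is treated as private to that module unless it is re-exported explicitly, either by listing it in `__all__` (the approach taken in the diffs below) or with a redundant `from x import Y as Y` alias. The same `__all__` entry lets Ruff's F401 check treat the import as an intentional re-export rather than an unused import. A minimal sketch with hypothetical names (`mypackage`, `Engine`):

```python
# Minimal sketch of an explicit re-export (hypothetical package "mypackage"
# and class "Engine"; the diffs below do the same for scrapy modules).
#
# mypackage/__init__.py
from mypackage.core import Engine

# With implicit_reexport = false, mypy treats Engine as private here unless it
# is re-exported explicitly. Listing it in __all__ marks it as public, and
# Ruff's F401 check no longer reports the import as unused.
__all__ = [
    "Engine",
]

# Equivalent per-import form: `from mypackage.core import Engine as Engine`.
```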

View File

@@ -72,9 +72,9 @@ version = {file = "./scrapy/VERSION"}
[tool.mypy]
ignore_missing_imports = true
implicit_reexport = false
# Interface classes are hard to support
[[tool.mypy.overrides]]
module = "twisted.internet.interfaces"
follow_imports = "skip"
@@ -92,6 +92,14 @@ follow_imports = "skip"
module = "scrapy.settings.default_settings"
ignore_errors = true
[[tool.mypy.overrides]]
module = "itemadapter"
implicit_reexport = true
[[tool.mypy.overrides]]
module = "twisted"
implicit_reexport = true
[tool.bumpversion]
current_version = "2.12.0"
commit = true
@@ -359,13 +367,9 @@ ignore = [
]
[tool.ruff.lint.per-file-ignores]
# Exclude files that are meant to provide top-level imports
"scrapy/__init__.py" = ["E402"]
"scrapy/core/downloader/handlers/http.py" = ["F401"]
"scrapy/http/__init__.py" = ["F401"]
"scrapy/linkextractors/__init__.py" = ["E402", "F401"]
"scrapy/selector/__init__.py" = ["F401"]
"scrapy/spiders/__init__.py" = ["E402", "F401"]
# Circular import workarounds
"scrapy/linkextractors/__init__.py" = ["E402"]
"scrapy/spiders/__init__.py" = ["E402"]
# Skip bandit in tests
"tests/**" = ["S"]

View File

@@ -2,3 +2,8 @@ from scrapy.core.downloader.handlers.http10 import HTTP10DownloadHandler
from scrapy.core.downloader.handlers.http11 import (
HTTP11DownloadHandler as HTTPDownloadHandler,
)
__all__ = [
"HTTP10DownloadHandler",
"HTTPDownloadHandler",
]

View File

@@ -15,3 +15,16 @@ from scrapy.http.response.html import HtmlResponse
from scrapy.http.response.json import JsonResponse
from scrapy.http.response.text import TextResponse
from scrapy.http.response.xml import XmlResponse
__all__ = [
"FormRequest",
"Headers",
"HtmlResponse",
"JsonRequest",
"JsonResponse",
"Request",
"Response",
"TextResponse",
"XmlResponse",
"XmlRpcRequest",
]

View File

@@ -126,3 +126,8 @@ def _is_valid_url(url: str) -> bool:
# Top-level imports
from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor as LinkExtractor
__all__ = [
"IGNORED_EXTENSIONS",
"LinkExtractor",
]

View File

@@ -4,3 +4,8 @@ Selectors
# top-level imports
from scrapy.selector.unified import Selector, SelectorList
__all__ = [
"Selector",
"SelectorList",
]

View File

@@ -117,3 +117,12 @@ class Spider(object_ref):
from scrapy.spiders.crawl import CrawlSpider, Rule
from scrapy.spiders.feed import CSVFeedSpider, XMLFeedSpider
from scrapy.spiders.sitemap import SitemapSpider
__all__ = [
"CSVFeedSpider",
"CrawlSpider",
"Rule",
"SitemapSpider",
"Spider",
"XMLFeedSpider",
]

View File

@@ -1,7 +1,8 @@
import unittest
from abc import ABCMeta
from unittest import mock
from scrapy.item import ABCMeta, Field, Item, ItemMeta
from scrapy.item import Field, Item, ItemMeta
class ItemTest(unittest.TestCase):

View File

@@ -6,7 +6,7 @@ import pytest
from scrapy.linkextractors import IGNORED_EXTENSIONS
from scrapy.spiders import Spider
from scrapy.utils.misc import arg_to_iter
from scrapy.utils.url import (
from scrapy.utils.url import ( # type: ignore[attr-defined]
_is_filesystem_path,
_public_w3lib_objects,
add_http_if_no_scheme,