mirror of https://github.com/scrapy/scrapy.git synced 2025-02-06 11:00:46 +00:00

Remove deprecated classes BaseSgmlLinkExtractor, RegexLinkExtractor and SgmlLinkExtractor (#4356)

Artur Shellunts 2020-07-17 12:40:04 +02:00 committed by GitHub
parent d29bec60d7
commit 62a4ede5e9
5 changed files with 2 additions and 196 deletions
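
For code still importing the removed classes, the deprecation warnings in the deleted modules below already name the replacement: scrapy.linkextractors.LinkExtractor. A minimal migration sketch (the URL patterns are illustrative, not part of this commit):

    from scrapy.linkextractors import LinkExtractor

    # Before (removed by this commit):
    #   from scrapy.linkextractors.sgml import SgmlLinkExtractor
    #   lx = SgmlLinkExtractor(allow=(r'/category/',), deny=(r'/login',))

    # After: LinkExtractor accepts the same filtering keywords
    # (allow, deny, allow_domains, restrict_xpaths, restrict_css, ...)
    lx = LinkExtractor(allow=(r'/category/',), deny=(r'/login',))

    # lx.extract_links(response) returns a list of scrapy.link.Link objects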

scrapy/linkextractors/regex.py (deleted)

@@ -1,41 +0,0 @@
import re
from urllib.parse import urljoin

from w3lib.html import remove_tags, replace_entities, replace_escape_chars, get_base_url

from scrapy.link import Link
from scrapy.linkextractors.sgml import SgmlLinkExtractor


linkre = re.compile(
    "<a\s.*?href=(\"[.#]+?\"|\'[.#]+?\'|[^\s]+?)(>|\s.*?>)(.*?)<[/ ]?a>",
    re.DOTALL | re.IGNORECASE)


def clean_link(link_text):
    """Remove leading and trailing whitespace and punctuation"""
    return link_text.strip("\t\r\n '\"\x0c")


class RegexLinkExtractor(SgmlLinkExtractor):
    """High performant link extractor"""

    def _extract_links(self, response_text, response_url, response_encoding, base_url=None):
        def clean_text(text):
            return replace_escape_chars(remove_tags(text.decode(response_encoding))).strip()

        def clean_url(url):
            clean_url = ''
            try:
                clean_url = urljoin(base_url, replace_entities(clean_link(url.decode(response_encoding))))
            except ValueError:
                pass
            return clean_url

        if base_url is None:
            base_url = get_base_url(response_text, response_url, response_encoding)

        links_text = linkre.findall(response_text)
        return [Link(clean_url(url).encode(response_encoding),
                     clean_text(text))
                for url, _, text in links_text]
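
For reference, the removed clean_link helper only trims surrounding whitespace and quote characters from a matched href value; a quick illustration with a hypothetical input:

    clean_link('\t "http://example.com/page" \n')  # -> 'http://example.com/page'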

scrapy/linkextractors/sgml.py (deleted)

@@ -1,151 +0,0 @@
"""
SGMLParser-based Link extractors
"""
import warnings
from urllib.parse import urljoin
from sgmllib import SGMLParser
from w3lib.url import safe_url_string, canonicalize_url
from w3lib.html import strip_html5_whitespace
from scrapy.link import Link
from scrapy.linkextractors import FilteringLinkExtractor
from scrapy.utils.misc import arg_to_iter, rel_has_nofollow
from scrapy.utils.python import unique as unique_list, to_unicode
from scrapy.utils.response import get_base_url
from scrapy.exceptions import ScrapyDeprecationWarning
class BaseSgmlLinkExtractor(SGMLParser):
def __init__(self, tag="a", attr="href", unique=False, process_value=None,
strip=True, canonicalized=False):
warnings.warn(
"BaseSgmlLinkExtractor is deprecated and will be removed in future releases. "
"Please use scrapy.linkextractors.LinkExtractor",
ScrapyDeprecationWarning, stacklevel=2,
)
SGMLParser.__init__(self)
self.scan_tag = tag if callable(tag) else lambda t: t == tag
self.scan_attr = attr if callable(attr) else lambda a: a == attr
self.process_value = (lambda v: v) if process_value is None else process_value
self.current_link = None
self.unique = unique
self.strip = strip
if canonicalized:
self.link_key = lambda link: link.url
else:
self.link_key = lambda link: canonicalize_url(link.url,
keep_fragments=True)
def _extract_links(self, response_text, response_url, response_encoding, base_url=None):
""" Do the real extraction work """
self.reset()
self.feed(response_text)
self.close()
ret = []
if base_url is None:
base_url = urljoin(response_url, self.base_url) if self.base_url else response_url
for link in self.links:
if isinstance(link.url, str):
link.url = link.url.encode(response_encoding)
try:
link.url = urljoin(base_url, link.url)
except ValueError:
continue
link.url = safe_url_string(link.url, response_encoding)
link.text = to_unicode(link.text, response_encoding, errors='replace').strip()
ret.append(link)
return ret
def _process_links(self, links):
""" Normalize and filter extracted links
The subclass should override it if necessary
"""
return unique_list(links, key=self.link_key) if self.unique else links
def extract_links(self, response):
# wrapper needed to allow to work directly with text
links = self._extract_links(response.body, response.url, response.encoding)
links = self._process_links(links)
return links
def reset(self):
SGMLParser.reset(self)
self.links = []
self.base_url = None
self.current_link = None
def unknown_starttag(self, tag, attrs):
if tag == 'base':
self.base_url = dict(attrs).get('href')
if self.scan_tag(tag):
for attr, value in attrs:
if self.scan_attr(attr):
if self.strip and value is not None:
value = strip_html5_whitespace(value)
url = self.process_value(value)
if url is not None:
link = Link(url=url, nofollow=rel_has_nofollow(dict(attrs).get('rel')))
self.links.append(link)
self.current_link = link
def unknown_endtag(self, tag):
if self.scan_tag(tag):
self.current_link = None
def handle_data(self, data):
if self.current_link:
self.current_link.text = self.current_link.text + data
def matches(self, url):
"""This extractor matches with any url, since
it doesn't contain any patterns"""
return True
class SgmlLinkExtractor(FilteringLinkExtractor):
def __init__(self, allow=(), deny=(), allow_domains=(), deny_domains=(), restrict_xpaths=(),
tags=('a', 'area'), attrs=('href',), canonicalize=False, unique=True,
process_value=None, deny_extensions=None, restrict_css=(),
strip=True, restrict_text=()):
warnings.warn(
"SgmlLinkExtractor is deprecated and will be removed in future releases. "
"Please use scrapy.linkextractors.LinkExtractor",
ScrapyDeprecationWarning, stacklevel=2,
)
tags, attrs = set(arg_to_iter(tags)), set(arg_to_iter(attrs))
tag_func = lambda x: x in tags
attr_func = lambda x: x in attrs
with warnings.catch_warnings():
warnings.simplefilter('ignore', ScrapyDeprecationWarning)
lx = BaseSgmlLinkExtractor(tag=tag_func, attr=attr_func,
unique=unique, process_value=process_value, strip=strip,
canonicalized=canonicalize)
super(SgmlLinkExtractor, self).__init__(lx, allow=allow, deny=deny,
allow_domains=allow_domains, deny_domains=deny_domains,
restrict_xpaths=restrict_xpaths, restrict_css=restrict_css,
canonicalize=canonicalize, deny_extensions=deny_extensions,
restrict_text=restrict_text)
def extract_links(self, response):
base_url = None
if self.restrict_xpaths:
base_url = get_base_url(response)
body = u''.join(f
for x in self.restrict_xpaths
for f in response.xpath(x).getall()
).encode(response.encoding, errors='xmlcharrefreplace')
else:
body = response.body
links = self._extract_links(body, response.url, response.encoding, base_url)
links = self._process_links(links)
return links
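
Everything SgmlLinkExtractor did here (the filtering keywords, restrict_xpaths handling, extract_links) is covered by scrapy.linkextractors.LinkExtractor. A sketch of equivalent post-removal usage in a CrawlSpider; the spider name, domain, and XPath are hypothetical:

    from scrapy.linkextractors import LinkExtractor
    from scrapy.spiders import CrawlSpider, Rule


    class ExampleSpider(CrawlSpider):
        # Illustrative values only
        name = 'example'
        allowed_domains = ['example.com']
        start_urls = ['http://example.com/']

        rules = (
            # LinkExtractor takes the same keyword arguments that
            # SgmlLinkExtractor accepted (allow, deny, restrict_xpaths, ...)
            Rule(LinkExtractor(restrict_xpaths=('//div[@id="wrapper"]',)),
                 callback='parse_item', follow=True),
        )

        def parse_item(self, response):
            yield {'url': response.url}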


@@ -1,5 +1,3 @@
-scrapy/linkextractors/sgml.py
-scrapy/linkextractors/regex.py
scrapy/downloadermiddlewares/cookies.py
scrapy/extensions/statsmailer.py
scrapy/extensions/memusage.py


@@ -1,7 +1,7 @@
<html>
<head>
<base href='http://example.com' />
-<title>Sample page with links for testing RegexLinkExtractor</title>
+<title>Sample page with links for testing LinkExtractor</title>
</head>
<body>
<div id='wrapper'>


@@ -2,7 +2,7 @@
<head>
<meta http-equiv="Content-Type" content="text/html; charset=latin-1">
<base href='http://example.com' />
-<title>Sample page with links for testing RegexLinkExtractor</title>
+<title>Sample page with links for testing LinkExtractor</title>
</head>
<body>
<div id='wrapper'>