
added lxml based link extractor for supporting sites with broken html

--HG--
extra : convert_revision : svn%3Ab85faa78-f9eb-468e-a121-7cced6da292c%40955
author Ismael Carnales 2009-03-02 19:49:42 +00:00
parent 127e5ae029
commit 875e8e57d3

@@ -1,10 +1,11 @@
 from HTMLParser import HTMLParser
+from lxml import etree
 from scrapy.link import Link
 from scrapy.utils.python import unique as unique_list
 from scrapy.utils.url import safe_url_string, urljoin_rfc as urljoin
 
-class LinkExtractor(HTMLParser):
+class HtmlParserLinkExtractor(HTMLParser):
     """LinkExtractor are used to extract links from web pages. They are
     instantiated and later "applied" to a Response using the extract_links
@@ -26,10 +27,10 @@ class LinkExtractor(HTMLParser):
       * a tag name which is used to search for links (defaults to "a")
       * a function which receives a tag name and returns whether to scan it
     * attr (string or function)
-      * an attribute name which is used to search for links (defaults to "href")
-      * a function which receives an attribute name and returns whether to scan it
+      * an attrsute name which is used to search for links (defaults to "href")
+      * a function which receives an attrsute name and returns whether to scan it
     * process (funtion)
-      * a function wich receives the attribute value before assigning it
+      * a function wich receives the attrsute value before assigning it
     * unique - if True the same urls won't be extracted twice, otherwise the
       same urls will be extracted multiple times (with potentially different link texts)
     """
@@ -94,3 +95,68 @@ class LinkExtractor(HTMLParser):
         it doesn't contain any patterns"""
         return True
+
+
+class LxmlLinkExtractor(object):
+
+    def __init__(self, tag="a", attr="href", process=None, unique=False):
+        scan_tag = tag if callable(tag) else lambda t: t == tag
+        scan_attr = attr if callable(attr) else lambda a: a == attr
+        process_attr = process if callable(process) else lambda v: v
+        self.unique = unique
+
+        target = LinkTarget(scan_tag, scan_attr, process_attr)
+        self.parser = etree.HTMLParser(target=target)
+
+    def _extract_links(self, response_text, response_url, response_encoding):
+        self.base_url, self.links = etree.HTML(response_text, self.parser)
+
+        links = unique_list(self.links, key=lambda link: link.url) if self.unique else self.links
+
+        ret = []
+        base_url = self.base_url if self.base_url else response_url
+        for link in links:
+            link.url = urljoin(base_url, link.url)
+            link.url = safe_url_string(link.url, response_encoding)
+            link.text = link.text.decode(response_encoding)
+            ret.append(link)
+
+        return ret
+
+    def extract_links(self, response):
+        # wrapper needed to allow working directly with text
+        return self._extract_links(response.body, response.url,
+                                   response.encoding)
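The lxml parser above is built once around a LinkTarget (defined below) and reused; with a target parser, etree.HTML() returns whatever the target's close() returns, which is how _extract_links gets (base_url, links) back. A minimal sketch of driving the extractor directly with text, in Python 2 like the rest of the file:

html = '<html><body><a href="/about">About us</a></body></html>'

lx = LxmlLinkExtractor(unique=True)
for link in lx._extract_links(html, 'http://example.com/', 'utf-8'):
    print link.url, link.text
# http://example.com/about About us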
+
+
+class LinkTarget(object):
+
+    def __init__(self, scan_tag, scan_attr, process_attr):
+        self.scan_tag = scan_tag
+        self.scan_attr = scan_attr
+        self.process_attr = process_attr
+        self.base_url = None
+        self.links = []
+        self.current_link = None
+
+    def start(self, tag, attrs):
+        if tag == 'base':
+            self.base_url = dict(attrs).get('href')
+        if self.scan_tag(tag):
+            for attr, value in attrs.iteritems():
+                if self.scan_attr(attr):
+                    url = self.process_attr(value)
+                    link = Link(url=url)
+                    self.links.append(link)
+                    self.current_link = link
+
+    def end(self, tag):
+        self.current_link = None
+
+    def data(self, data):
+        if self.current_link and not self.current_link.text:
+            self.current_link.text = data.strip()
+
+    def close(self):
+        return self.base_url, self.links
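LinkTarget implements lxml's target parser interface: while parsing, etree feeds start/end/data events to the target, and the value returned by close() becomes the result of etree.HTML(). A self-contained sketch of that same protocol, independent of the classes in this diff:

from lxml import etree

class TitleTarget(object):
    """Collects the text inside <title> via the same
    start/end/data/close events that LinkTarget handles."""

    def __init__(self):
        self.inside = False
        self.chunks = []

    def start(self, tag, attrs):
        if tag == 'title':
            self.inside = True

    def end(self, tag):
        if tag == 'title':
            self.inside = False

    def data(self, data):
        if self.inside:
            self.chunks.append(data)

    def close(self):
        # this return value is what etree.HTML() hands back
        return ''.join(self.chunks)

parser = etree.HTMLParser(target=TitleTarget())
print etree.HTML('<title>Broken html</title>', parser)   # Broken html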