use "import scrapy" in templates
commit 4ddad88d6f
parent 62c7daf785
@@ -3,9 +3,9 @@
 # See documentation in:
 # http://doc.scrapy.org/en/latest/topics/items.html
 
-from scrapy.item import Item, Field
+import scrapy
 
-class ${ProjectName}Item(Item):
+class ${ProjectName}Item(scrapy.Item):
     # define the fields for your item here like:
-    # name = Field()
+    # name = scrapy.Field()
     pass
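For reference, rendering the updated items template for a hypothetical project named Example (the project name and the ExampleItem class are placeholders for illustration, not part of this commit) would yield an items module along these lines:

    import scrapy

    class ExampleItem(scrapy.Item):
        # define the fields for your item here like:
        # name = scrapy.Field()
        pass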
@@ -1,6 +1,6 @@
-from scrapy.spider import Spider
+import scrapy
 
-class $classname(Spider):
+class $classname(scrapy.Spider):
     name = "$name"
     allowed_domains = ["$domain"]
     start_urls = (
@@ -8,4 +8,4 @@ class $classname(Spider):
     )
 
     def parse(self, response):
-        pass
+        pass
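As a sketch only, a basic spider generated from the updated template would look roughly like the following; the spider name, domain, and start URL are assumed placeholders, since the template's start URL line sits outside the hunks shown above:

    import scrapy

    class ExampleSpider(scrapy.Spider):
        name = "example"
        allowed_domains = ["example.com"]
        start_urls = (
            'http://www.example.com/',  # assumed placeholder start URL
        )

        def parse(self, response):
            pass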
@@ -1,4 +1,4 @@
-from scrapy.selector import Selector
+import scrapy
 from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
 from scrapy.contrib.spiders import CrawlSpider, Rule
 from $project_name.items import ${ProjectName}Item
@@ -13,7 +13,7 @@ class $classname(CrawlSpider):
     )
 
     def parse_item(self, response):
-        sel = Selector(response)
+        sel = scrapy.Selector(response)
         i = ${ProjectName}Item()
         #i['domain_id'] = sel.xpath('//input[@id="sid"]/@value').extract()
         #i['name'] = sel.xpath('//div[@id="name"]').extract()
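Likewise, a crawl spider generated from the updated template keeps its contrib imports and only moves the selector access to the scrapy namespace. The sketch below fills in assumed placeholder values (project and spider names, the link-extraction rule, and the commented field XPaths' surroundings) around the lines shown in the hunks:

    import scrapy
    from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
    from scrapy.contrib.spiders import CrawlSpider, Rule
    from example.items import ExampleItem  # generated items module, assumed name

    class ExampleSpider(CrawlSpider):
        name = 'example'
        allowed_domains = ['example.com']
        start_urls = ['http://www.example.com/']

        # assumed placeholder rule; the template's own rules are outside this hunk
        rules = (
            Rule(SgmlLinkExtractor(allow=r'Items/'), callback='parse_item', follow=True),
        )

        def parse_item(self, response):
            # scrapy.Selector is the same class as the previously imported
            # scrapy.selector.Selector, so extraction behaves identically
            sel = scrapy.Selector(response)
            i = ExampleItem()
            #i['domain_id'] = sel.xpath('//input[@id="sid"]/@value').extract()
            #i['name'] = sel.xpath('//div[@id="name"]').extract()
            return i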