python - 我怎样才能让这个蜘蛛为每个项目列表导出一个 JSON 文件?
问题描述
在我的以下文件Reddit.py
中,它有这个蜘蛛:
import scrapy
class RedditSpider(scrapy.Spider):
    """Crawl old.reddit.com's front page and scrape title/author/score for
    each linked topic page; for topics scoring above 10000, additionally
    follow the author link and emit a user item with the author's karma.
    """

    name = 'Reddit'
    allowed_domains = ['reddit.com']
    start_urls = ['https://old.reddit.com']

    def parse(self, response):
        # Follow every comments link found on the front page.
        for link in response.css('li.first a.comments::attr(href)').extract():
            yield scrapy.Request(url=response.urljoin(link), callback=self.parse_topics)

    def parse_topics(self, response):
        """Scrape one topic page; chain to parse_user for popular topics."""
        topics = {}
        topics["title"] = response.css('a.title::text').extract_first()
        topics["author"] = response.css('p.tagline a.author::text').extract_first()
        # Fix: run the score selector once instead of twice — the original
        # issued the identical CSS query for the None-check and the value.
        score = response.css('div.score.likes::attr(title)').extract_first()
        topics["score"] = score if score is not None else "0"
        if int(topics["score"]) > 10000:
            # Popular topic: visit the author's profile, carrying the topic
            # item along in request meta so parse_user can re-emit it.
            author_url = response.css('p.tagline a.author::attr(href)').extract_first()
            yield scrapy.Request(url=response.urljoin(author_url),
                                 callback=self.parse_user,
                                 meta={'topics': topics})
        else:
            yield topics

    def parse_user(self, response):
        """Scrape the author's profile page; emit the user item followed by
        the topic item that was forwarded via request meta."""
        topics = response.meta.get('topics')
        users = {}
        users["name"] = topics["author"]
        users["karma"] = response.css('span.karma::text').extract_first()
        yield users
        yield topics
它的作用是从主页获取所有 URL old.reddit
,然后抓取每个 URL 的标题、作者和分数。
我添加的是第二部分,它检查分数是否高于10000,如果是,那么蜘蛛会转到用户的页面并从中刮掉他的业力。
我确实知道我可以从主题页面中刮掉业力,但我想这样做,因为我刮掉了用户页面的其他部分 那在主题页面中不存在.
我想要做的是:把包含 title、author、score 的 topics 列表导出到一个名为 topics.json 的 JSON 文件中;然后,当主题的分数高于 10000 时,把包含 name、karma 的 users 列表导出到另一个名为 users.json 的 JSON 文件中。
我只知道如何在命令行中使用
scrapy runspider Reddit.py -o Reddit.json
它会把所有条目都导出到名为 Reddit.json 的单个 JSON 文件中,但结构混乱:
[
{"name": "Username", "karma": "00000"},
{"title": "ExampleTitle1", "author": "Username", "score": "11000"},
{"name": "Username2", "karma": "00000"},
{"title": "ExampleTitle2", "author": "Username2", "score": "12000"},
{"name": "Username3", "karma": "00000"},
{"title": "ExampleTitle3", "author": "Username3", "score": "13000"},
{"title": "ExampleTitle4", "author": "Username4", "score": "9000"},
....
]
我对 Scrapy 的 Item Pipeline、Item Exporters 和 Feed Exporters 完全不了解,也不知道如何在我的 Spider 上实现或整体使用它们;我试图从文档中理解,但似乎还是不知道如何把它们用到我的蜘蛛上。
我想要的最终结果是两个文件:
主题.json
[
{"title": "ExampleTitle1", "author": "Username", "score": "11000"},
{"title": "ExampleTitle2", "author": "Username2", "score": "12000"},
{"title": "ExampleTitle3", "author": "Username3", "score": "13000"},
{"title": "ExampleTitle4", "author": "Username4", "score": "9000"},
....
]
用户.json
[
{"name": "Username", "karma": "00000"},
{"name": "Username2", "karma": "00000"},
{"name": "Username3", "karma": "00000"},
....
]
同时摆脱列表中的重复项。
解决方案
从下面的 SO 线程应用方法
我创建了一个示例刮板
import scrapy
class ExampleSpider(scrapy.Spider):
    """Minimal demo spider that emits a fixed mix of topic items, user
    items and unclassified items, used to exercise the multi-file exporter.
    """

    name = 'example'
    allowed_domains = ['example.com']
    start_urls = ['http://example.com/']

    def parse(self, response):
        # Same items in the same order as before, driven from one tuple
        # instead of nine separate yield statements.
        sample_items = (
            {"type": "unknown item"},
            {"title": "ExampleTitle1", "author": "Username", "score": "11000"},
            {"name": "Username", "karma": "00000"},
            {"name": "Username2", "karma": "00000"},
            {"someothertype": "unknown item"},
            {"title": "ExampleTitle2", "author": "Username2", "score": "12000"},
            {"title": "ExampleTitle3", "author": "Username3", "score": "13000"},
            {"title": "ExampleTitle4", "author": "Username4", "score": "9000"},
            {"name": "Username3", "karma": "00000"},
        )
        yield from sample_items
然后在exporters.py
from scrapy.exporters import JsonItemExporter
from scrapy.extensions.feedexport import FileFeedStorage
class JsonMultiFileItemExporter(JsonItemExporter):
    """JSON feed exporter that fans items out to per-type files.

    Items containing a "title" key go to topics.json, items containing a
    "karma" key go to users.json, and anything else falls through to the
    parent exporter (i.e. the feed file Scrapy opened for us).
    """

    types = ["topics", "users"]

    def __init__(self, file, **kwargs):
        super().__init__(file, **kwargs)
        self.files = {}
        self.kwargs = kwargs
        # One extra JsonItemExporter per known item type, each writing to
        # its own "<type>.json" storage.
        for itemtype in self.types:
            storage = FileFeedStorage(itemtype + ".json")
            handle = storage.open(None)
            self.files[itemtype] = JsonItemExporter(handle, **self.kwargs)

    def start_exporting(self):
        super().start_exporting()
        for sub_exporter in self.files.values():
            sub_exporter.start_exporting()

    def finish_exporting(self):
        super().finish_exporting()
        for sub_exporter in self.files.values():
            sub_exporter.finish_exporting()
            sub_exporter.file.close()

    def export_item(self, item):
        # Route the item to the matching per-type exporter, or let the
        # parent exporter handle anything we don't recognise.
        if "title" in item:
            target = self.files.get("topics")
        elif "karma" in item:
            target = self.files.get("users")
        else:
            target = None
        if target is None:
            super().export_item(item)
        else:
            target.export_item(item)
在下面添加settings.py
FEED_EXPORTERS = {
'json': 'testing.exporters.JsonMultiFileItemExporter',
}
运行刮板,我生成了 3 个文件
例子.json
[
{"type": "unknown item"},
{"someothertype": "unknown item"}
]
主题.json
[
{"title": "ExampleTitle1", "author": "Username", "score": "11000"},
{"title": "ExampleTitle2", "author": "Username2", "score": "12000"},
{"title": "ExampleTitle3", "author": "Username3", "score": "13000"},
{"title": "ExampleTitle4", "author": "Username4", "score": "9000"}
]
用户.json
[
{"name": "Username", "karma": "00000"},
{"name": "Username2", "karma": "00000"},
{"name": "Username3", "karma": "00000"}
]
推荐阅读
- java - 局部变量背后的想法是最终的,但在使用内部类时外部类字段不是
- javascript - 如何让 span 元素完全覆盖按钮?
- javascript - 如何通过登录 React Admin 来实现额外的自定义对话框(基于服务器响应)?
- postgresql - Postgresql对中等大表的简单查询非常慢
- flutter - 如何在 Dart 中为条件导入设置自定义属性?
- python - 使用python将html页面导出为pdf格式
- javascript - 回调函数不适用于 for 循环
- cisco - Cisco ios 命令了解 L2 和 L3 设备
- git - 如何避免 GIT 在合并时删除以前还原的更改?
- matplotlib - 逻辑回归的梯度下降不收敛