python - How to write Python Scrapy code to extract the URLs in a site's sitemap and export them to CSV
Problem description
I found a working solution for writing Python Scrapy code to extract the URLs in a site's sitemap from here, but I don't know how to export the data to a CSV file!
When I try to run scrapy crawl myspider -o mydata.csv it returns an empty CSV file, but the list of URLs is printed on the screen!
# -*- coding: utf-8 -*-
import scrapy
from scrapy.spiders import SitemapSpider
from scrapy.spiders import Spider
from scrapy.http import Request, XmlResponse
from scrapy.utils.sitemap import Sitemap, sitemap_urls_from_robots
from scrapy.utils.gz import gunzip, is_gzipped
import re
import requests


class GetpagesfromsitemapSpider(SitemapSpider):
    name = "myspider"
    handle_httpstatus_list = [404]

    def parse(self, response):
        print(response.url)

    def _parse_sitemap(self, response):
        if response.url.endswith('/robots.txt'):
            for url in sitemap_urls_from_robots(response.body):
                yield Request(url, callback=self._parse_sitemap)
        else:
            body = self._get_sitemap_body(response)
            if body is None:
                self.logger.info('Ignoring invalid sitemap: %s', response.url)
                return
            s = Sitemap(body)
            sites = []
            if s.type == 'sitemapindex':
                for loc in iterloc(s, self.sitemap_alternate_links):
                    if any(x.search(loc) for x in self._follow):
                        yield Request(loc, callback=self._parse_sitemap)
            elif s.type == 'urlset':
                for loc in iterloc(s):
                    for r, c in self._cbs:
                        if r.search(loc):
                            sites.append(loc)
                            break
                print(sites)

    def __init__(self, spider=None, *a, **kw):
        super(GetpagesfromsitemapSpider, self).__init__(*a, **kw)
        self.spider = spider
        l = []
        url = "http://www.example.com/"
        resp = requests.head(url + "/sitemap.xml")
        if (resp.status_code != 404):
            l.append(resp.url)
        else:
            resp = requests.head(url + "/robots.txt")
            if (resp.status_code == 200):
                l.append(resp.url)
        self.sitemap_urls = l
        print(self.sitemap_urls)


def iterloc(it, alt=False):
    for d in it:
        yield d['loc']
        # Also consider alternate URLs (xhtml:link rel="alternate")
        if alt and 'alternate' in d:
            for l in d['alternate']:
                yield l
Solution
First, you are not making any requests with scrapy, and you are also combining scrapy with requests, which I think is not the best idea. Try changing __init__ to:
    def start_requests(self):
        l = []
        url = "http://www.example.com"
        l.append(url + '/sitemap.xml')
        l.append(url + '/robots.txt')
        for link in l:
            yield Request(link, callback=self._parse_sitemap)
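This way the sitemap.xml and robots.txt URLs are fetched through Scrapy's own scheduler and downloader instead of blocking in the constructor with requests.head. And since handle_httpstatus_list already includes 404, a missing sitemap.xml still reaches _parse_sitemap, where _get_sitemap_body returns None and the response is logged and ignored.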
Also, your self._parse_sitemap should return a dict-like item or a Request (and not only your self._parse_sitemap: every callback in a scrapy spider should, see the documentation):
    def _parse_sitemap(self, response):
        # handle status responses (200, 401, etc.) here
        body = self._get_sitemap_body(response)
        if body is None:
            self.logger.info('Ignoring invalid sitemap: %s', response.url)
            return
        s = Sitemap(body)
        if s.type == 'sitemapindex':
            for loc in iterloc(s, self.sitemap_alternate_links):
                if any(x.search(loc) for x in self._follow):
                    yield Request(loc, callback=self._parse_sitemap)
        elif s.type == 'urlset':
            for loc in iterloc(s):
                for r, c in self._cbs:
                    if r.search(loc):
                        # Yield a dict-like item instead of printing it!
                        # Yielded items are what populate your .csv file.
                        yield {'url': loc}
                        break
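With one dict yielded per URL, scrapy crawl myspider -o mydata.csv now writes one row per extracted link. Illustrative output (the page URLs below are hypothetical):

url
http://www.example.com/page-1
http://www.example.com/page-2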
The whole file (it may not work as-is, but it illustrates the idea):
# -*- coding: utf-8 -*-
import scrapy
from scrapy.spiders import SitemapSpider
from scrapy.spiders import Spider
from scrapy.http import Request, XmlResponse
from scrapy.utils.sitemap import Sitemap, sitemap_urls_from_robots
from scrapy.utils.gz import gunzip, is_gzipped
import re
import requests


class GetpagesfromsitemapSpider(SitemapSpider):
    name = "myspider"
    handle_httpstatus_list = [404]

    def parse(self, response):
        print(response.url)

    def _parse_sitemap(self, response):
        # handle status responses (200, 401, etc.) here
        body = self._get_sitemap_body(response)
        if body is None:
            self.logger.info('Ignoring invalid sitemap: %s', response.url)
            return
        s = Sitemap(body)
        if s.type == 'sitemapindex':
            for loc in iterloc(s, self.sitemap_alternate_links):
                if any(x.search(loc) for x in self._follow):
                    yield Request(loc, callback=self._parse_sitemap)
        elif s.type == 'urlset':
            for loc in iterloc(s):
                for r, c in self._cbs:
                    if r.search(loc):
                        # Yield a dict-like item instead of printing it!
                        # Yielded items are what populate your .csv file.
                        yield {'url': loc}
                        break

    def start_requests(self):
        l = []
        url = "http://www.example.com"
        l.append(url + '/sitemap.xml')
        l.append(url + '/robots.txt')
        for link in l:
            yield Request(link, callback=self._parse_sitemap)


def iterloc(it, alt=False):
    for d in it:
        yield d['loc']
        # Also consider alternate URLs (xhtml:link rel="alternate")
        if alt and 'alternate' in d:
            for l in d['alternate']:
                yield l
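As a side note, SitemapSpider can already follow sitemap indexes on its own, and a robots.txt URL listed in sitemap_urls is parsed for the sitemaps it declares, so a much shorter variant may be possible. A minimal sketch (the spider name and example.com URL are placeholders; unlike the code above, this downloads every page in the sitemap, because parse is called on each response):

from scrapy.spiders import SitemapSpider


class SitemapUrlSpider(SitemapSpider):
    name = "sitemapurls"
    # SitemapSpider extracts sitemap URLs from a robots.txt entry itself.
    sitemap_urls = ["http://www.example.com/robots.txt"]

    def parse(self, response):
        # Called once per sitemap URL; each dict becomes a CSV row with -o.
        yield {"url": response.url}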