首页 > 解决方案 > 如何从新闻网站上抓取所有评论?

问题描述

我一直在尝试抓取新闻网站的一些内容,例如新闻描述、标签、评论等。描述和标签都已成功抓取。但是在抓取评论时,虽然在浏览器中检查页面可以看到评论元素,通过 BeautifulSoup 查找到该标签后,里面却没有任何评论内容。

我只想抓取页面中的所有评论(也包括嵌套评论),并将它们作为单个字符串保存在 csv 文件中。


# Scrape a Prothom Alo news article: collect its topic tags, body text,
# and (attempted) reader comments from the server-rendered HTML.
import requests
import bs4
from time import sleep
import os

url = 'https://www.prothomalo.com/bangladesh/article/1573772/%E0%A6%AC%E0%A6%BE%E0%A6%82%E0%A6%B2%E0%A6%BE%E0%A6%A6%E0%A7%87%E0%A6%B6%E0%A6%BF-%E0%A6%AA%E0%A6%BE%E0%A6%B8%E0%A6%AA%E0%A7%8B%E0%A6%B0%E0%A7%8D%E0%A6%9F%E0%A6%A7%E0%A6%BE%E0%A6%B0%E0%A7%80-%E0%A6%B0%E0%A7%8B%E0%A6%B9%E0%A6%BF%E0%A6%99%E0%A7%8D%E0%A6%97%E0%A6%BE%E0%A6%B0%E0%A6%BE-%E0%A6%B8%E0%A7%8C%E0%A6%A6%E0%A6%BF-%E0%A6%A5%E0%A7%87%E0%A6%95%E0%A7%87-%E0%A6%A2%E0%A6%BE%E0%A6%95%E0%A6%BE%E0%A7%9F'

# Fetch the raw HTML; abort if the server does not answer within 3 seconds.
resource = requests.get(url, timeout = 3.0)

# Parse the server-rendered page with the lxml backend.
soup = bs4.BeautifulSoup(resource.text, 'lxml')

# working as expected
# Join every topic-tag link's text into one '|'-separated string
# (leaves a trailing '|' after the last tag).
tags = soup.find('div', {'class':'topic_list'})
tag = ''
tags = tags.findAll('a', {'':''})
for t in range(len(tags)):
    tag = tag + tags[t].text + '|'

# working as expected
# Concatenate all paragraph text inside the article body.
content_tag = soup.find('div', {'itemprop':'articleBody'})
content_all = content_tag.findAll('p', {'':''})
content = ''
for c in range(len(content_all)):
    content = content + content_all[c].text

# comments not found
# The container div exists in the HTML, but it is empty: the comments are
# injected later by JavaScript (AJAX), so the plain requests response
# never contains them.
comment = soup.find('div', {'class':'comments_holder'})
print(comment)

控制台输出:

<div class="comments_holder">
<div class="comments_holder_inner">
<div class="comments_loader"> </div>
<ul class="comments_holder_ul latest">
</ul>
</div>
</div>

标签: python、web-scraping、beautifulsoup

解决方案


您在 Firefox 开发者工具中看到的内容,并不是您通过 requests 实际获取到的内容。评论是通过 AJAX 单独加载的,返回格式为 JSON。

# Fetch an article's reader comments directly from the site's AJAX endpoint,
# which returns them as JSON keyed by comment id (nested replies included via
# the "parent" field).
import re
import json
import requests

url = 'https://www.prothomalo.com/bangladesh/article/1573772/%E0%A6%AC%E0%A6%BE%E0%A6%82%E0%A6%B2%E0%A6%BE%E0%A6%A6%E0%A7%87%E0%A6%B6%E0%A6%BF-%E0%A6%AA%E0%A6%BE%E0%A6%B8%E0%A6%AA%E0%A7%8B%E0%A6%B0%E0%A7%8D%E0%A6%9F%E0%A6%A7%E0%A6%BE%E0%A6%B0%E0%A7%80-%E0%A6%B0%E0%A7%8B%E0%A6%B9%E0%A6%BF%E0%A6%99%E0%A7%8D%E0%A6%97%E0%A6%BE%E0%A6%B0%E0%A6%BE-%E0%A6%B8%E0%A7%8C%E0%A6%A6%E0%A6%BF-%E0%A6%A5%E0%A7%87%E0%A6%95%E0%A7%87-%E0%A6%A2%E0%A6%BE%E0%A6%95%E0%A6%BE%E0%A7%9F'

# The comments endpoint only needs the numeric article id from the URL path.
comment_url = 'https://www.prothomalo.com/api/comments/get_comments_json/?content_id={}'

# Extract the id safely: fail with a clear message instead of an IndexError
# if the URL shape ever changes.
match = re.search(r'article/(\d+)', url)
if match is None:
    raise ValueError('could not find an article id in the URL')
article_id = match.group(1)

# A timeout keeps the script from hanging if the API is unresponsive.
comment_data = requests.get(comment_url.format(article_id), timeout=10).json()

print(json.dumps(comment_data, indent=4))

输出:

{
    "5529951": {
        "comment_id": "5529951",
        "parent": "0",
        "label_depth": "0",
        "commenter_name": "MD Asif Iqbal",
        "commenter_image": "//profiles.prothomalo.com/profile/999009/picture/",
        "comment": "\u098f\u0987 \u09ad\u09be\u09b0 \u09ac\u09be\u0982\u09b2\u09be\u09a6\u09c7\u09b6\u0995\u09c7 \u09b8\u09be\u09b0\u09be\u099c\u09c0\u09ac\u09a8 \u09ac\u09b9\u09a8 \u0995\u09b0\u09a4\u09c7 \u09b9\u09ac\u09c7",
        "create_time": "2019-01-08 19:59",
        "comment_status": "published",
        "like_count": "\u09e6",
        "dislike_count": "\u09e6",
        "like_me": null,
        "dislike_me": null,
        "device": "phone",
        "content_id": "1573772"
    },
    "5529952": {
        "comment_id": "5529952",
        "parent": "0",

... and so on.

推荐阅读