Scraping YouTube video URLs from a specific channel into JSON

Problem description

I'm trying to save the URLs obtained with this script to a JSON file, but I can't get it to work.

from bs4 import BeautifulSoup
from lxml import etree
import urllib.request
import requests
import sys

def fetch_titles(url):
    """Collect {"title", "url"} pairs for every video linked in the channel feed."""
    video_titles = []
    html = requests.get(url)
    soup = BeautifulSoup(html.text, "lxml")
    for entry in soup.find_all("entry"):
        for link in entry.find_all("link"):
            # Open each video page and read the title from its markup
            youtube = etree.HTML(urllib.request.urlopen(link["href"]).read())
            video_title = youtube.xpath("//span[@id='eow-title']/@title")
            if len(video_title) > 0:
                video_titles.append({"title": video_title[0], "url": link.attrs["href"]})
    return video_titles

def main():
    if len(sys.argv) == 1:
        print("Error: you should specify a keyword")
        print("eg: python3 ./main.py KEYWORD")
        return

    url = "https://www.youtube.com/feeds/videos.xml?user=LinusTechTips"
    keyword = sys.argv[1]

    video_titles = fetch_titles(url)
    for video in video_titles:
        if keyword in video["title"]:
            print(video["url"])
            break  # add this line if you want to print the first match only


if __name__ == "__main__":
    main()

My JSON file should have this simple structure:

{"url": "https://www.youtube.com/watch?v=xxx"}
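
For reference, writing a single URL in that shape with the standard json module looks like this (a minimal sketch; output.json is just an example file name):

import json

# Write one URL in the {"url": ...} shape shown above.
# "output.json" is an arbitrary example file name.
with open("output.json", "w") as f:
    json.dump({"url": "https://www.youtube.com/watch?v=xxx"}, f)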

Tags: python

Solution


If you want to print the first match and skip the others, the whole main.py would look like this:

from bs4 import BeautifulSoup
from lxml import etree
import urllib.request
import requests
import sys
import json

def fetch_titles(url):
    """Collect {"title", "url"} pairs for every video linked in the channel feed."""
    video_titles = []
    html = requests.get(url)
    soup = BeautifulSoup(html.text, "lxml")
    for entry in soup.find_all("entry"):
        for link in entry.find_all("link"):
            # Open each video page and read the title from its markup
            youtube = etree.HTML(urllib.request.urlopen(link["href"]).read())
            video_title = youtube.xpath("//span[@id='eow-title']/@title")
            if len(video_title) > 0:
                video_titles.append({"title": video_title[0], "url": link.attrs["href"]})
    return video_titles

def save_as_json(result, json_file_path):  # I've added this function to save the result as a JSON file
    data = json.dumps(result)
    print(data)
    with open(json_file_path, 'w') as file:
        file.write(data)

def main():
    if len(sys.argv) == 1:
        print("Error: you should specify a keyword")
        print("eg: python3 ./main.py KEYWORD")
        return
    json_file_path = "file.json"  # path of the JSON output file
    url = "https://www.youtube.com/feeds/videos.xml?user=LinusTechTips"
    keyword = sys.argv[1]

    video_titles = fetch_titles(url)
    # Every match overwrites the single "url" key, so iterating the reversed list
    # leaves the first match (in the original order) as the value that is kept.
    result = {"url": video["url"] for video in list(reversed(video_titles)) if keyword in video["title"]}

    save_as_json(result, json_file_path)      

if __name__ == "__main__":
    main()

Did you know? I wrote the Python code you mentioned in your question. It's the answer to this question!!

I replaced the foreach loop with a one-line comprehension and reversed the list with list(reversed(video_titles)) so that the first match is the one that gets kept.
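
To see why the reversal keeps the first match: in a dict comprehension every matching iteration overwrites the same "url" key, so whichever value is written last survives. Iterating the reversed list makes the original first match the last write. A tiny standalone illustration (the sample titles and URLs below are made up):

# Made-up sample data, for illustration only
videos = [
    {"title": "Linus reviews a GPU", "url": "https://www.youtube.com/watch?v=aaa"},
    {"title": "Linus builds a PC", "url": "https://www.youtube.com/watch?v=bbb"},
]
keyword = "Linus"

# Each match overwrites the single "url" key; because we iterate in reverse,
# the last write comes from the first match in the original order.
result = {"url": v["url"] for v in reversed(videos) if keyword in v["title"]}
print(result)  # {'url': 'https://www.youtube.com/watch?v=aaa'}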

Happy coding!

