How to save scraped results from multiple website pages into a CSV file?

Problem description

I am trying to scrape some ASINs (say 600 of them) from the Amazon website (just the ASINs) using selenium and beautifulsoup. My main problem is how to save all of the scraped data to a CSV file. I have tried a few things, but they only save the last scraped page.

Here is the code:

from time import sleep
import requests
import time
import json
import re
import sys
import numpy as np
from selenium import webdriver
import urllib.request
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.keys import Keys
import pandas as pd
from urllib.request import urlopen
from bs4 import BeautifulSoup


data_record = []  # collect the ASINs scraped from every page
i = 1
while(True):
    try:
        if i == 1:
            url = "https://www.amazon.es/s?k=doll&i=toys&rh=n%3A599385031&dc&page=1"
        else:
            url = "https://www.amazon.es/s?k=doll&i=toys&rh=n%3A599385031&dc&page={}".format(i)
        r = requests.get(url)
        soup = BeautifulSoup(r.content, 'html.parser')

        #print page url
        print(url)

        #rest of the scraping code
        driver = webdriver.Chrome()
        driver.get(url)

        HTML = driver.page_source
        HTML1=driver.page_source
        soup = BeautifulSoup(HTML1, "html.parser")
        styles = soup.find_all(name="div", attrs={"data-asin":True})
        res1 = [i.attrs["data-asin"] for i in soup.find_all("div") if i.has_attr("data-asin")]
        print(res1)
        data_record.append(res1)
        #driver.close()

        #don't overflow website
        sleep(1)

        #increase page number
        i += 1
        if i == 3:
            print("STOP!!!")
            break
    except:
        break



Tags: python, selenium, web-scraping

Solution


One possible solution is to remove the items that do not appear to be used at the moment. The code below strips the unused imports, moves the scraping into a function, and writes the collected ASINs to a CSV file:

import csv
import bs4
import requests
from selenium import webdriver
from time import sleep


def retrieve_asin_from(base_url, idx):
    url = base_url.format(idx)
    r = requests.get(url)
    soup = bs4.BeautifulSoup(r.content, 'html.parser')

    with webdriver.Chrome() as driver:
        driver.get(url)
        HTML1 = driver.page_source
        soup = bs4.BeautifulSoup(HTML1, "html.parser")
        res1 = [i.attrs["data-asin"]
                for i in soup.find_all("div") if i.has_attr("data-asin")]
    sleep(1)
    return res1


url = "https://www.amazon.es/s?k=doll&i=toys&rh=n%3A599385031&dc&page={}"
data_record = [retrieve_asin_from(url, i) for i in range(1, 4)]

combined_data_record = combine_records(data_record)  # function left for you to write; one possible sketch is below

with open('asin_data.csv', 'w', newline='') as fd:
    csvfile = csv.writer(fd)
    csvfile.writerows(combined_data_record)
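
combine_records is deliberately left for you to write. Below is a minimal sketch of one way it could look, under the assumption that you want one ASIN per CSV row and that duplicates appearing on more than one results page should be dropped (both are assumptions, not requirements stated in the answer):

def combine_records(data_record):
    # data_record is a list of per-page lists of ASIN strings.
    # Flatten it into one row per ASIN; each row must itself be an
    # iterable, otherwise csv.writer would split the ASIN into characters.
    seen = set()
    rows = []
    for page_asins in data_record:
        for asin in page_asins:
            if asin not in seen:  # drop duplicates across pages (assumption)
                seen.add(asin)
                rows.append([asin])
    return rows

Defined this way, the existing csvfile.writerows(combined_data_record) call writes each ASIN on its own line of asin_data.csv.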
