My script won't move on to the next page when scraping

Problem description

I have written a web-scraping script and everything works fine except the next-page part. When I run the code to scrape data from the website, it only scrapes the first page and never moves on to the other pages. I'm actually new to web scraping with Python, so please guide me. Could you fix my code? Please take a look at it and help me, thanks.

Here is my code:

import requests
from bs4 import BeautifulSoup
#import pandas as pd
#import pandas as pd
import csv

def get_page(url):
    response = requests.get(url)
    if not response.ok:
        print('server responded:', response.status_code)
    else:
        soup = BeautifulSoup(response.text, 'html.parser') # 1. html , 2. parser
    return soup

def get_detail_page(soup):

     try:
        title = (soup.find('h1',class_="cdm_style",id=False).text)
     except:
         title = 'Empty Title'
     try:
         collection = (soup.find('td',id="metadata_collec").find('a').text)
     except:
         collection = "Empty Collection"
     try:
         author = (soup.find('td',id="metadata_creato").text)
     except:
         author = "Empty Author"
     try:
         abstract = (soup.find('td',id="metadata_descri").text)
     except:
         abstract = "Empty Abstract"
     try:
         keywords = (soup.find('td',id="metadata_keywor").text)
     except:
         keywords = "Empty Keywords"
     try:
         publishers = (soup.find('td',id="metadata_publis").text)
     except:
         publishers = "Empty Publishers"
     try:
         date_original = (soup.find('td',id="metadata_contri").text)
     except:
         date_original = "Empty Date original"
     try:
        date_digital = (soup.find('td',id="metadata_date").text)
     except:
        date_digital = "Empty Date digital"
     try:
        formatt = (soup.find('td',id="metadata_source").text)
     except:
        formatt = "Empty Format"
     try:
        release_statement = (soup.find('td',id="metadata_rights").text)
     except:
        release_statement = "Empty Realease Statement"
     try:
        library = (soup.find('td',id="metadata_librar").text)
     except:
        library = "Empty Library"
     try:
        date_created = (soup.find('td',id="metadata_dmcreated").text)
     except:
        date_created = "Empty date Created"
     data = {
         'Title'        : title.strip(),
         'Collection'   : collection.strip(),
         'Author'       : author.strip(),
         'Abstract'     : abstract.strip(),
         'Keywords'     : keywords.strip(),
         'Publishers'   : publishers.strip(),
         'Date_original': date_original.strip(),
         'Date_digital' : date_digital.strip(),
         'Format'       : formatt.strip(),
         'Release-st'   : release_statement.strip(),
         'Library'      : library.strip(),
         'Date_created' : date_created.strip()
         

     }
     return data
def get_index_data(soup):
    try:
        titles_link = soup.find_all('a',class_="body_link_11")
    except:
        titles_link = []
    else:
        titles_link_output = []
        for link in titles_link:
            try:
                item_id = link.attrs.get('item_id', None) #All titles with valid links will have an item_id
                if item_id:
                    titles_link_output.append("{}{}".format("http://cgsc.cdmhost.com",link.attrs.get('href', None)))
            except:
                continue
    return titles_link_output
def write_csv(data,url):
    with open('11_to_55.csv','a') as csvfile:
        writer = csv.writer(csvfile)
        row = [data['Title'], data['Collection'], data['Author'],
        data['Abstract'], data['Keywords'], data['Publishers'], data['Date_original'],
        data['Date_digital'], data['Format'], data['Release-st'], data['Library'],
        data['Date_created'], url]
        writer.writerow(row)
def main():
    #url = "http://cgsc.cdmhost.com/cdm/singleitem/collection/p4013coll8/id/2653/rec/1"
    #get_page(url)
    for x in range(1,4):
        mainurl = ("http://cgsc.cdmhost.com/cdm/search/collection/p4013coll8/searchterm/1/field/all/mode/all/conn/and/order/nosort/page/")
        print(x)
        url = (mainurl + str(x))
        products = get_index_data(get_page(url))
        for product in products:
            data1 = get_detail_page(get_page(product))
            write_csv(data1,product)
    #write_csv(data,url)


if __name__ == '__main__':
    main()

Tags: python, web-scraping, beautifulsoup

Solution


I started out trying to work out why it wasn't loading the next page properly, but before I found the answer I found another way to get the data you're looking for. On the results page there is an option to change the number of results returned. I changed it to 10000, and now every item in the collection loads on a single page.

If that isn't what you want and you would still rather fix the page-change problem, let me know and I'll take another look.

cgsc.cdmhost.com/cdm/search/collection/p4013coll8/searchterm/1/field/all/mode/all/conn/and/display/10000/order/nosort/ad/asc

I tested loading the index page, but I did not test the detail pages; I didn't want to download the whole collection.
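
For reference, here is a minimal sketch of that kind of check (not necessarily how the original test was run), assuming the result links still use the body_link_11 class your script already targets:

import requests
from bs4 import BeautifulSoup

# Quick check that the display/10000 URL returns every item link on one results page.
search_url = ("http://cgsc.cdmhost.com/cdm/search/collection/p4013coll8/searchterm/1"
              "/field/all/mode/all/conn/and/display/10000/order/nosort/ad/asc")

response = requests.get(search_url, headers={'User-Agent': 'Mozilla/5.0'})
soup = BeautifulSoup(response.text, 'html.parser')

# The result titles are the anchors with the body_link_11 class.
links = soup.find_all('a', class_="body_link_11")
print(len(links), "result links found on one page")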

Here are the changes I made, along with a few suggestions.

  1. There is no longer any need to loop over index pages; the page now returns all the items at once.
  2. Be specific about which exception you are catching. In this case it is AttributeError.
  3. Add a user agent to your requests; many websites block requests that do not have one. (Points 2 and 3 are shown on their own in the short sketch after this list.)
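
To show points 2 and 3 in isolation before the full script, here is a minimal sketch; get_field is just an illustrative helper, not part of the original code, and the detail-page URL is the commented-out example from the question:

import requests
from bs4 import BeautifulSoup

def get_field(soup, field_id):
    # Catch only AttributeError: that is what you get when .find() returns None
    # and you read .text, rather than a bare except that hides every other problem.
    try:
        return soup.find('td', id=field_id).text.strip()
    except AttributeError:
        return "Empty " + field_id

# A browser-like user agent; many sites block the default python-requests one.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64)'}

url = "http://cgsc.cdmhost.com/cdm/singleitem/collection/p4013coll8/id/2653/rec/1"
response = requests.get(url, headers=headers)
soup = BeautifulSoup(response.text, 'html.parser')
print(get_field(soup, "metadata_creato"))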

Good luck!

import requests
from bs4 import BeautifulSoup
#import pandas as pd
#import pandas as pd
import csv

def get_page(url):
    # Send a browser-like user agent; many sites block the default requests one.
    response = requests.get(url, headers={'User-Agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.37"})
    soup = None  # stays None if the request fails, so callers can check for it
    if not response.ok:
        print('server responded:', response.status_code)
    else:
        soup = BeautifulSoup(response.text, 'html.parser')
    return soup

def get_detail_page(soup):

    # Be specific with your exception capturing. 
    try:
        title = (soup.find('h1',class_="cdm_style",id=False).text)
    except AttributeError:
        title = 'Empty Title'
    try:
        collection = (soup.find('td',id="metadata_collec").find('a').text)
    except AttributeError:
        collection = "Empty Collection"
    try:
        author = (soup.find('td',id="metadata_creato").text)
    except AttributeError:
        author = "Empty Author"
    try:
        abstract = (soup.find('td',id="metadata_descri").text)
    except AttributeError:
        abstract = "Empty Abstract"
    try:
        keywords = (soup.find('td',id="metadata_keywor").text)
    except AttributeError:
        keywords = "Empty Keywords"
    try:
        publishers = (soup.find('td',id="metadata_publis").text)
    except AttributeError:
        publishers = "Empty Publishers"
    try:
        date_original = (soup.find('td',id="metadata_contri").text)
    except AttributeError:
        date_original = "Empty Date original"
    try:
        date_digital = (soup.find('td',id="metadata_date").text)
    except AttributeError:
        date_digital = "Empty Date digital"
    try:
        formatt = (soup.find('td',id="metadata_source").text)
    except AttributeError:
        formatt = "Empty Format"
    try:
        release_statement = (soup.find('td',id="metadata_rights").text)
    except AttributeError:
        release_statement = "Empty Realease Statement"
    try:
        library = (soup.find('td',id="metadata_librar").text)
    except AttributeError:
        library = "Empty Library"
    try:
        date_created = (soup.find('td',id="metadata_dmcreated").text)
    except AttributeError:
        date_created = "Empty date Created"
    data = {
        'Title'        : title.strip(),
        'Collection'   : collection.strip(),
        'Author'       : author.strip(),
        'Abstract'     : abstract.strip(),
        'Keywords'     : keywords.strip(),
        'Publishers'   : publishers.strip(),
        'Date_original': date_original.strip(),
        'Date_digital' : date_digital.strip(),
        'Format'       : formatt.strip(),
        'Release-st'   : release_statement.strip(),
        'Library'      : library.strip(),
        'Date_created' : date_created.strip()
    }
    return data

def get_index_data(soup):
    titles_link_output = []  # initialised up front so the return below always has a value
    try:
        titles_link = soup.find_all('a', class_="body_link_11")
    except AttributeError:  # soup is None when the request failed
        titles_link = []
    for link in titles_link:
        item_id = link.attrs.get('item_id', None)  # all titles with valid links have an item_id
        if item_id:
            titles_link_output.append("{}{}".format("http://cgsc.cdmhost.com", link.attrs.get('href', None)))
    return titles_link_output

def write_csv(data,url):
    with open('11_to_55.csv','a') as csvfile:
        writer = csv.writer(csvfile)
        row = [data['Title'], data['Collection'], data['Author'],
        data['Abstract'], data['Keywords'], data['Publishers'], data['Date_original'],
        data['Date_digital'], data['Format'], data['Release-st'], data['Library'],
        data['Date_created'], url]
        writer.writerow(row)

def main():
    main_url = ("http://cgsc.cdmhost.com/cdm/search/collection/p4013coll8/searchterm/1/field/all/mode/all/conn/and/display/10000/order/nosort/ad/asc")
    products = get_index_data(get_page(main_url))
    print(products)
#     for product in products:
#         data1 = get_detail_page(get_page(product))
#         write_csv(data1,product)
#     write_csv(data,url)


if __name__ == '__main__':
    main()
