How to return only information published today and yesterday using a POST request

Problem Description

I need to fetch the information published today and the previous day. Additionally, when writing it out to a CSV file, only the first column is printed rather than the remaining columns.

URL: https://e-mehkeme.gov.az/Public/Cases. The dates stored in the HTML look like <td style="width:95px;text-align:center">28.10.2019</td>

import requests, re
from bs4 import BeautifulSoup as bs
import csv

request_headers = {
    'authority': 'e-mehkeme.gov.az',
    'method': 'POST',
    'path': '/Public/Cases',
    'scheme': 'https',
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,'
              'application/signed-exchange;v=b3',
    'accept-encoding': 'gzip, deflate, br',
    'accept-language': 'en,en-GB;q=0.9',
    'cache-control': 'max-age=0',
    'content-length': '66',
    'content-type': 'application/x-www-form-urlencoded',
    'origin': 'https://e-mehkeme.gov.az',
    'referer': 'https://e-mehkeme.gov.az/Public/Cases',
    'upgrade-insecure-requests': '1',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/75.0.3770.142 Safari/537.36',
    }

voens = {'3100608381',
         }

form_data = {
    'CourtId': '',
    'CaseNo': '',
    'DocFin': '',
    'DocSeries': '',
    'DocNumber': '',
    'VOEN': voens,
    'button': 'Search',
}

url = 'https://e-mehkeme.gov.az/Public/Cases?courtid='

response = requests.post(url, data=form_data, headers=request_headers)
s = bs(response.content, 'lxml')

# PRINT THE CONTENTS OF EACH SEARCH!
for voen in voens:
    form_data['VOEN'] = voen
    r = requests.post('https://e-mehkeme.gov.az/Public/Cases', data=form_data)
    soup = bs(r.text, 'lxml')
    ids = [i['value'] for i in soup.select('.casedetail')]
    for i in ids:
        r = requests.get(f'https://e-mehkeme.gov.az/Public/CaseDetail?caseId={i}')
        soup = bs(r.content, 'lxml')
        output = [re.sub(r'\s+', ' ', i.text.strip()) for i in soup.select('[colspan="4"]')]
        print(output)
    # NOTE: the file is reopened in 'w' mode on every pass through the voen loop,
    # so earlier contents are overwritten and only the last `output` is written
    with open('courtSearch.csv', 'w', newline='', encoding='utf-8') as myfile:
        writer = csv.writer(myfile, quoting=csv.QUOTE_ALL)
        writer.writerow(output)

Desired output:

[image: screenshot of the desired CSV output]

Tags: python, python-3.x, web-scraping, beautifulsoup, python-requests

Solution


The following uses a slightly different URL construction so that you can use GET requests and easily gather the results for each voen. During each request I collect the date strings and the caseIds (needed for later requests). I then filter down to only the ids within the required date range, using a mask (the days of interest, e.g. today and yesterday, converted to strings in the same format as on the website). Finally, I loop over that filtered list and make the requests for the pop-up window information.
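To make the mask and filtering steps concrete before the full script, here is a minimal standalone sketch (the pairs data is made up for illustration; in the real script below it comes from the scraped table):

from datetime import datetime, timedelta

number_of_past_days_plus_today = 2  # today plus yesterday
# date strings in the site's dd.mm.yyyy format, e.g. '28.10.2019'
mask = [datetime.strftime(datetime.now() - timedelta(day_no), '%d.%m.%Y')
        for day_no in range(number_of_past_days_plus_today)]

# made-up (date string, caseId) pairs standing in for the scraped table rows
pairs = [('28.10.2019', 'id1'), ('27.10.2019', 'id2'), ('01.01.2019', 'id3')]
filtered = [pair for pair in pairs if pair[0] in mask]  # keep only the masked dates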

In the code you can also see commented-out sections. One of them shows you the results retrieved from each page:

#print(pd.read_html(str(soup.select_one('#Cases')))[0]) ##view table

I split on the header phrases (the assumption being that these are consistent) so that I can split each row's string into the appropriate output columns.
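For example, a small sketch with a made-up detail line (the headers list is the same one used in the script below):

import re

headers = ['Ətraflı məlumat: ', 'Cavabdeh: ', 'İddiaçı: ', 'İşin mahiyyəti ']
# hypothetical detail line for illustration only
line = 'Ətraflı məlumat: 2(102)-2146/2019 Cavabdeh: EXAMPLE MMC İddiaçı: EXAMPLE BANK İşin mahiyyəti ödəniş tələbi'
# '|'.join(headers) builds an alternation pattern, so re.split cuts at every header phrase
row = re.split('|'.join(headers), line)
print(row[1:])  # drop the empty string that precedes the first header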

Possibly requires bs4 4.7.1+ (for the CSS selectors used).

import requests, re, csv
from bs4 import BeautifulSoup as bs
from datetime import datetime, timedelta
import pandas as pd

headers = ['Ətraflı məlumat: ', 'Cavabdeh: ', 'İddiaçı: ', 'İşin mahiyyəti ']
voens = ['2002283071','1303450301', '1700393071']
number_of_past_days_plus_today = 2
mask = [datetime.strftime(datetime.now() - timedelta(day_no), '%d.%m.%Y') for day_no in range(0, number_of_past_days_plus_today)]
ids = []
table_dates = []

with requests.Session() as s:
    for voen in voens:
        #print(voen)  ##view voen
        page = 1
        while True:
            r = s.get(f'https://e-mehkeme.gov.az/Public/Cases?page={page}&voen={voen}') #to get all pages of results
            soup = bs(r.text, 'lxml')
            ids.extend([i['value'] for i in soup.select('.casedetail')])
            #print(pd.read_html(str(soup.select_one('#Cases')))[0]) ##view table
            table_dates.extend([i.text.strip() for i in soup.select('#Cases  td:nth-child(2):not([colspan])')])

            if soup.select_one('[rel=next]') is None:  # no 'next' link: last page of results
                break
            page += 1

    pairs = list(zip(table_dates,ids))
    filtered = [i for i in pairs if i[0] in mask]
    #print(100*'-') ##spacing
    #print(filtered)  ##view final filtered list of ids
    results = []
    for j in filtered:
        r = s.get(f'https://e-mehkeme.gov.az/Public/CaseDetail?caseId={j[1]}')
        soup = bs(r.content, 'lxml')
        line = ' '.join([re.sub(r'\s+', ' ', i.text.strip()) for i in soup.select('[colspan="4"]')])
        row = re.split('|'.join(headers), line)
        results.append(row[1:])

with open("results.csv", "w", encoding="utf-8-sig", newline='') as csv_file:
    w = csv.writer(csv_file, delimiter = ",", quoting=csv.QUOTE_MINIMAL)
    w.writerow(headers)
    for row in results:
        w.writerow(row)
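Note also how the CSV is written compared with the question's code: the file is opened once, after all the requests have finished, a header row is written, and then each case becomes its own row via w.writerow(row). In the original, the file was reopened in 'w' mode on every pass through the voen loop, so earlier contents were overwritten and only the final output list was written.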

I searched for how to split on multiple delimiters and used the idea given by @Jonathan here. So credit to that user.

