Apache HttpClient doesn't fetch page content when following pagination links. I get a 200 status but the HTML has no content

Problem description

I am trying to crawl content pages with Apache HttpClient. When I request the next page through a pagination link, I get status 200, but the HTML shows a 500 in the body and contains no content. Postman works fine and fetches the content even when using the same pagination links.

Main class

public static void main(String[] args) {
        String url = "https://www.cochranelibrary.com/cdsr/reviews/topics";
        MyContentFetcher myContentFetcher = new MyContentFetcher();
        MyParser myParser = new MyParser();
        try {
            // Load Topic list page
            String topicsPage = myContentFetcher.loadHTML(url);

            // Getting all the topics.
            Map<Integer, MyNode> topics = myParser.getTopicList(topicsPage);

            // Print all the topics and ask user to choose one
            for (int id : topics.keySet())
                System.out.println("-> " + id + " <- " + topics.get(id).getTopic());
            System.out.println("********************");
            System.out.print("Enter ID number from the list above to get reviews or enter anything else to exit:\n");
            BufferedReader reader = new BufferedReader(new InputStreamReader(System.in));
            String id = reader.readLine();

            // Validate user input, get the link and topic, and print the choice.
            if (isNumber(id)) {
                int idNum = Integer.parseInt(id);
                if (idNum <= topics.size() && idNum > 0) {
                    String topic = topics.get(idNum).getTopic();
                    String link = topics.get(idNum).getLink();
                    System.out.println("You picked: " + topic + link + "\n***************************");
                    // Loading first page of reviews
                    myParser.loadReviews(myContentFetcher.loadHTML(link), topic);
                    // Getting links to other pages
                    Queue<String> paginationLinks = myParser.getLinks();

                    // --------------> WORKS FINE UNTIL HERE <--------------
                    // Problem starts here....
                    // Load list of reviews for chosen topic
                    while(!paginationLinks.isEmpty()) {
                        String page = myContentFetcher.loadHTML(paginationLinks.remove());
                        myParser.loadReviews(page, topic);
                    }
                }
            }
            System.out.println("Exiting...");

        } catch (IOException e) {
            System.out.println("There was a problem...");
        }
}
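The isNumber helper and the MyNode holder class are used above but never shown. Minimal versions inferred from the call sites might look like this (both are assumptions, not the original code):

// Hypothetical helper, inferred from the isNumber(id) call in main:
// accepts only a non-empty string of digits.
static boolean isNumber(String s) {
    return s != null && !s.isEmpty() && s.chars().allMatch(Character::isDigit);
}

// Hypothetical holder, inferred from topics.get(id).getTopic() / .getLink():
// an immutable (topic name, link) pair.
public class MyNode {
    private final String topic;
    private final String link;

    public MyNode(String topic, String link) {
        this.topic = topic;
        this.link = link;
    }

    public String getTopic() { return topic; }
    public String getLink() { return link; }
}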

This is the class that fetches the HTML. I am probably doing something wrong here...

import org.apache.http.client.config.CookieSpecs;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;

import java.io.IOException;

import java.util.Scanner;

public class MyContentFetcher {

    public MyContentFetcher() {
    }

    String loadHTML(String url) throws IOException {
        // Create request configuration: allow circular redirects and use the standard cookie policy
        RequestConfig config = RequestConfig.custom()
                .setCircularRedirectsAllowed(true)
                .setCookieSpec(CookieSpecs.STANDARD)
                .build();
        // Creating a HttpClient object
        CloseableHttpClient httpClient = HttpClients.custom()
                .setDefaultRequestConfig(config)
                .build();
        // Creating a HttpGet object
        HttpGet httpget = new HttpGet(url);
        httpget.setHeader("User-Agent", "Mozilla/5.0 (Linux; Android 8.1.0; Pixel Build/OPM4.171019.021.D1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.109 Mobile Safari/537.36 EdgA/42.0.0.2057");
        CloseableHttpResponse httpResponse = httpClient.execute(httpget);
        Scanner sc = new Scanner(httpResponse.getEntity().getContent());
        StringBuilder page = new StringBuilder();
        while (sc.hasNextLine())
            page.append(sc.nextLine()).append(" ");
        sc.close();
        httpResponse.close();
        httpClient.close();
        return page.toString();
    }
}
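As an aside, reading the body line by line with a Scanner is brittle and silently uses the platform default charset. Apache HttpClient's own EntityUtils can read and decode the entity in one call; a minimal sketch of that step, assuming the rest of loadHTML stays as written:

import org.apache.http.util.EntityUtils;
import java.nio.charset.StandardCharsets;

// Inside loadHTML, replacing the Scanner loop:
CloseableHttpResponse httpResponse = httpClient.execute(httpget);
try {
    // Consumes the entity and decodes it, falling back to UTF-8
    // when the response declares no charset.
    return EntityUtils.toString(httpResponse.getEntity(), StandardCharsets.UTF_8);
} finally {
    httpResponse.close();
    httpClient.close();
}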

This is the parser. There is nothing wrong with the parser (it parses exactly as needed).

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

import java.io.IOException;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.*;

public class MyParser {

    private Map<String, String> topics;
    private Document htmlPage;
    private Element reviewBlock;

    public MyParser(){}

    // Loads all topics from the Cochrane Library into a map -> (Topic Name, Link)
    public Map<Integer, MyNode> getTopicList(String page) {
        Map<Integer, MyNode> topics= new HashMap<Integer, MyNode>();
        htmlPage = Jsoup.parse(page);
        // Get 'a' element that is inside 'li' with a class name of browse-by-list-item
        int i = 1;
        MyNode info;
        for(Element element : htmlPage.body().select("li.browse-by-list-item > a")) {
            info = new MyNode(element.select("button").text(),
                    element.select("a").attr("href").trim());
            topics.put(i, info);
            i++;
        }
        return topics;
    }

    // Loads Reviews
    public void loadReviews(String page, String topic) throws IOException {
        htmlPage = Jsoup.parse(page);
        // Get all review blocks
        System.out.println("**************************\n" + page + "\n**************************\n");
        for(Element element : htmlPage.body().select(".search-results-item-body")){
            reviewBlock = element;
            String review = getLink() + " | " + topic + " | " + getTitle() + " | " + getAuthor() + " | " + getDate();
            System.out.println(review);
        }
    }

    Queue<String> getLinks(){
        System.out.println("GETTING LINKS");
        Queue<String> links = new LinkedList<>();
        for(Element element : htmlPage.body().select("li.pagination-page-list-item > a")) {
            links.add(element.attr("href"));
        }
        return links;
    }

    private String getLink(){
        return "https://www.cochranelibrary.com" + reviewBlock.select("a").attr("href");
    }

    public String getTitle(){
        return reviewBlock.selectFirst("a").text();
    }

    public String getAuthor(){
        return reviewBlock.selectFirst("div.search-result-authors").text();
    }

    public String getDate(){
        String result = reviewBlock.select("div.search-result-date > div").text();
        try {
            SimpleDateFormat fmt = new SimpleDateFormat("dd MMMM yyyy", Locale.US);
            Date d = fmt.parse(result);
            fmt.applyPattern("yyyy-MM-dd");
            result = fmt.format(d);
        } catch (ParseException e) {
            System.out.println("Failed parsing the date...");
        }
        return result;
    }
}
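As a side note, the date conversion in getDate could equally be done with the java.time API, which avoids SimpleDateFormat's mutability and thread-safety pitfalls; a sketch of an equivalent, hypothetical helper:

import java.time.LocalDate;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeParseException;
import java.util.Locale;

// Hypothetical replacement for the conversion step in getDate:
// turns e.g. "21 January 2020" into "2020-01-21".
static String reformatDate(String raw) {
    DateTimeFormatter in = DateTimeFormatter.ofPattern("d MMMM yyyy", Locale.US);
    try {
        return LocalDate.parse(raw, in).toString(); // LocalDate.toString() is ISO yyyy-MM-dd
    } catch (DateTimeParseException e) {
        System.out.println("Failed parsing the date...");
        return raw; // keep the scraped text if it does not match
    }
}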

Tags: java, httpclient, apache-httpclient-4.x, apache-httpcomponents

Solution


To fix this, I created a session backed by a cookie store and reused it across requests, closing each CloseableHttpResponse after fetching a page. Here is the code snippet that made it work:

RequestConfig config = RequestConfig.custom()
        .setCircularRedirectsAllowed(true)
        .build();
httpClient = HttpClients.custom()
        .setDefaultRequestConfig(config)
        .setMaxConnPerRoute(100)
        .build();
CookieStore cookieStore = new BasicCookieStore();
httpContext = new BasicHttpContext();
httpContext.setAttribute(HttpClientContext.COOKIE_STORE, cookieStore);

HttpGet httpget = new HttpGet(url);
httpget.setHeader("User-Agent", "Whatever");
StringBuilder page = new StringBuilder();
try {
    CloseableHttpResponse response = httpClient.execute(httpget, httpContext);
    System.out.println(response.getStatusLine());
    Scanner sc = new Scanner(response.getEntity().getContent());
    while (sc.hasNextLine())
        page.append(sc.nextLine()).append(" ");
    sc.close();
    response.close();
} catch (IOException e) {
    e.printStackTrace();
}

return page.toString();
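Put together, a minimal sketch of the whole fetcher with the shared session (field names and imports are filled in from the snippets above; the point is that the client and context are created once, so the session cookies persist across the pagination requests):

import org.apache.http.client.CookieStore;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.protocol.HttpClientContext;
import org.apache.http.impl.client.BasicCookieStore;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.protocol.BasicHttpContext;
import org.apache.http.protocol.HttpContext;

import java.io.IOException;
import java.util.Scanner;

public class MyContentFetcher {

    // One client and one context for the lifetime of the fetcher,
    // so the session cookies survive between page loads.
    private final CloseableHttpClient httpClient;
    private final HttpContext httpContext;

    public MyContentFetcher() {
        RequestConfig config = RequestConfig.custom()
                .setCircularRedirectsAllowed(true)
                .build();
        httpClient = HttpClients.custom()
                .setDefaultRequestConfig(config)
                .setMaxConnPerRoute(100)
                .build();
        CookieStore cookieStore = new BasicCookieStore();
        httpContext = new BasicHttpContext();
        httpContext.setAttribute(HttpClientContext.COOKIE_STORE, cookieStore);
    }

    String loadHTML(String url) throws IOException {
        HttpGet httpget = new HttpGet(url);
        httpget.setHeader("User-Agent", "Whatever");
        StringBuilder page = new StringBuilder();
        // Execute with the shared context; close only the response,
        // never the client, between requests.
        try (CloseableHttpResponse response = httpClient.execute(httpget, httpContext)) {
            Scanner sc = new Scanner(response.getEntity().getContent());
            while (sc.hasNextLine())
                page.append(sc.nextLine()).append(" ");
            sc.close();
        }
        return page.toString();
    }
}

After this change, only the response is closed per request; the client stays open until the crawl is finished.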
