So I'm trying to use jsoup to scrape Reddit for images, but when I scrape certain subreddits (such as /r/wallpaper), I get a 429 error, and I'm wondering how to fix it. I fully understand that this code is awful and that this is a pretty noob question, but I'm completely new to this. Anyways:
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLConnection;
import java.util.Scanner;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
public class javascraper {

    public static void main(String[] args) throws MalformedURLException {
        Scanner scan = new Scanner(System.in);
        System.out.println("Where do you want to store the files?");
        String folderpath = scan.next();
        System.out.println("What subreddit do you want to scrape?");
        String subName = scan.next();

        // create the output folder before the name is turned into a URL
        String folder = folderpath + "/" + subName;
        new File(folder).mkdir();
        String subreddit = "http://reddit.com/r/" + subName;

        try {
            // fetch and parse the subreddit's front page
            Document doc = Jsoup.connect(subreddit).timeout(0).get();

            // print the page title
            String title = doc.title();
            System.out.println("title : " + title);

            // walk every anchor on the page and download the ones that look like images
            Elements links = doc.select("a[href]");
            for (Element link : links) {
                String checkLink = link.attr("href");
                if (imgCheck(checkLink)) {
                    System.out.println("link : " + link.attr("href"));
                    downloadImages(checkLink, folder);
                }
            }
        }
        catch (IOException e) {
            e.printStackTrace();
        }
    }
    public static boolean imgCheck(String http) {
        // crude filter: accept any link that mentions a known image extension or gfycat
        return http.contains(".png") || http.contains("gfycat") || http.contains(".jpg")
                || http.contains("jpeg") || http.contains(".gif");
    }
    private static void downloadImages(String src, String folderpath) throws IOException {
        // strip a trailing slash so the last path segment is the file name
        if (src.endsWith("/")) {
            src = src.substring(0, src.length() - 1);
        }

        // extract the image name from the src attribute
        // (the leading "/" is kept so it joins folderpath and the name)
        int indexname = src.lastIndexOf("/");
        String name = src.substring(indexname);
        System.out.println(name);

        // open a URL stream and copy the image byte by byte to disk
        URL url = new URL(src);
        InputStream in = url.openStream();
        OutputStream out = new BufferedOutputStream(new FileOutputStream(folderpath + name));
        for (int b; (b = in.read()) != -1;) {
            out.write(b);
        }
        out.close();
        in.close();
    }
}
Your problem is caused by your scraper violating reddit's API rules. Error 429 means "Too Many Requests" – you are requesting pages too many times, too fast.
You may make at most one request every two seconds, and you also need to set a descriptive user agent (the format they recommend is <platform>:<app ID>:<version string> (by /u/<reddit username>)).
To fix it, first add this to the top of the class, before the `main` method:
public static final String USER_AGENT = "<PUT YOUR USER AGENT HERE>";
(Make sure to specify an actual user agent.)
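For example, following that format (a hypothetical value – substitute your own platform, app id, and reddit username):
public static final String USER_AGENT = "desktop:com.example.javascraper:v0.1 (by /u/your_username)"; // hypothetical – replace with your own details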
Then, change this (in `downloadImages`):
URL url = new URL(src);
InputStream in = url.openStream();
to this:
URLConnection connection = (new URL(src)).openConnection();
try {
    Thread.sleep(2000); // delay to comply with rate limiting
} catch (InterruptedException e) {
    Thread.currentThread().interrupt(); // sleep was interrupted; restore the flag and continue
}
connection.setRequestProperty("User-Agent", USER_AGENT);
InputStream in = connection.getInputStream();
You will also need to change this (in `main`):
Document doc = Jsoup.connect(subreddit).timeout(0).get();
to this:
Document doc = Jsoup.connect(subreddit).userAgent(USER_AGENT).timeout(0).get();
Then your code should stop running into that error.
Note that using reddit's API (i.e. /r/subreddit.json instead of /r/subreddit) would probably make this project easier, but it isn't required, and your current code can work.
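For example, the JSON listing can be fetched with jsoup itself – a minimal sketch, assuming you then parse the returned string with a separate JSON library such as Gson to pull out the image URLs:
String json = Jsoup.connect("https://www.reddit.com/r/wallpaper/.json")
        .userAgent(USER_AGENT)
        .ignoreContentType(true) // jsoup rejects application/json responses unless told otherwise
        .execute()
        .body(); // the raw JSON listing as a String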
You can check Wikipedia – the 429 status code tells you that you have sent too many requests:
The user has sent too many requests in a given amount of time. Intended for use with rate-limiting schemes.
The solution is to slow your scraper down. There are a few ways to do that; one of them is to use sleep, as in the sketch below.
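A minimal sketch, assuming one delay per downloaded image (two seconds matches reddit's documented limit of one request every two seconds):
for (Element link : links) {
    String checkLink = link.attr("href");
    if (imgCheck(checkLink)) {
        downloadImages(checkLink, folder);
        try {
            Thread.sleep(2000); // pause between requests to stay under the rate limit
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupt flag and stop delaying
        }
    }
}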