Better: refactor rate-limiting logic and apply to destination hosts too

Sean Owen 2017-10-03 12:39:58 +01:00
parent 65d2b163eb
commit 88120e298d
4 changed files with 143 additions and 57 deletions

DecodeServlet.java

@@ -59,6 +59,8 @@ import java.util.EnumSet;
import java.util.Locale;
import java.util.Map;
import java.util.ResourceBundle;
import java.util.Timer;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -107,6 +109,8 @@ public final class DecodeServlet extends HttpServlet {
}
private Iterable<String> blockedURLSubstrings;
private Timer timer;
private DoSTracker destHostTracker;
@Override
public void init(ServletConfig servletConfig) throws ServletException {
@@ -125,6 +129,16 @@ public final class DecodeServlet extends HttpServlet {
}
log.info("Blocking URIs containing: " + blockedURLSubstrings);
}
timer = new Timer("DecodeServlet");
destHostTracker = new DoSTracker(timer, 500, TimeUnit.MILLISECONDS.convert(5, TimeUnit.MINUTES), 10_000);
}
@Override
public void destroy() {
if (timer != null) {
timer.cancel();
}
}
@Override
@@ -197,6 +211,10 @@ public final class DecodeServlet extends HttpServlet {
return;
}
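// Refuse to proxy for destination hosts that have been requested too often recently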
if (destHostTracker.isBanned(imageURL.getHost())) {
  errorResponse(request, response, "badurl");
  return;
}
HttpURLConnection connection;
try {
connection = (HttpURLConnection) imageURL.openConnection();

DoSFilter.java

@@ -26,14 +26,9 @@ import javax.servlet.annotation.WebFilter;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.logging.Logger;
/**
* A simplistic {@link Filter} that rejects requests from hosts that are sending too many
@@ -44,51 +39,13 @@ import java.util.logging.Logger;
@WebFilter({"/w/decode", "/w/chart"})
public final class DoSFilter implements Filter {
private static final Logger log = Logger.getLogger(DoSFilter.class.getName());
private static final int MAX_ACCESSES_PER_IP_PER_TIME = 50;
private static final long MAX_ACCESSES_TIME_MS = TimeUnit.MILLISECONDS.convert(5, TimeUnit.MINUTES);
private static final int MAX_RECENT_ACCESS_MAP_SIZE = 10_000;
private final Map<String,AtomicLong> numRecentAccesses;
private Timer timer;
public DoSFilter() {
numRecentAccesses = new LinkedHashMap<String,AtomicLong>() {
@Override
protected boolean removeEldestEntry(Map.Entry<String,AtomicLong> eldest) {
return size() > MAX_RECENT_ACCESS_MAP_SIZE;
}
};
}
private DoSTracker sourceAddrTracker;
@Override
public void init(FilterConfig filterConfig) {
timer = new Timer("DoSFilter");
timer.scheduleAtFixedRate(
new TimerTask() {
@Override
public void run() {
synchronized (numRecentAccesses) {
// Periodically reduce allowed accesses per IP
Iterator<Map.Entry<String,AtomicLong>> accessIt = numRecentAccesses.entrySet().iterator();
while (accessIt.hasNext()) {
Map.Entry<String,AtomicLong> entry = accessIt.next();
AtomicLong count = entry.getValue();
// If number of accesses is below the threshold, remove it entirely
if (count.get() <= MAX_ACCESSES_PER_IP_PER_TIME) {
accessIt.remove();
} else {
// Else it exceeded the max, so log it (again)
log.warning("Possible DoS attack from " + entry.getKey() + " (" + count + " outstanding)");
// Reduce count of accesses held against the IP
count.getAndAdd(-MAX_ACCESSES_PER_IP_PER_TIME);
}
}
log.info("Tracking accesses from " + numRecentAccesses.size() + " IPs");
}
}
}, MAX_ACCESSES_TIME_MS, MAX_ACCESSES_TIME_MS);
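// Limit requests per source IP: roughly 500 per 5-minute window, tracking at most 10,000 addresses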
sourceAddrTracker = new DoSTracker(timer, 500, TimeUnit.MILLISECONDS.convert(5, TimeUnit.MINUTES), 10_000);
timer.scheduleAtFixedRate(
new TimerTask() {
@Override
@@ -115,18 +72,7 @@ public final class DoSFilter implements Filter {
if (remoteIPAddress == null) {
remoteIPAddress = request.getRemoteAddr();
}
if (remoteIPAddress == null) {
return true;
}
AtomicLong count;
synchronized (numRecentAccesses) {
count = numRecentAccesses.get(remoteIPAddress);
if (count == null) {
count = new AtomicLong();
numRecentAccesses.put(remoteIPAddress, count);
}
}
return count.incrementAndGet() > MAX_ACCESSES_PER_IP_PER_TIME;
return sourceAddrTracker.isBanned(remoteIPAddress);
}
@Override

DoSTracker.java (new file)

@@ -0,0 +1,80 @@
/*
* Copyright 2017 ZXing authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.zxing.web;
import java.util.Iterator;
import java.util.Map;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.atomic.AtomicLong;
import java.util.logging.Logger;
/**
* Simple class that tracks how many times an action has happened per unit of time, and can flag when
* an action has happened too frequently in the recent past. This can be used, for example, to track
* and temporarily block access from certain IPs or to certain hosts.
*/
final class DoSTracker {
private static final Logger log = Logger.getLogger(DoSTracker.class.getName());
private final long maxAccessesPerTime;
private final Map<String,AtomicLong> numRecentAccesses;
DoSTracker(Timer timer, final int maxAccessesPerTime, long accessTimeMS, int maxEntries) {
this.maxAccessesPerTime = maxAccessesPerTime;
this.numRecentAccesses = new LRUMap<>(maxEntries);
timer.scheduleAtFixedRate(new TimerTask() {
@Override
public void run() {
synchronized (numRecentAccesses) {
Iterator<Map.Entry<String,AtomicLong>> accessIt = numRecentAccesses.entrySet().iterator();
while (accessIt.hasNext()) {
Map.Entry<String,AtomicLong> entry = accessIt.next();
AtomicLong count = entry.getValue();
// If number of accesses is below the threshold, remove it entirely
if (count.get() <= maxAccessesPerTime) {
accessIt.remove();
} else {
// Else it exceeded the max, so log it (again)
log.warning("Blocking " + entry.getKey() + " (" + count + " outstanding)");
// Reduce count of accesses held against the IP
count.getAndAdd(-maxAccessesPerTime);
}
}
}
}
}, accessTimeMS, accessTimeMS);
}
boolean isBanned(String event) {
if (event == null) {
return true;
}
AtomicLong count;
synchronized (numRecentAccesses) {
count = numRecentAccesses.get(event);
if (count == null) {
count = new AtomicLong();
numRecentAccesses.put(event, count);
}
}
return count.incrementAndGet() > maxAccessesPerTime;
}
}
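For illustration, here is a minimal, hypothetical caller, not part of this commit, showing how the refactored tracker is wired up and queried; it sits in com.google.zxing.web only because DoSTracker is package-private, and it reuses the same 500-per-5-minutes policy seen in the servlet and filter above.

package com.google.zxing.web;

import java.util.Timer;
import java.util.concurrent.TimeUnit;

// Hypothetical example class, not part of this commit.
final class DoSTrackerExample {
  public static void main(String[] args) {
    Timer timer = new Timer("DoSTrackerExample");
    // 500 events per key per 5-minute window, tracking at most 10,000 distinct keys
    DoSTracker tracker =
        new DoSTracker(timer, 500, TimeUnit.MILLISECONDS.convert(5, TimeUnit.MINUTES), 10_000);
    String key = "example.com"; // the key may be a source IP or a destination host
    if (tracker.isBanned(key)) { // isBanned() also counts this access against the key
      System.out.println("Too many recent requests for " + key);
    }
    timer.cancel();
  }
}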

LRUMap.java (new file)

@@ -0,0 +1,42 @@
/*
* Copyright 2017 ZXing authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.zxing.web;
import java.util.LinkedHashMap;
import java.util.Map;
/**
* Map with maximum size that removes least-recently-accessed entries when full.
*
* @param <K> map key type
* @param <V> map value type
*/
final class LRUMap<K,V> extends LinkedHashMap<K,V> {
private final int maxSize;
LRUMap(int maxSize) {
super(100, 0.75f, true);
this.maxSize = maxSize;
}
@Override
protected boolean removeEldestEntry(Map.Entry<K,V> eldest) {
return size() > maxSize;
}
}
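And a small, hypothetical demo, again not part of this commit, of the least-recently-accessed eviction that DoSTracker relies on:

package com.google.zxing.web;

// Hypothetical demo class, not part of this commit.
final class LRUMapExample {
  public static void main(String[] args) {
    LRUMap<String,Integer> map = new LRUMap<>(2);
    map.put("a", 1);
    map.put("b", 2);
    map.get("a");    // touching "a" makes "b" the least-recently-accessed entry
    map.put("c", 3); // size would exceed 2, so "b" is evicted
    System.out.println(map.keySet()); // prints [a, c]
  }
}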