author	Eric Dumazet <edumazet@google.com>	2012-10-21 19:57:11 +0000
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2013-01-11 09:03:48 -0800
commit	8d15569e14cfcf9151e9e3b4c0cb98369943a2bb (patch)
tree	0c066e73c8649a70c11191c220c7105f141469a9 /net
parent	ffd34fcbce326a88668075b9e5480cb301ac6a78 (diff)
tcp: RFC 5961 5.2 Blind Data Injection Attack Mitigation
[ Upstream commit 354e4aa391ed50a4d827ff6fc11e0667d0859b25 ]

RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation]

   All TCP stacks MAY implement the following mitigation.  TCP stacks
   that implement this mitigation MUST add an additional input check to
   any incoming segment.  The ACK value is considered acceptable only if
   it is in the range of ((SND.UNA - MAX.SND.WND) <= SEG.ACK <=
   SND.NXT).  All incoming segments whose ACK value doesn't satisfy the
   above condition MUST be discarded and an ACK sent back.

Move tcp_send_challenge_ack() before tcp_ack() to avoid a forward
declaration.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Neal Cardwell <ncardwell@google.com>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Jerry Chu <hkchu@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
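For readers unpicking the arithmetic, here is a minimal standalone sketch of the acceptability window the patch enforces. It is an illustration, not the kernel code: seq_before() is a hypothetical stand-in for the kernel's before() helper, and the parameter names follow the RFC rather than struct tcp_sock (in the patch, SND.UNA is prior_snd_una and MAX.SND.WND is tp->max_window).

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Sequence numbers wrap modulo 2^32, so "older" is decided with signed
 * arithmetic, the same trick the kernel's before()/after() helpers use. */
static bool seq_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

/* RFC 5961 5.2: accept SEG.ACK only if
 * (SND.UNA - MAX.SND.WND) <= SEG.ACK <= SND.NXT.
 * Below the lower bound: answer with a rate-limited challenge ACK and drop.
 * Above SND.NXT: the segment acks data that was never sent. */
static bool ack_acceptable(uint32_t seg_ack, uint32_t snd_una,
			   uint32_t snd_nxt, uint32_t max_snd_wnd)
{
	if (seq_before(seg_ack, snd_una - max_snd_wnd))
		return false;	/* too far behind: candidate for challenge ACK */
	if (seq_before(snd_nxt, seg_ack))
		return false;	/* acks unsent data */
	return true;
}

int main(void)
{
	/* Example: SND.UNA = 1000, SND.NXT = 1500, MAX.SND.WND = 300. */
	printf("%d\n", ack_acceptable(700, 1000, 1500, 300));	/* 1: on the lower bound */
	printf("%d\n", ack_acceptable(699, 1000, 1500, 300));	/* 0: challenge ACK case */
	printf("%d\n", ack_acceptable(1501, 1000, 1500, 300));	/* 0: beyond SND.NXT */
	return 0;
}

In the patch itself the new lower-bound test only runs on ACKs already known to be older than prior_snd_una, and ACKs beyond SND.NXT are rejected by the existing RFC 793 check just after it.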
Diffstat (limited to 'net')
-rw-r--r--	net/ipv4/tcp_input.c	43
1 file changed, 25 insertions(+), 18 deletions(-)
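The helper being moved also implements the RFC 5961 section 7 [ACK Throttling] side of the mitigation: at most sysctl_tcp_challenge_ack_limit challenge ACKs are sent per second, counted with deliberately unlocked static variables. A rough userspace analogue of that pattern (hypothetical names; time(NULL) stands in for jiffies / HZ and CHALLENGE_ACK_LIMIT for the sysctl knob) looks like this:

#include <stdio.h>
#include <stdbool.h>
#include <time.h>

#define CHALLENGE_ACK_LIMIT 100	/* stand-in for sysctl_tcp_challenge_ack_limit */

/* Returns true when a challenge ACK may be sent now: the counter resets
 * whenever the whole-second clock advances, and at most
 * CHALLENGE_ACK_LIMIT challenge ACKs are allowed per second. */
static bool challenge_ack_allowed(void)
{
	static time_t challenge_timestamp;
	static unsigned int challenge_count;
	time_t now = time(NULL);

	if (now != challenge_timestamp) {
		challenge_timestamp = now;
		challenge_count = 0;
	}
	return ++challenge_count <= CHALLENGE_ACK_LIMIT;
}

int main(void)
{
	unsigned int sent = 0;

	/* Simulate a burst of 1000 segments that would each trigger a
	 * challenge ACK within the same second. */
	for (int i = 0; i < 1000; i++)
		if (challenge_ack_allowed())
			sent++;
	printf("challenge ACKs actually sent: %u\n", sent);	/* at most 100 */
	return 0;
}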
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index ab564f6..56545b9 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3636,6 +3636,24 @@ static int tcp_process_frto(struct sock *sk, int flag)
 	return 0;
 }
 
+/* RFC 5961 7 [ACK Throttling] */
+static void tcp_send_challenge_ack(struct sock *sk)
+{
+	/* unprotected vars, we dont care of overwrites */
+	static u32 challenge_timestamp;
+	static unsigned int challenge_count;
+	u32 now = jiffies / HZ;
+
+	if (now != challenge_timestamp) {
+		challenge_timestamp = now;
+		challenge_count = 0;
+	}
+	if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
+		tcp_send_ack(sk);
+	}
+}
+
 /* This routine deals with incoming acks, but not outgoing ones. */
 static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 {
@@ -3652,8 +3670,14 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 	/* If the ack is older than previous acks
 	 * then we can probably ignore it.
 	 */
-	if (before(ack, prior_snd_una))
+	if (before(ack, prior_snd_una)) {
+		/* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */
+		if (before(ack, prior_snd_una - tp->max_window)) {
+			tcp_send_challenge_ack(sk);
+			return -1;
+		}
 		goto old_ack;
+	}
 
 	/* If the ack includes data we haven't sent yet, discard
 	 * this segment (RFC793 Section 3.9).
 	 */
@@ -5168,23 +5192,6 @@ out:
 }
 #endif /* CONFIG_NET_DMA */
 
-static void tcp_send_challenge_ack(struct sock *sk)
-{
-	/* unprotected vars, we dont care of overwrites */
-	static u32 challenge_timestamp;
-	static unsigned int challenge_count;
-	u32 now = jiffies / HZ;
-
-	if (now != challenge_timestamp) {
-		challenge_timestamp = now;
-		challenge_count = 0;
-	}
-	if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
-		tcp_send_ack(sk);
-	}
-}
-
 /* Does PAWS and seqno based validation of an incoming segment, flags will
  * play significant role here.
  */