author	Joerg Marx <joerg.marx@secunet.com>	2010-05-20 15:55:30 +0200
committer	Patrick McHardy <kaber@trash.net>	2010-05-20 15:55:30 +0200
commit	fc350777c705a39a312728ac5e8a6f164a828f5d (patch)
tree	62aa121cd62e416a505d35de9b5d77ab8ae89f66
parent	a1d7c1b4b8dfbc5ecadcff9284d64bb6ad4c0196 (diff)
netfilter: nf_conntrack: fix a race in __nf_conntrack_confirm against nf_ct_get_next_corpse()
This race was triggered by a 'conntrack -F' command running in parallel
to the insertion of a hash for a new connection. Losing this race led to
a dead conntrack entry effectively blocking traffic for a particular
connection until timeout or flushing the conntrack hashes again.
Now the check for an already dying connection is done inside the lock.

Signed-off-by: Joerg Marx <joerg.marx@secunet.com>
Signed-off-by: Patrick McHardy <kaber@trash.net>
-rw-r--r--	include/net/netfilter/nf_conntrack_core.h	2
-rw-r--r--	net/netfilter/nf_conntrack_core.c	10
2 files changed, 11 insertions(+), 1 deletion(-)
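The race this patch closes is a classic check-then-act pattern: the DYING flag
was tested in nf_conntrack_confirm() before nf_conntrack_lock was taken, so
nf_ct_get_next_corpse() (reached e.g. via 'conntrack -F') could mark the entry
dying between that check and the hash insertion. As a rough illustration only,
the userspace C sketch below models that window with a pthread mutex standing
in for nf_conntrack_lock and a plain flag standing in for the DYING bit;
struct entry, confirm_entry_racy(), confirm_entry_fixed() and flush_entry()
are hypothetical names, not kernel code.

/* Illustrative userspace sketch (not kernel code): a pthread mutex stands
 * in for nf_conntrack_lock and a plain flag for the DYING bit. All names
 * here are hypothetical. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct entry {
	bool dying;	/* set by the flusher, analogous to nf_ct_get_next_corpse() */
	bool hashed;	/* set once the entry has been inserted into the hash */
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Racy variant (old behaviour): the dying check runs before the lock is
 * taken, so a concurrent flush can mark the entry dying in the window
 * between the check and the insertion, leaving a dead entry hashed. */
void confirm_entry_racy(struct entry *e)
{
	if (e->dying)				/* checked outside the lock */
		return;
	/* <-- a concurrent flush may set e->dying here */
	pthread_mutex_lock(&table_lock);
	e->hashed = true;			/* dead entry ends up hashed */
	pthread_mutex_unlock(&table_lock);
}

/* Fixed variant, mirroring the patch: the check is repeated under the
 * same lock the flusher takes, so check and insertion are atomic with
 * respect to the flush. */
void confirm_entry_fixed(struct entry *e)
{
	pthread_mutex_lock(&table_lock);
	if (e->dying) {
		pthread_mutex_unlock(&table_lock);
		return;
	}
	e->hashed = true;
	pthread_mutex_unlock(&table_lock);
}

/* Flusher analogue: marks the entry dying under the same lock. */
void flush_entry(struct entry *e)
{
	pthread_mutex_lock(&table_lock);
	e->dying = true;
	pthread_mutex_unlock(&table_lock);
}

int main(void)
{
	struct entry e = { .dying = false, .hashed = false };

	flush_entry(&e);		/* the flush wins the race */
	confirm_entry_fixed(&e);	/* bails out instead of hashing a dead entry */
	printf("hashed after flush: %s\n", e.hashed ? "yes" : "no");
	return 0;
}

The patch implements the second variant: re-checking the flag under the same
lock the flusher holds makes the check and the hash insertion atomic with
respect to the flush, so a dying entry can no longer be inserted.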
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index dffde8e..3d7524f 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -61,7 +61,7 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
 	int ret = NF_ACCEPT;
 
 	if (ct && ct != &nf_conntrack_untracked) {
-		if (!nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct))
+		if (!nf_ct_is_confirmed(ct))
 			ret = __nf_conntrack_confirm(skb);
 		if (likely(ret == NF_ACCEPT))
 			nf_ct_deliver_cached_events(ct);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index b83c530..eeeb8bc 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -424,6 +424,16 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	spin_lock_bh(&nf_conntrack_lock);
 
+	/* We have to check the DYING flag inside the lock to prevent
+	   a race against nf_ct_get_next_corpse() possibly called from
+	   user context, else we insert an already 'dead' hash, blocking
+	   further use of that particular connection -JM */
+
+	if (unlikely(nf_ct_is_dying(ct))) {
+		spin_unlock_bh(&nf_conntrack_lock);
+		return NF_ACCEPT;
+	}
+
 	/* See if there's one in the list already, including reverse:
 	   NAT could have grabbed it without realizing, since we're
 	   not in the hash. If there is, we lost race. */