author     Pavel Emelyanov <xemul@openvz.org>      2008-01-22 06:10:13 -0800
committer  David S. Miller <davem@davemloft.net>   2008-01-28 15:10:38 -0800
commit     e31e0bdc7e7fb9a4b09d2f3266c035a18fdcee9d (patch)
tree       30e25f733781cf80aa9fef0d58ff3476424cb9b3 /net/ipv4
parent     b2fd5321dd160ef309dfb6cfc78ed8de4a830659 (diff)
[NETNS][FRAGS]: Make thresholds work in namespaces.
This is the same as with the timeout variable.
Currently, after exceeding the high threshold _all_
the fragments are evicted, but this will be fixed in
a later patch.
Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
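
In outline: the two eviction thresholds move out of the global
ip4_frags_ctl and into the per-namespace struct netns_frags, where the
fragment timeout already lives after the parent patch, so every network
namespace gets (and can tune) its own limits. Below is a minimal sketch
of the per-namespace state this patch relies on, reconstructed from the
diff further down; the real definition lives in include/net/inet_frag.h
and carries additional members:

/*
 * Sketch only: the netns_frags fields this patch reads and writes.
 */
struct netns_frags {
	atomic_t	mem;		/* bytes charged to this namespace */

	/* sysctls, now settable per namespace */
	int		timeout;	/* made per-netns by the parent patch */
	int		high_thresh;	/* start evicting above this mark */
	int		low_thresh;	/* evict back down to this mark */
};

static int ipv4_frags_init_net(struct net *net)
{
	/* Every new namespace starts from the old global defaults. */
	net->ipv4.frags.high_thresh = 256 * 1024;	/* 256K */
	net->ipv4.frags.low_thresh  = 192 * 1024;	/* 192K */
	/* ... timeout and remaining initialization elided ... */
	return 0;
}

Per-namespace tuning then falls out of the existing sysctl-table cloning
in ip4_frags_ctl_register(): instead of making the cloned
ipfrag_high_thresh and ipfrag_low_thresh entries read-only, the patch
points their .data at the namespace's own fields, so writing
/proc/sys/net/ipv4/ipfrag_high_thresh inside a namespace affects only
that namespace.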
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/inet_fragment.c  |  2
-rw-r--r--  net/ipv4/ip_fragment.c    | 26
2 files changed, 14 insertions, 14 deletions
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 9da9679..5ab399c 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -153,7 +153,7 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f)
 	struct inet_frag_queue *q;
 	int work, evicted = 0;
 
-	work = atomic_read(&nf->mem) - f->ctl->low_thresh;
+	work = atomic_read(&nf->mem) - nf->low_thresh;
 	while (work > 0) {
 		read_lock(&f->lock);
 		if (list_empty(&f->lru_list)) {
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 70d241c..80c2c19 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -75,14 +75,6 @@ struct ipq {
 };
 
 static struct inet_frags_ctl ip4_frags_ctl __read_mostly = {
-	/*
-	 * Fragment cache limits. We will commit 256K at one time. Should we
-	 * cross that limit we will prune down to 192K. This should cope with
-	 * even the most extreme cases without allowing an attacker to
-	 * measurably harm machine performance.
-	 */
-	.high_thresh	 = 256 * 1024,
-	.low_thresh	 = 192 * 1024,
 	.secret_interval = 10 * 60 * HZ,
 };
 
@@ -582,7 +574,7 @@ int ip_defrag(struct sk_buff *skb, u32 user)
 	net = skb->dev->nd_net;
 
 	/* Start by cleaning up the memory. */
-	if (atomic_read(&net->ipv4.frags.mem) > ip4_frags_ctl.high_thresh)
+	if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh)
 		ip_evictor(net);
 
 	/* Lookup (or create) queue header */
@@ -610,7 +602,7 @@ static struct ctl_table ip4_frags_ctl_table[] = {
 	{
 		.ctl_name	= NET_IPV4_IPFRAG_HIGH_THRESH,
 		.procname	= "ipfrag_high_thresh",
-		.data		= &ip4_frags_ctl.high_thresh,
+		.data		= &init_net.ipv4.frags.high_thresh,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
 		.proc_handler	= &proc_dointvec
@@ -618,7 +610,7 @@ static struct ctl_table ip4_frags_ctl_table[] = {
 	{
 		.ctl_name	= NET_IPV4_IPFRAG_LOW_THRESH,
 		.procname	= "ipfrag_low_thresh",
-		.data		= &ip4_frags_ctl.low_thresh,
+		.data		= &init_net.ipv4.frags.low_thresh,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
 		.proc_handler	= &proc_dointvec
@@ -663,8 +655,8 @@ static int ip4_frags_ctl_register(struct net *net)
 	if (table == NULL)
 		goto err_alloc;
 
-	table[0].mode &= ~0222;
-	table[1].mode &= ~0222;
+	table[0].data = &net->ipv4.frags.high_thresh;
+	table[1].data = &net->ipv4.frags.low_thresh;
 	table[2].data = &net->ipv4.frags.timeout;
 	table[3].mode &= ~0222;
 	table[4].mode &= ~0222;
@@ -706,6 +698,14 @@ static inline void ip4_frags_ctl_unregister(struct net *net)
 static int ipv4_frags_init_net(struct net *net)
 {
 	/*
+	 * Fragment cache limits. We will commit 256K at one time. Should we
+	 * cross that limit we will prune down to 192K. This should cope with
+	 * even the most extreme cases without allowing an attacker to
+	 * measurably harm machine performance.
+	 */
+	net->ipv4.frags.high_thresh = 256 * 1024;
+	net->ipv4.frags.low_thresh = 192 * 1024;
+	/*
 	 * Important NOTE! Fragment queue must be destroyed before MSL expires.
 	 * RFC791 is wrong proposing to prolongate timer each fragment arrival
 	 * by TTL.
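
For context, the eviction path this patch touches works as follows: once
a namespace's fragment memory climbs past its high_thresh, ip_defrag()
calls the evictor, which frees whole fragment queues, oldest first, until
usage drops back under low_thresh. A simplified sketch of
inet_frag_evictor() after this patch; the opening lines match the hunk
above, while the locking and the inet_frag_kill()/inet_frag_destroy()
calls are abbreviated from the kernel code of this era:

int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f)
{
	struct inet_frag_queue *q;
	int work, evicted = 0;

	/* Bytes to free to get back under this netns's low mark. */
	work = atomic_read(&nf->mem) - nf->low_thresh;
	while (work > 0) {
		read_lock(&f->lock);
		if (list_empty(&f->lru_list)) {
			read_unlock(&f->lock);
			break;
		}

		/* The oldest queue sits at the head of the LRU list. */
		q = list_first_entry(&f->lru_list,
				     struct inet_frag_queue, lru_list);
		atomic_inc(&q->refcnt);
		read_unlock(&f->lock);

		spin_lock(&q->lock);
		if (!(q->last_in & COMPLETE))
			inet_frag_kill(q, f);
		spin_unlock(&q->lock);

		/* Destroying the queue uncharges its memory via &work. */
		if (atomic_dec_and_test(&q->refcnt))
			inet_frag_destroy(q, f, &work);
		evicted++;
	}

	return evicted;
}

Note the one change this patch makes here: work is computed against
nf->low_thresh instead of the old global f->ctl->low_thresh. The LRU
list itself is still shared across namespaces (f->lru_list), which is
why, as the commit message notes, exceeding the high threshold still
evicts everyone's fragments; a later patch in the series addresses that.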