author		Eric Dumazet <dada1@cosmosbay.com>	2008-11-05 01:38:06 -0800
committer	David S. Miller <davem@davemloft.net>	2008-11-05 01:38:06 -0800
commit		270acefafeb74ce2fe93d35b75733870bf1e11e7
tree		9368122a53b2834d2cd7894a1a316a9fde5d19ca
parent		d99a7bd210a14001007fc5233597c78877f0a11c
net: sk_free_datagram() should use sk_mem_reclaim_partial()
I noticed contention on udp_memory_allocated in regular UDP applications.

While tcp_memory_allocated is seldom used, it appears each incoming UDP frame currently touches udp_memory_allocated when it is queued, and again when it is received by the application.

One possible solution is to use sk_mem_reclaim_partial() instead of sk_mem_reclaim(), so that we keep a small reserve (less than one page) of memory for each UDP socket.

We did something very similar on the TCP side in commit 9993e7d313e80bdc005d09c7def91903e0068f07 ([TCP]: Do not purge sk_forward_alloc entirely in tcp_delack_timer()).

A more complex solution would need to convert prot->memory_allocated to use a percpu_counter with batches of 64 or 128 pages.

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
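For context, the two helpers differ only in a single comparison. A simplified sketch of the 2.6.28-era definitions in include/net/sock.h follows (accounting details omitted, so treat this as an approximation of the source rather than a verbatim copy):

static inline void sk_mem_reclaim(struct sock *sk)
{
	if (!sk_has_account(sk))
		return;
	/* Return every whole SK_MEM_QUANTUM (one page) held in
	 * sk_forward_alloc back to prot->memory_allocated.
	 */
	if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
		__sk_mem_reclaim(sk);
}

static inline void sk_mem_reclaim_partial(struct sock *sk)
{
	if (!sk_has_account(sk))
		return;
	/* '>' instead of '>=': a socket holding exactly one quantum keeps
	 * it as a per-socket reserve, so the next small datagram can be
	 * charged from sk_forward_alloc without an atomic operation on the
	 * shared udp_memory_allocated counter.
	 */
	if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
		__sk_mem_reclaim(sk);
}

With the partial variant, a socket that repeatedly receives datagrams smaller than one page settles into a steady state where neither the charge path nor the reclaim path touches the global counter at all.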
Diffstat (limited to 'net/core/datagram.c')
-rw-r--r--	net/core/datagram.c	5
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/net/core/datagram.c b/net/core/datagram.c
index ee63184..5e2ac0c 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -209,7 +209,7 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
 void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
 {
 	kfree_skb(skb);
-	sk_mem_reclaim(sk);
+	sk_mem_reclaim_partial(sk);
 }
 
 /**
@@ -248,8 +248,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
 		spin_unlock_bh(&sk->sk_receive_queue.lock);
 	}
 
-	kfree_skb(skb);
-	sk_mem_reclaim(sk);
+	skb_free_datagram(sk, skb);
 
 	return err;
 }
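The percpu_counter conversion mentioned in the commit message is not part of this patch. As a purely hypothetical sketch of what the batched accounting could look like with the percpu_counter API of this era (the counter name, helper names, and batch size below are illustrative, not kernel code):

#include <linux/percpu_counter.h>

static struct percpu_counter udp_mem_pcpu;	/* hypothetical counter */

static int __init udp_mem_pcpu_init(void)
{
	return percpu_counter_init(&udp_mem_pcpu, 0);
}

static void udp_mem_charge(s64 nr_pages)
{
	/* Each CPU accumulates up to 128 pages locally; the shared count
	 * is only touched when a CPU's local delta crosses the batch
	 * threshold, spreading the contention seen on a plain atomic_t.
	 */
	__percpu_counter_add(&udp_mem_pcpu, nr_pages, 128);
}

The trade-off is that the global total becomes approximate by up to (batch * num_online_cpus()) pages, which is why the commit message frames it as the more complex option compared to the one-line reclaim change above.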