diff options
author | Julian Anastasov <ja@ssi.bg> | 2015-07-09 09:59:10 +0300 |
---|---|---|
committer | Ben Hutchings <ben@decadent.org.uk> | 2015-08-12 16:33:21 +0200 |
commit | 9191ab2f2b06b8f5ff9011a1b0fa62bd8adb7981 (patch) | |
tree | c3f4de612ded59a1040b2848ff17036b1d8ad5d0 /net | |
parent | 78b6803a1961369d0b8350ff1f99b7375bbd7a8f (diff) | |
download | kernel_samsung_smdk4412-9191ab2f2b06b8f5ff9011a1b0fa62bd8adb7981.zip kernel_samsung_smdk4412-9191ab2f2b06b8f5ff9011a1b0fa62bd8adb7981.tar.gz kernel_samsung_smdk4412-9191ab2f2b06b8f5ff9011a1b0fa62bd8adb7981.tar.bz2 |
net: call rcu_read_lock early in process_backlog
commit 2c17d27c36dcce2b6bf689f41a46b9e909877c21 upstream.
Incoming packet should be either in backlog queue or
in RCU read-side section. Otherwise, the final sequence of
flush_backlog() and synchronize_net() may miss packets
that can run without device reference:
CPU 1                  CPU 2
                       skb->dev: no reference
                       process_backlog:__skb_dequeue
                       process_backlog:local_irq_enable

on_each_cpu for
flush_backlog => IPI(hardirq): flush_backlog
                 - packet not found in backlog

                       CPU delayed ...
synchronize_net
- no ongoing RCU
read-side sections

netdev_run_todo,
rcu_barrier: no
ongoing callbacks
                       __netif_receive_skb_core:rcu_read_lock
                       - too late
free dev
                       process packet for freed dev
Fixes: 6e583ce5242f ("net: eliminate refcounting in backlog queue")
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: Julian Anastasov <ja@ssi.bg>
Signed-off-by: David S. Miller <davem@davemloft.net>
[bwh: Backported to 3.2:
- Adjust context
- No need to rename the label in __netif_receive_skb()]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Diffstat (limited to 'net')
-rw-r--r-- | net/core/dev.c | 26 |
1 file changed, 11 insertions(+), 15 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 42895aa..7f43202 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3261,8 +3261,6 @@ static int __netif_receive_skb(struct sk_buff *skb)

 	pt_prev = NULL;

-	rcu_read_lock();
-
 another_round:

 	__this_cpu_inc(softnet_data.processed);
@@ -3357,7 +3355,6 @@ ncls:
 	}

 out:
-	rcu_read_unlock();
 	return ret;
 }

@@ -3378,34 +3375,31 @@ out:
  */
 int netif_receive_skb(struct sk_buff *skb)
 {
+	int ret;
+
 	if (netdev_tstamp_prequeue)
 		net_timestamp_check(skb);

 	if (skb_defer_rx_timestamp(skb))
 		return NET_RX_SUCCESS;

+	rcu_read_lock();
+
 #ifdef CONFIG_RPS
 	{
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
-		int cpu, ret;
-
-		rcu_read_lock();
-
-		cpu = get_rps_cpu(skb->dev, skb, &rflow);
+		int cpu = get_rps_cpu(skb->dev, skb, &rflow);

 		if (cpu >= 0) {
 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
 			rcu_read_unlock();
-		} else {
-			rcu_read_unlock();
-			ret = __netif_receive_skb(skb);
+			return ret;
 		}
-
-		return ret;
 	}
-#else
-	return __netif_receive_skb(skb);
 #endif
+	ret = __netif_receive_skb(skb);
+	rcu_read_unlock();
+	return ret;
 }
 EXPORT_SYMBOL(netif_receive_skb);
@@ -3796,8 +3790,10 @@ static int process_backlog(struct napi_struct *napi, int quota)
 		unsigned int qlen;

 		while ((skb = __skb_dequeue(&sd->process_queue))) {
+			rcu_read_lock();
 			local_irq_enable();
 			__netif_receive_skb(skb);
+			rcu_read_unlock();
 			local_irq_disable();
 			input_queue_head_incr(sd);
 			if (++work >= quota) {