Diffstat (limited to 'net'): 38 files changed, 339 insertions, 180 deletions
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index ceadfcf..450eb02 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -581,15 +581,6 @@ static int hidp_session(void *arg) hidp_del_timer(session); - fput(session->intr_sock->file); - - wait_event_timeout(*(ctrl_sk->sk_sleep), - (ctrl_sk->sk_state == BT_CLOSED), msecs_to_jiffies(500)); - - fput(session->ctrl_sock->file); - - __hidp_unlink_session(session); - if (session->input) { input_unregister_device(session->input); session->input = NULL; @@ -601,6 +592,15 @@ static int hidp_session(void *arg) hid_free_device(session->hid); } + fput(session->intr_sock->file); + + wait_event_timeout(*(ctrl_sk->sk_sleep), + (ctrl_sk->sk_state == BT_CLOSED), msecs_to_jiffies(500)); + + fput(session->ctrl_sock->file); + + __hidp_unlink_session(session); + up_write(&hidp_session_sem); kfree(session); diff --git a/net/core/dev.c b/net/core/dev.c index 2609062..ee051bb 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2009,6 +2009,7 @@ static void net_rx_action(struct softirq_action *h) } } out: + local_irq_enable(); #ifdef CONFIG_NET_DMA /* * There may not be any more sk_buffs coming right now, so push @@ -2022,7 +2023,6 @@ out: rcu_read_unlock(); } #endif - local_irq_enable(); return; softnet_break: diff --git a/net/core/netevent.c b/net/core/netevent.c index 35d02c3..95f81de 100644 --- a/net/core/netevent.c +++ b/net/core/netevent.c @@ -15,6 +15,7 @@ #include <linux/rtnetlink.h> #include <linux/notifier.h> +#include <net/netevent.h> static ATOMIC_NOTIFIER_HEAD(netevent_notif_chain); diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 758dafe..a0efdd7 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -250,22 +250,23 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) unsigned long flags; local_irq_save(flags); - if (netif_tx_trylock(dev)) { - /* try until next clock tick */ - for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; - tries > 0; --tries) { + /* try until next clock tick */ + for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; + tries > 0; --tries) { + if (netif_tx_trylock(dev)) { if (!netif_queue_stopped(dev)) status = dev->hard_start_xmit(skb, dev); + netif_tx_unlock(dev); if (status == NETDEV_TX_OK) break; - /* tickle device maybe there is some cleanup */ - netpoll_poll(np); - - udelay(USEC_PER_POLL); } - netif_tx_unlock(dev); + + /* tickle device maybe there is some cleanup */ + netpoll_poll(np); + + udelay(USEC_PER_POLL); } local_irq_restore(flags); } @@ -785,8 +786,13 @@ void netpoll_cleanup(struct netpoll *np) skb_queue_purge(&npinfo->arp_tx); skb_queue_purge(&npinfo->txq); cancel_rearming_delayed_work(&npinfo->tx_work); - flush_scheduled_work(); + /* clean after last, unfinished work */ + if (!skb_queue_empty(&npinfo->txq)) { + struct sk_buff *skb; + skb = __skb_dequeue(&npinfo->txq); + kfree_skb(skb); + } kfree(npinfo); } } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 7c6a34e..3943c3a 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -434,8 +434,8 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) n->tc_verd = CLR_TC_MUNGED(n->tc_verd); C(iif); #endif - skb_copy_secmark(n, skb); #endif + skb_copy_secmark(n, skb); C(truesize); atomic_set(&n->users, 1); C(head); @@ -1706,6 +1706,11 @@ next_skb: st->stepped_offset += frag->size; } + if (st->frag_data) { + kunmap_skb_frag(st->frag_data); + st->frag_data = NULL; + } + if (st->cur_skb->next) { st->cur_skb = st->cur_skb->next; st->frag_idx = 0; @@ -2206,7 +2211,6 @@ 
EXPORT_SYMBOL(pskb_copy); EXPORT_SYMBOL(pskb_expand_head); EXPORT_SYMBOL(skb_checksum); EXPORT_SYMBOL(skb_clone); -EXPORT_SYMBOL(skb_clone_fraglist); EXPORT_SYMBOL(skb_copy); EXPORT_SYMBOL(skb_copy_and_csum_bits); EXPORT_SYMBOL(skb_copy_and_csum_dev); diff --git a/net/ieee80211/softmac/ieee80211softmac_module.c b/net/ieee80211/softmac/ieee80211softmac_module.c index c308756..6398e6e 100644 --- a/net/ieee80211/softmac/ieee80211softmac_module.c +++ b/net/ieee80211/softmac/ieee80211softmac_module.c @@ -456,18 +456,13 @@ void ieee80211softmac_add_network_locked(struct ieee80211softmac_device *mac, struct ieee80211softmac_network *add_net) { - struct list_head *list_ptr; - struct ieee80211softmac_network *softmac_net = NULL; + struct ieee80211softmac_network *softmac_net; - list_for_each(list_ptr, &mac->network_list) { - softmac_net = list_entry(list_ptr, struct ieee80211softmac_network, list); + list_for_each_entry(softmac_net, &mac->network_list, list) { if(!memcmp(softmac_net->bssid, add_net->bssid, ETH_ALEN)) - break; - else - softmac_net = NULL; + return; } - if(softmac_net == NULL) - list_add(&(add_net->list), &mac->network_list); + list_add(&(add_net->list), &mac->network_list); } /* Add a network to the list, with locking */ @@ -506,16 +501,13 @@ struct ieee80211softmac_network * ieee80211softmac_get_network_by_bssid_locked(struct ieee80211softmac_device *mac, u8 *bssid) { - struct list_head *list_ptr; - struct ieee80211softmac_network *softmac_net = NULL; - list_for_each(list_ptr, &mac->network_list) { - softmac_net = list_entry(list_ptr, struct ieee80211softmac_network, list); + struct ieee80211softmac_network *softmac_net; + + list_for_each_entry(softmac_net, &mac->network_list, list) { if(!memcmp(softmac_net->bssid, bssid, ETH_ALEN)) - break; - else - softmac_net = NULL; + return softmac_net; } - return softmac_net; + return NULL; } /* Get a network from the list by BSSID with locking */ @@ -537,11 +529,9 @@ struct ieee80211softmac_network * ieee80211softmac_get_network_by_essid_locked(struct ieee80211softmac_device *mac, struct ieee80211softmac_essid *essid) { - struct list_head *list_ptr; - struct ieee80211softmac_network *softmac_net = NULL; + struct ieee80211softmac_network *softmac_net; - list_for_each(list_ptr, &mac->network_list) { - softmac_net = list_entry(list_ptr, struct ieee80211softmac_network, list); + list_for_each_entry(softmac_net, &mac->network_list, list) { if (softmac_net->essid.len == essid->len && !memcmp(softmac_net->essid.data, essid->data, essid->len)) return softmac_net; diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c index 7ea2d98..356f067 100644 --- a/net/ipv4/ipvs/ip_vs_sync.c +++ b/net/ipv4/ipvs/ip_vs_sync.c @@ -67,6 +67,11 @@ struct ip_vs_sync_conn_options { struct ip_vs_seq out_seq; /* outgoing seq. 
struct */ }; +struct ip_vs_sync_thread_data { + struct completion *startup; + int state; +}; + #define IP_VS_SYNC_CONN_TIMEOUT (3*60*HZ) #define SIMPLE_CONN_SIZE (sizeof(struct ip_vs_sync_conn)) #define FULL_CONN_SIZE \ @@ -751,6 +756,7 @@ static int sync_thread(void *startup) mm_segment_t oldmm; int state; const char *name; + struct ip_vs_sync_thread_data *tinfo = startup; /* increase the module use count */ ip_vs_use_count_inc(); @@ -789,7 +795,14 @@ static int sync_thread(void *startup) add_wait_queue(&sync_wait, &wait); set_sync_pid(state, current->pid); - complete((struct completion *)startup); + complete(tinfo->startup); + + /* + * once we call the completion queue above, we should + * null out that reference, since its allocated on the + * stack of the creating kernel thread + */ + tinfo->startup = NULL; /* processing master/backup loop here */ if (state == IP_VS_STATE_MASTER) @@ -801,6 +814,14 @@ static int sync_thread(void *startup) remove_wait_queue(&sync_wait, &wait); /* thread exits */ + + /* + * If we weren't explicitly stopped, then we + * exited in error, and should undo our state + */ + if ((!stop_master_sync) && (!stop_backup_sync)) + ip_vs_sync_state -= tinfo->state; + set_sync_pid(state, 0); IP_VS_INFO("sync thread stopped!\n"); @@ -812,6 +833,11 @@ static int sync_thread(void *startup) set_stop_sync(state, 0); wake_up(&stop_sync_wait); + /* + * we need to free the structure that was allocated + * for us in start_sync_thread + */ + kfree(tinfo); return 0; } @@ -838,11 +864,19 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid) { DECLARE_COMPLETION_ONSTACK(startup); pid_t pid; + struct ip_vs_sync_thread_data *tinfo; if ((state == IP_VS_STATE_MASTER && sync_master_pid) || (state == IP_VS_STATE_BACKUP && sync_backup_pid)) return -EEXIST; + /* + * Note that tinfo will be freed in sync_thread on exit + */ + tinfo = kmalloc(sizeof(struct ip_vs_sync_thread_data), GFP_KERNEL); + if (!tinfo) + return -ENOMEM; + IP_VS_DBG(7, "%s: pid %d\n", __FUNCTION__, current->pid); IP_VS_DBG(7, "Each ip_vs_sync_conn entry need %Zd bytes\n", sizeof(struct ip_vs_sync_conn)); @@ -858,8 +892,11 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid) ip_vs_backup_syncid = syncid; } + tinfo->state = state; + tinfo->startup = &startup; + repeat: - if ((pid = kernel_thread(fork_sync_thread, &startup, 0)) < 0) { + if ((pid = kernel_thread(fork_sync_thread, tinfo, 0)) < 0) { IP_VS_ERR("could not create fork_sync_thread due to %d... " "retrying.\n", pid); msleep_interruptible(1000); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index cd3c7e9..450f44b 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1064,7 +1064,11 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, break; } used = recv_actor(desc, skb, offset, len); - if (used <= len) { + if (used < 0) { + if (!copied) + copied = used; + break; + } else if (used <= len) { seq += used; copied += used; offset += used; @@ -1086,7 +1090,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, tcp_rcv_space_adjust(sk); /* Clean up data we have read: This will do ACK frames. 
*/ - if (copied) + if (copied > 0) tcp_cleanup_rbuf(sk, copied); return copied; } diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c index 281c9f9..dd9ef65 100644 --- a/net/ipv4/tcp_bic.c +++ b/net/ipv4/tcp_bic.c @@ -29,7 +29,7 @@ static int fast_convergence = 1; static int max_increment = 16; static int low_window = 14; static int beta = 819; /* = 819/1024 (BICTCP_BETA_SCALE) */ -static int initial_ssthresh = 100; +static int initial_ssthresh; static int smooth_part = 20; module_param(fast_convergence, int, 0644); diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 1422448..ebfaac2 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -29,7 +29,7 @@ static int fast_convergence __read_mostly = 1; static int max_increment __read_mostly = 16; static int beta __read_mostly = 819; /* = 819/1024 (BICTCP_BETA_SCALE) */ -static int initial_ssthresh __read_mostly = 100; +static int initial_ssthresh __read_mostly; static int bic_scale __read_mostly = 41; static int tcp_friendliness __read_mostly = 1; diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c index 4adc47c..b2b2256 100644 --- a/net/ipv4/tcp_illinois.c +++ b/net/ipv4/tcp_illinois.c @@ -90,6 +90,9 @@ static void tcp_illinois_acked(struct sock *sk, u32 pkts_acked, ktime_t last) ca->acked = pkts_acked; + if (ktime_equal(last, net_invalid_timestamp())) + return; + rtt = ktime_to_us(net_timedelta(last)); /* ignore bogus values, this prevents wraparound in alpha math */ diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 74683d8..69f9f1e 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -953,7 +953,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ int prior_fackets; u32 lost_retrans = 0; int flag = 0; - int dup_sack = 0; + int found_dup_sack = 0; int cached_fack_count; int i; int first_sack_index; @@ -964,20 +964,20 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ /* Check for D-SACK. */ if (before(ntohl(sp[0].start_seq), TCP_SKB_CB(ack_skb)->ack_seq)) { - dup_sack = 1; + found_dup_sack = 1; tp->rx_opt.sack_ok |= 4; NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV); } else if (num_sacks > 1 && !after(ntohl(sp[0].end_seq), ntohl(sp[1].end_seq)) && !before(ntohl(sp[0].start_seq), ntohl(sp[1].start_seq))) { - dup_sack = 1; + found_dup_sack = 1; tp->rx_opt.sack_ok |= 4; NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV); } /* D-SACK for already forgotten data... * Do dumb counting. 
*/ - if (dup_sack && + if (found_dup_sack && !after(ntohl(sp[0].end_seq), prior_snd_una) && after(ntohl(sp[0].end_seq), tp->undo_marker)) tp->undo_retrans--; @@ -1058,6 +1058,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ __u32 start_seq = ntohl(sp->start_seq); __u32 end_seq = ntohl(sp->end_seq); int fack_count; + int dup_sack = (found_dup_sack && (i == first_sack_index)); skb = cached_skb; fack_count = cached_fack_count; @@ -2037,7 +2038,7 @@ static void tcp_try_to_open(struct sock *sk, int flag) { struct tcp_sock *tp = tcp_sk(sk); - tp->left_out = tp->sacked_out; + tcp_sync_left_out(tp); if (tp->retrans_out == 0) tp->retrans_stamp = 0; @@ -2409,7 +2410,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p) int acked = 0; int prior_packets = tp->packets_out; __s32 seq_rtt = -1; - ktime_t last_ackt = ktime_set(0,0); + ktime_t last_ackt = net_invalid_timestamp(); while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) { @@ -2487,6 +2488,10 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p) tcp_ack_update_rtt(sk, acked, seq_rtt); tcp_ack_packets_out(sk); + /* Is the ACK triggering packet unambiguous? */ + if (acked & FLAG_RETRANS_DATA_ACKED) + last_ackt = net_invalid_timestamp(); + if (ca_ops->pkts_acked) ca_ops->pkts_acked(sk, pkts_acked, last_ackt); } @@ -2932,6 +2937,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, opt_rx->sack_ok) { TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th; } + break; #ifdef CONFIG_TCP_MD5SIG case TCPOPT_MD5SIG: /* diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 97e294e..354721d 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -878,6 +878,7 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, kfree(newkey); return -ENOMEM; } + sk->sk_route_caps &= ~NETIF_F_GSO_MASK; } if (tcp_alloc_md5sig_pool() == NULL) { kfree(newkey); @@ -1007,7 +1008,7 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval, return -EINVAL; tp->md5sig_info = p; - + sk->sk_route_caps &= ~NETIF_F_GSO_MASK; } newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL); diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c index 43294ad..e49836c 100644 --- a/net/ipv4/tcp_lp.c +++ b/net/ipv4/tcp_lp.c @@ -266,7 +266,8 @@ static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked, ktime_t last) struct tcp_sock *tp = tcp_sk(sk); struct lp *lp = inet_csk_ca(sk); - tcp_lp_rtt_sample(sk, ktime_to_us(net_timedelta(last))); + if (!ktime_equal(last, net_invalid_timestamp())) + tcp_lp_rtt_sample(sk, ktime_to_us(net_timedelta(last))); /* calc inference */ if (tcp_time_stamp > tp->rx_opt.rcv_tsecr) diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c index 73e19cf..e218a51 100644 --- a/net/ipv4/tcp_vegas.c +++ b/net/ipv4/tcp_vegas.c @@ -117,6 +117,9 @@ void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, ktime_t last) struct vegas *vegas = inet_csk_ca(sk); u32 vrtt; + if (ktime_equal(last, net_invalid_timestamp())) + return; + /* Never allow zero rtt or baseRTT */ vrtt = ktime_to_us(net_timedelta(last)) + 1; diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c index 9edb340..ec854cc 100644 --- a/net/ipv4/tcp_veno.c +++ b/net/ipv4/tcp_veno.c @@ -74,6 +74,9 @@ static void tcp_veno_pkts_acked(struct sock *sk, u32 cnt, ktime_t last) struct veno *veno = inet_csk_ca(sk); u32 vrtt; + if (ktime_equal(last, net_invalid_timestamp())) + return; + /* Never allow zero rtt or baseRTT */ vrtt = ktime_to_us(net_timedelta(last)) + 1; diff --git 
a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 5a5f8bd..79b79f3 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -2154,6 +2154,15 @@ static void addrconf_dev_config(struct net_device *dev) ASSERT_RTNL(); + if ((dev->type != ARPHRD_ETHER) && + (dev->type != ARPHRD_FDDI) && + (dev->type != ARPHRD_IEEE802_TR) && + (dev->type != ARPHRD_ARCNET) && + (dev->type != ARPHRD_INFINIBAND)) { + /* Alas, we support only Ethernet autoconfiguration. */ + return; + } + idev = addrconf_add_dev(dev); if (idev == NULL) return; @@ -2241,36 +2250,16 @@ static void addrconf_ip6_tnl_config(struct net_device *dev) ip6_tnl_add_linklocal(idev); } -static int ipv6_hwtype(struct net_device *dev) -{ - if ((dev->type == ARPHRD_ETHER) || - (dev->type == ARPHRD_LOOPBACK) || - (dev->type == ARPHRD_SIT) || - (dev->type == ARPHRD_TUNNEL6) || - (dev->type == ARPHRD_FDDI) || - (dev->type == ARPHRD_IEEE802_TR) || - (dev->type == ARPHRD_ARCNET) || - (dev->type == ARPHRD_INFINIBAND)) - return 1; - - return 0; -} - static int addrconf_notify(struct notifier_block *this, unsigned long event, void * data) { struct net_device *dev = (struct net_device *) data; - struct inet6_dev *idev; + struct inet6_dev *idev = __in6_dev_get(dev); int run_pending = 0; - if (!ipv6_hwtype(dev)) - return NOTIFY_OK; - - idev = __in6_dev_get(dev); - switch(event) { case NETDEV_REGISTER: - if (!idev) { + if (!idev && dev->mtu >= IPV6_MIN_MTU) { idev = ipv6_add_dev(dev); if (!idev) printk(KERN_WARNING "IPv6: add_dev failed for %s\n", @@ -2279,6 +2268,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, break; case NETDEV_UP: case NETDEV_CHANGE: + if (dev->flags & IFF_SLAVE) + break; + if (event == NETDEV_UP) { if (!netif_carrier_ok(dev)) { /* device is not ready yet. */ diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index d8b3645..0358e60 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -1062,7 +1062,7 @@ static void ndisc_router_discovery(struct sk_buff *skb) pref = ra_msg->icmph.icmp6_router_pref; /* 10b is handled as if it were 00b (medium) */ if (pref == ICMPV6_ROUTER_PREF_INVALID || - in6_dev->cnf.accept_ra_rtr_pref) + !in6_dev->cnf.accept_ra_rtr_pref) pref = ICMPV6_ROUTER_PREF_MEDIUM; #endif diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 4f06a51..193d9d6 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -590,6 +590,7 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer, kfree(newkey); return -ENOMEM; } + sk->sk_route_caps &= ~NETIF_F_GSO_MASK; } tcp_alloc_md5sig_pool(); if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) { @@ -724,6 +725,7 @@ static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval, return -ENOMEM; tp->md5sig_info = p; + sk->sk_route_caps &= ~NETIF_F_GSO_MASK; } newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL); diff --git a/net/irda/irlap_event.c b/net/irda/irlap_event.c index 0b02073..a8b8873 100644 --- a/net/irda/irlap_event.c +++ b/net/irda/irlap_event.c @@ -317,23 +317,6 @@ void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event, } /* - * Function irlap_next_state (self, state) - * - * Switches state and provides debug information - * - */ -static inline void irlap_next_state(struct irlap_cb *self, IRLAP_STATE state) -{ - /* - if (!self || self->magic != LAP_MAGIC) - return; - - IRDA_DEBUG(4, "next LAP state = %s\n", irlap_state[state]); - */ - self->state = state; -} - -/* * Function irlap_state_ndm (event, skb, frame) * * NDM (Normal Disconnected Mode) state @@ -1086,7 +1069,6 @@ static int 
irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event, } else { /* Final packet of window */ irlap_send_data_primary_poll(self, skb); - irlap_next_state(self, LAP_NRM_P); /* * Make sure state machine does not try to send @@ -1436,14 +1418,14 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event, */ self->remote_busy = FALSE; + /* Stop final timer */ + del_timer(&self->final_timer); + /* * Nr as expected? */ ret = irlap_validate_nr_received(self, info->nr); if (ret == NR_EXPECTED) { - /* Stop final timer */ - del_timer(&self->final_timer); - /* Update Nr received */ irlap_update_nr_received(self, info->nr); @@ -1475,14 +1457,12 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event, /* Resend rejected frames */ irlap_resend_rejected_frames(self, CMD_FRAME); - - /* Final timer ??? Jean II */ + irlap_start_final_timer(self, self->final_timeout * 2); irlap_next_state(self, LAP_NRM_P); } else if (ret == NR_INVALID) { IRDA_DEBUG(1, "%s(), Received RR with " "invalid nr !\n", __FUNCTION__); - del_timer(&self->final_timer); irlap_next_state(self, LAP_RESET_WAIT); diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c index 3c5a68e..3013c49 100644 --- a/net/irda/irlap_frame.c +++ b/net/irda/irlap_frame.c @@ -798,16 +798,19 @@ void irlap_send_data_primary_poll(struct irlap_cb *self, struct sk_buff *skb) self->vs = (self->vs + 1) % 8; self->ack_required = FALSE; + irlap_next_state(self, LAP_NRM_P); irlap_send_i_frame(self, tx_skb, CMD_FRAME); } else { IRDA_DEBUG(4, "%s(), sending unreliable frame\n", __FUNCTION__); if (self->ack_required) { irlap_send_ui_frame(self, skb_get(skb), self->caddr, CMD_FRAME); + irlap_next_state(self, LAP_NRM_P); irlap_send_rr_frame(self, CMD_FRAME); self->ack_required = FALSE; } else { skb->data[1] |= PF_BIT; + irlap_next_state(self, LAP_NRM_P); irlap_send_ui_frame(self, skb_get(skb), self->caddr, CMD_FRAME); } } diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index bb6c0fe..476c848 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c @@ -112,7 +112,7 @@ DEBUGFS_READONLY_FILE(wep_iv, 20, "%#06x", local->wep_iv & 0xffffff); DEBUGFS_READONLY_FILE(tx_power_reduction, 20, "%d.%d dBm", local->hw.conf.tx_power_reduction / 10, - local->hw.conf.tx_power_reduction & 10); + local->hw.conf.tx_power_reduction % 10); DEBUGFS_READONLY_FILE(rate_ctrl_alg, 100, "%s", local->rate_ctrl ? local->rate_ctrl->ops->name : "<unset>"); diff --git a/net/mac80211/ieee80211_sta.c b/net/mac80211/ieee80211_sta.c index 9f30ae4..91b545c 100644 --- a/net/mac80211/ieee80211_sta.c +++ b/net/mac80211/ieee80211_sta.c @@ -2592,11 +2592,17 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw) read_lock(&local->sub_if_lock); list_for_each_entry(sdata, &local->sub_if_list, list) { + + /* No need to wake the master device. */ + if (sdata->dev == local->mdev) + continue; + if (sdata->type == IEEE80211_IF_TYPE_STA) { if (sdata->u.sta.associated) ieee80211_send_nullfunc(local, sdata, 0); ieee80211_sta_timer((unsigned long)sdata); } + netif_wake_queue(sdata->dev); } read_unlock(&local->sub_if_lock); @@ -2738,6 +2744,12 @@ static int ieee80211_sta_start_scan(struct net_device *dev, read_lock(&local->sub_if_lock); list_for_each_entry(sdata, &local->sub_if_list, list) { + + /* Don't stop the master interface, otherwise we can't transmit + * probes! 
*/ + if (sdata->dev == local->mdev) + continue; + netif_stop_queue(sdata->dev); if (sdata->type == IEEE80211_IF_TYPE_STA && sdata->u.sta.associated) diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c index f6fad71..6b7eaa0 100644 --- a/net/netfilter/nf_conntrack_h323_asn1.c +++ b/net/netfilter/nf_conntrack_h323_asn1.c @@ -518,7 +518,7 @@ int decode_seq(bitstr_t * bs, field_t * f, char *base, int level) CHECK_BOUND(bs, 2); len = get_len(bs); CHECK_BOUND(bs, len); - if (!base) { + if (!base || !(son->attr & DECODE)) { PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ", son->name); bs->cur += len; @@ -704,6 +704,8 @@ int decode_choice(bitstr_t * bs, field_t * f, char *base, int level) } else { ext = 0; type = get_bits(bs, f->sz); + if (type >= f->lb) + return H323_ERROR_RANGE; } /* Write Type */ diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 3f73327..d0fe3d7 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -869,8 +869,7 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nfattr *cda[]) return 0; if (help->helper) - /* we had a helper before ... */ - nf_ct_remove_expectations(ct); + return -EBUSY; /* need to zero data of old helper */ memset(&help->help, 0, sizeof(help->help)); diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index 7aaa8c9..1b5c6c1 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c @@ -442,6 +442,9 @@ static int sip_help(struct sk_buff **pskb, /* RTP info only in some SDP pkts */ if (memcmp(dptr, "INVITE", sizeof("INVITE") - 1) != 0 && + memcmp(dptr, "UPDATE", sizeof("UPDATE") - 1) != 0 && + memcmp(dptr, "SIP/2.0 180", sizeof("SIP/2.0 180") - 1) != 0 && + memcmp(dptr, "SIP/2.0 183", sizeof("SIP/2.0 183") - 1) != 0 && memcmp(dptr, "SIP/2.0 200", sizeof("SIP/2.0 200") - 1) != 0) { goto out; } diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c index 43cb3e0..482750e 100644 --- a/net/rxrpc/ar-connection.c +++ b/net/rxrpc/ar-connection.c @@ -211,7 +211,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp) conn->header_size = sizeof(struct rxrpc_header); } - _leave(" = %p{%d}", conn, conn->debug_id); + _leave(" = %p{%d}", conn, conn ? conn->debug_id : 0); return conn; } diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c index 591c442..cc9102c 100644 --- a/net/rxrpc/ar-output.c +++ b/net/rxrpc/ar-output.c @@ -640,6 +640,7 @@ static int rxrpc_send_data(struct kiocb *iocb, goto efault; sp->remain -= copy; skb->mark += copy; + copied += copy; len -= copy; segment -= copy; @@ -709,6 +710,8 @@ static int rxrpc_send_data(struct kiocb *iocb, } while (segment > 0); +success: + ret = copied; out: call->tx_pending = skb; _leave(" = %d", ret); @@ -725,7 +728,7 @@ call_aborted: maybe_error: if (copied) - ret = copied; + goto success; goto out; efault: diff --git a/net/sctp/associola.c b/net/sctp/associola.c index df94e3c..498edb0 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -1231,6 +1231,10 @@ void sctp_assoc_sync_pmtu(struct sctp_association *asoc) /* Get the lowest pmtu of all the transports. 
*/ list_for_each(pos, &asoc->peer.transport_addr_list) { t = list_entry(pos, struct sctp_transport, transports); + if (t->pmtu_pending && t->dst) { + sctp_transport_update_pmtu(t, dst_mtu(t->dst)); + t->pmtu_pending = 0; + } if (!pmtu || (t->pathmtu < pmtu)) pmtu = t->pathmtu; } diff --git a/net/sctp/input.c b/net/sctp/input.c index 885109f..d57ff7f 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -367,24 +367,18 @@ static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb) void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc, struct sctp_transport *t, __u32 pmtu) { - if (sock_owned_by_user(sk) || !t || (t->pathmtu == pmtu)) + if (!t || (t->pathmtu == pmtu)) return; + if (sock_owned_by_user(sk)) { + asoc->pmtu_pending = 1; + t->pmtu_pending = 1; + return; + } + if (t->param_flags & SPP_PMTUD_ENABLE) { - if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) { - printk(KERN_WARNING "%s: Reported pmtu %d too low, " - "using default minimum of %d\n", - __FUNCTION__, pmtu, - SCTP_DEFAULT_MINSEGMENT); - /* Use default minimum segment size and disable - * pmtu discovery on this transport. - */ - t->pathmtu = SCTP_DEFAULT_MINSEGMENT; - t->param_flags = (t->param_flags & ~SPP_PMTUD) | - SPP_PMTUD_DISABLE; - } else { - t->pathmtu = pmtu; - } + /* Update transports view of the MTU */ + sctp_transport_update_pmtu(t, pmtu); /* Update association pmtu. */ sctp_assoc_sync_pmtu(asoc); diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 84cd536..2c29394 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -844,6 +844,10 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr) dev = dev_get_by_index(addr->v6.sin6_scope_id); if (!dev) return 0; + if (!ipv6_chk_addr(&addr->v6.sin6_addr, dev, 0)) { + dev_put(dev); + return 0; + } dev_put(dev); } af = opt->pf->af; diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 4dcdabf..b1917f6 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -333,12 +333,19 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) if (!sp->pf->bind_verify(sp, addr)) return -EADDRNOTAVAIL; - /* We must either be unbound, or bind to the same port. */ - if (bp->port && (snum != bp->port)) { - SCTP_DEBUG_PRINTK("sctp_do_bind:" + /* We must either be unbound, or bind to the same port. + * It's OK to allow 0 ports if we are already bound. + * We'll just inhert an already bound port in this case + */ + if (bp->port) { + if (!snum) + snum = bp->port; + else if (snum != bp->port) { + SCTP_DEBUG_PRINTK("sctp_do_bind:" " New port %d does not match existing port " "%d.\n", snum, bp->port); - return -EINVAL; + return -EINVAL; + } } if (snum && snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) @@ -973,6 +980,7 @@ static int __sctp_connect(struct sock* sk, union sctp_addr *sa_addr; void *addr_buf; unsigned short port; + unsigned int f_flags = 0; sp = sctp_sk(sk); ep = sp->ep; @@ -1099,7 +1107,14 @@ static int __sctp_connect(struct sock* sk, af->to_sk_daddr(&to, sk); sk->sk_err = 0; - timeo = sock_sndtimeo(sk, sk->sk_socket->file->f_flags & O_NONBLOCK); + /* in-kernel sockets don't generally have a file allocated to them + * if all they do is call sock_create_kern(). + */ + if (sk->sk_socket->file) + f_flags = sk->sk_socket->file->f_flags; + + timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); + err = sctp_wait_for_connect(asoc, &timeo); /* Don't free association on exit. 
*/ @@ -1655,6 +1670,9 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, goto out_free; } + if (asoc->pmtu_pending) + sctp_assoc_pending_pmtu(asoc); + /* If fragmentation is disabled and the message length exceeds the * association fragmentation point, return EMSGSIZE. The I-D * does not specify what this error is, but this looks like @@ -3365,12 +3383,13 @@ static int sctp_getsockopt_sctp_status(struct sock *sk, int len, sctp_assoc_t associd; int retval = 0; - if (len != sizeof(status)) { + if (len < sizeof(status)) { retval = -EINVAL; goto out; } - if (copy_from_user(&status, optval, sizeof(status))) { + len = sizeof(status); + if (copy_from_user(&status, optval, len)) { retval = -EFAULT; goto out; } @@ -3442,12 +3461,13 @@ static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len, struct sctp_transport *transport; int retval = 0; - if (len != sizeof(pinfo)) { + if (len < sizeof(pinfo)) { retval = -EINVAL; goto out; } - if (copy_from_user(&pinfo, optval, sizeof(pinfo))) { + len = sizeof(pinfo); + if (copy_from_user(&pinfo, optval, len)) { retval = -EFAULT; goto out; } @@ -3513,8 +3533,11 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len, static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval, int __user *optlen) { - if (len != sizeof(struct sctp_event_subscribe)) + if (len < sizeof(struct sctp_event_subscribe)) return -EINVAL; + len = sizeof(struct sctp_event_subscribe); + if (put_user(len, optlen)) + return -EFAULT; if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len)) return -EFAULT; return 0; @@ -3536,9 +3559,12 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optv /* Applicable to UDP-style socket only */ if (sctp_style(sk, TCP)) return -EOPNOTSUPP; - if (len != sizeof(int)) + if (len < sizeof(int)) return -EINVAL; - if (copy_to_user(optval, &sctp_sk(sk)->autoclose, len)) + len = sizeof(int); + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int))) return -EFAULT; return 0; } @@ -3550,6 +3576,7 @@ SCTP_STATIC int sctp_do_peeloff(struct sctp_association *asoc, struct sock *sk = asoc->base.sk; struct socket *sock; struct inet_sock *inetsk; + struct sctp_af *af; int err = 0; /* An association cannot be branched off from an already peeled-off @@ -3571,8 +3598,9 @@ SCTP_STATIC int sctp_do_peeloff(struct sctp_association *asoc, /* Make peeled-off sockets more like 1-1 accepted sockets. * Set the daddr and initialize id to something more random */ + af = sctp_get_af_specific(asoc->peer.primary_addr.sa.sa_family); + af->to_sk_daddr(&asoc->peer.primary_addr, sk); inetsk = inet_sk(sock->sk); - inetsk->daddr = asoc->peer.primary_addr.v4.sin_addr.s_addr; inetsk->id = asoc->next_tsn ^ jiffies; *sockp = sock; @@ -3587,8 +3615,9 @@ static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval int retval = 0; struct sctp_association *asoc; - if (len != sizeof(sctp_peeloff_arg_t)) + if (len < sizeof(sctp_peeloff_arg_t)) return -EINVAL; + len = sizeof(sctp_peeloff_arg_t); if (copy_from_user(&peeloff, optval, len)) return -EFAULT; @@ -3616,6 +3645,8 @@ static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval /* Return the fd mapped to the new socket. 
*/ peeloff.sd = retval; + if (put_user(len, optlen)) + return -EFAULT; if (copy_to_user(optval, &peeloff, len)) retval = -EFAULT; @@ -3724,9 +3755,9 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len, struct sctp_association *asoc = NULL; struct sctp_sock *sp = sctp_sk(sk); - if (len != sizeof(struct sctp_paddrparams)) + if (len < sizeof(struct sctp_paddrparams)) return -EINVAL; - + len = sizeof(struct sctp_paddrparams); if (copy_from_user(¶ms, optval, len)) return -EFAULT; @@ -3825,9 +3856,11 @@ static int sctp_getsockopt_delayed_ack_time(struct sock *sk, int len, struct sctp_association *asoc = NULL; struct sctp_sock *sp = sctp_sk(sk); - if (len != sizeof(struct sctp_assoc_value)) + if (len < sizeof(struct sctp_assoc_value)) return - EINVAL; + len = sizeof(struct sctp_assoc_value); + if (copy_from_user(¶ms, optval, len)) return -EFAULT; @@ -3876,8 +3909,11 @@ static int sctp_getsockopt_delayed_ack_time(struct sock *sk, int len, */ static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen) { - if (len != sizeof(struct sctp_initmsg)) + if (len < sizeof(struct sctp_initmsg)) return -EINVAL; + len = sizeof(struct sctp_initmsg); + if (put_user(len, optlen)) + return -EFAULT; if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len)) return -EFAULT; return 0; @@ -3892,7 +3928,7 @@ static int sctp_getsockopt_peer_addrs_num_old(struct sock *sk, int len, struct list_head *pos; int cnt = 0; - if (len != sizeof(sctp_assoc_t)) + if (len < sizeof(sctp_assoc_t)) return -EINVAL; if (copy_from_user(&id, optval, sizeof(sctp_assoc_t))) @@ -3928,10 +3964,12 @@ static int sctp_getsockopt_peer_addrs_old(struct sock *sk, int len, struct sctp_sock *sp = sctp_sk(sk); int addrlen; - if (len != sizeof(struct sctp_getaddrs_old)) + if (len < sizeof(struct sctp_getaddrs_old)) return -EINVAL; - if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs_old))) + len = sizeof(struct sctp_getaddrs_old); + + if (copy_from_user(&getaddrs, optval, len)) return -EFAULT; if (getaddrs.addr_num <= 0) return -EINVAL; @@ -3954,7 +3992,9 @@ static int sctp_getsockopt_peer_addrs_old(struct sock *sk, int len, if (cnt >= getaddrs.addr_num) break; } getaddrs.addr_num = cnt; - if (copy_to_user(optval, &getaddrs, sizeof(struct sctp_getaddrs_old))) + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(optval, &getaddrs, len)) return -EFAULT; return 0; @@ -3987,8 +4027,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len, return -EINVAL; to = optval + offsetof(struct sctp_getaddrs,addrs); - space_left = len - sizeof(struct sctp_getaddrs) - - offsetof(struct sctp_getaddrs,addrs); + space_left = len - offsetof(struct sctp_getaddrs,addrs); list_for_each(pos, &asoc->peer.transport_addr_list) { from = list_entry(pos, struct sctp_transport, transports); @@ -4025,7 +4064,7 @@ static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len, rwlock_t *addr_lock; int cnt = 0; - if (len != sizeof(sctp_assoc_t)) + if (len < sizeof(sctp_assoc_t)) return -EINVAL; if (copy_from_user(&id, optval, sizeof(sctp_assoc_t))) @@ -4139,7 +4178,7 @@ static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to, to += addrlen; cnt ++; space_left -= addrlen; - bytes_copied += addrlen; + *bytes_copied += addrlen; } return cnt; @@ -4167,10 +4206,11 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len, void *buf; int bytes_copied = 0; - if (len != sizeof(struct sctp_getaddrs_old)) + if (len < sizeof(struct sctp_getaddrs_old)) return -EINVAL; - if 
(copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs_old))) + len = sizeof(struct sctp_getaddrs_old); + if (copy_from_user(&getaddrs, optval, len)) return -EFAULT; if (getaddrs.addr_num <= 0) return -EINVAL; @@ -4242,7 +4282,7 @@ copy_getaddrs: /* copy the leading structure back to user */ getaddrs.addr_num = cnt; - if (copy_to_user(optval, &getaddrs, sizeof(struct sctp_getaddrs_old))) + if (copy_to_user(optval, &getaddrs, len)) err = -EFAULT; error: @@ -4270,7 +4310,7 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len, void *addrs; void *buf; - if (len <= sizeof(struct sctp_getaddrs)) + if (len < sizeof(struct sctp_getaddrs)) return -EINVAL; if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) @@ -4294,8 +4334,8 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len, } to = optval + offsetof(struct sctp_getaddrs,addrs); - space_left = len - sizeof(struct sctp_getaddrs) - - offsetof(struct sctp_getaddrs,addrs); + space_left = len - offsetof(struct sctp_getaddrs,addrs); + addrs = kmalloc(space_left, GFP_KERNEL); if (!addrs) return -ENOMEM; @@ -4343,11 +4383,12 @@ copy_getaddrs: err = -EFAULT; goto error; } - if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) - return -EFAULT; + if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) { + err = -EFAULT; + goto error; + } if (put_user(bytes_copied, optlen)) - return -EFAULT; - + err = -EFAULT; error: kfree(addrs); return err; @@ -4366,10 +4407,12 @@ static int sctp_getsockopt_primary_addr(struct sock *sk, int len, struct sctp_association *asoc; struct sctp_sock *sp = sctp_sk(sk); - if (len != sizeof(struct sctp_prim)) + if (len < sizeof(struct sctp_prim)) return -EINVAL; - if (copy_from_user(&prim, optval, sizeof(struct sctp_prim))) + len = sizeof(struct sctp_prim); + + if (copy_from_user(&prim, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, prim.ssp_assoc_id); @@ -4385,7 +4428,9 @@ static int sctp_getsockopt_primary_addr(struct sock *sk, int len, sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, (union sctp_addr *)&prim.ssp_addr); - if (copy_to_user(optval, &prim, sizeof(struct sctp_prim))) + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(optval, &prim, len)) return -EFAULT; return 0; @@ -4402,10 +4447,15 @@ static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len, { struct sctp_setadaptation adaptation; - if (len != sizeof(struct sctp_setadaptation)) + if (len < sizeof(struct sctp_setadaptation)) return -EINVAL; + len = sizeof(struct sctp_setadaptation); + adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind; + + if (put_user(len, optlen)) + return -EFAULT; if (copy_to_user(optval, &adaptation, len)) return -EFAULT; @@ -4439,9 +4489,12 @@ static int sctp_getsockopt_default_send_param(struct sock *sk, struct sctp_association *asoc; struct sctp_sock *sp = sctp_sk(sk); - if (len != sizeof(struct sctp_sndrcvinfo)) + if (len < sizeof(struct sctp_sndrcvinfo)) return -EINVAL; - if (copy_from_user(&info, optval, sizeof(struct sctp_sndrcvinfo))) + + len = sizeof(struct sctp_sndrcvinfo); + + if (copy_from_user(&info, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); @@ -4462,7 +4515,9 @@ static int sctp_getsockopt_default_send_param(struct sock *sk, info.sinfo_timetolive = sp->default_timetolive; } - if (copy_to_user(optval, &info, sizeof(struct sctp_sndrcvinfo))) + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(optval, &info, len)) return -EFAULT; return 0; @@ -4513,10 
+4568,12 @@ static int sctp_getsockopt_rtoinfo(struct sock *sk, int len, struct sctp_rtoinfo rtoinfo; struct sctp_association *asoc; - if (len != sizeof (struct sctp_rtoinfo)) + if (len < sizeof (struct sctp_rtoinfo)) return -EINVAL; - if (copy_from_user(&rtoinfo, optval, sizeof (struct sctp_rtoinfo))) + len = sizeof(struct sctp_rtoinfo); + + if (copy_from_user(&rtoinfo, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); @@ -4568,11 +4625,12 @@ static int sctp_getsockopt_associnfo(struct sock *sk, int len, struct list_head *pos; int cnt = 0; - if (len != sizeof (struct sctp_assocparams)) + if (len < sizeof (struct sctp_assocparams)) return -EINVAL; - if (copy_from_user(&assocparams, optval, - sizeof (struct sctp_assocparams))) + len = sizeof(struct sctp_assocparams); + + if (copy_from_user(&assocparams, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); @@ -4658,9 +4716,11 @@ static int sctp_getsockopt_context(struct sock *sk, int len, struct sctp_sock *sp; struct sctp_association *asoc; - if (len != sizeof(struct sctp_assoc_value)) + if (len < sizeof(struct sctp_assoc_value)) return -EINVAL; + len = sizeof(struct sctp_assoc_value); + if (copy_from_user(¶ms, optval, len)) return -EFAULT; @@ -6071,8 +6131,11 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, * queued to the backlog. This prevents a potential race between * backlog processing on the old socket and new-packet processing * on the new socket. + * + * The caller has just allocated newsk so we can guarantee that other + * paths won't try to lock it and then oldsk. */ - sctp_lock_sock(newsk); + lock_sock_nested(newsk, SINGLE_DEPTH_NESTING); sctp_assoc_migrate(assoc, newsk); /* If the association on the newsk is already closed before accept() diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 961df27..5f467c9 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -241,6 +241,45 @@ void sctp_transport_pmtu(struct sctp_transport *transport) transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; } +/* this is a complete rip-off from __sk_dst_check + * the cookie is always 0 since this is how it's used in the + * pmtu code + */ +static struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t) +{ + struct dst_entry *dst = t->dst; + + if (dst && dst->obsolete && dst->ops->check(dst, 0) == NULL) { + dst_release(t->dst); + t->dst = NULL; + return NULL; + } + + return dst; +} + +void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) +{ + struct dst_entry *dst; + + if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) { + printk(KERN_WARNING "%s: Reported pmtu %d too low, " + "using default minimum of %d\n", + __FUNCTION__, pmtu, + SCTP_DEFAULT_MINSEGMENT); + /* Use default minimum segment size and disable + * pmtu discovery on this transport. + */ + t->pathmtu = SCTP_DEFAULT_MINSEGMENT; + } else { + t->pathmtu = pmtu; + } + + dst = sctp_transport_dst_check(t); + if (dst) + dst->ops->update_pmtu(dst, pmtu); +} + /* Caches the dst entry and source address for a transport's destination * address. 
*/ diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index 099a983..c094583 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -853,7 +853,7 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs u32 priv_len, maj_stat; int pad, saved_len, remaining_len, offset; - rqstp->rq_sendfile_ok = 0; + rqstp->rq_splice_ok = 0; priv_len = svc_getnl(&buf->head[0]); if (rqstp->rq_deferred) { diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index e673ef9..55ea6df 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -814,7 +814,7 @@ svc_process(struct svc_rqst *rqstp) rqstp->rq_res.tail[0].iov_base = NULL; rqstp->rq_res.tail[0].iov_len = 0; /* Will be turned off only in gss privacy case: */ - rqstp->rq_sendfile_ok = 1; + rqstp->rq_splice_ok = 1; /* tcp needs a space for the record length... */ if (rqstp->rq_prot == IPPROTO_TCP) svc_putnl(resv, 0); diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c index 4cdafa2..6a7f7b4 100644 --- a/net/tipc/netlink.c +++ b/net/tipc/netlink.c @@ -60,7 +60,7 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info) rep_nlh = nlmsg_hdr(rep_buf); memcpy(rep_nlh, req_nlh, hdr_space); rep_nlh->nlmsg_len = rep_buf->len; - genlmsg_unicast(rep_buf, req_nlh->nlmsg_pid); + genlmsg_unicast(rep_buf, NETLINK_CB(skb).pid); } return 0; diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c index 3ebae14..88aaacd 100644 --- a/net/wireless/sysfs.c +++ b/net/wireless/sysfs.c @@ -33,7 +33,7 @@ static ssize_t _show_permaddr(struct device *dev, struct device_attribute *attr, char *buf) { - char *addr = dev_to_rdev(dev)->wiphy.perm_addr; + unsigned char *addr = dev_to_rdev(dev)->wiphy.perm_addr; return sprintf(buf, "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 85f3f43..dfacb9c 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -1729,7 +1729,7 @@ int xfrm_state_mtu(struct xfrm_state *x, int mtu) x->type && x->type->get_mtu) res = x->type->get_mtu(x, mtu); else - res = mtu; + res = mtu - x->props.header_len; spin_unlock_bh(&x->lock); return res; } |
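Note on the SCTP getsockopt() hunks above: they replace the strict len != sizeof(...) test with len < sizeof(...), clamp len to the structure size before copying, and report the clamped length back through put_user(len, optlen). The fragment below is a minimal userspace sketch of that clamp-and-report pattern, not kernel code: struct fake_opt, get_fake_opt(), and the plain memcpy() standing in for copy_to_user()/put_user() are illustrative stand-ins.

/*
 * Userspace sketch of the getsockopt length handling used in the SCTP
 * hunks above: accept any buffer at least as large as the option
 * struct, clamp the copy to sizeof(struct), and report the clamped
 * length back to the caller.  Names here are illustrative only.
 */
#include <stdio.h>
#include <string.h>

struct fake_opt {			/* stand-in for an option struct */
	int a;
	int b;
};

static int get_fake_opt(const struct fake_opt *src, void *optval, int *optlen)
{
	int len = *optlen;

	if (len < (int)sizeof(struct fake_opt))
		return -1;			/* -EINVAL in the kernel */
	len = sizeof(struct fake_opt);		/* clamp oversized requests */

	memcpy(optval, src, len);		/* copy_to_user() in the kernel */
	*optlen = len;				/* put_user(len, optlen) */
	return 0;
}

int main(void)
{
	struct fake_opt opt = { .a = 1, .b = 2 };
	struct fake_opt out;
	char buf[64];				/* caller passes a larger buffer */
	int len = (int)sizeof(buf);

	if (get_fake_opt(&opt, buf, &len) == 0) {
		memcpy(&out, buf, sizeof(out));
		printf("copied %d bytes (a=%d, b=%d)\n", len, out.a, out.b);
	}
	return 0;
}

Clamping before the copy keeps the copy bounded to the structure even when the caller hands in a larger buffer, and writing the clamped value back through optlen tells the caller how many bytes were actually filled in, which is the behaviour the hunks above add with put_user(len, optlen).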