Diffstat (limited to 'net')
-rw-r--r--  net/802/fc.c  1
-rw-r--r--  net/802/garp.c  5
-rw-r--r--  net/802/stp.c  5
-rw-r--r--  net/8021q/vlan.c  35
-rw-r--r--  net/8021q/vlan.h  31
-rw-r--r--  net/8021q/vlan_core.c  57
-rw-r--r--  net/8021q/vlan_dev.c  42
-rw-r--r--  net/8021q/vlan_netlink.c  3
-rw-r--r--  net/8021q/vlanproc.c  6
-rw-r--r--  net/9p/client.c  574
-rw-r--r--  net/9p/mod.c  4
-rw-r--r--  net/9p/protocol.c  109
-rw-r--r--  net/9p/protocol.h  4
-rw-r--r--  net/9p/trans_common.c  57
-rw-r--r--  net/9p/trans_common.h  21
-rw-r--r--  net/9p/trans_virtio.c  320
-rw-r--r--  net/appletalk/aarp.c  139
-rw-r--r--  net/appletalk/atalk_proc.c  1
-rw-r--r--  net/appletalk/ddp.c  404
-rw-r--r--  net/atm/atm_misc.c  2
-rw-r--r--  net/atm/clip.c  8
-rw-r--r--  net/atm/common.c  4
-rw-r--r--  net/atm/lec.c  4
-rw-r--r--  net/atm/mpc.c  2
-rw-r--r--  net/atm/pppoatm.c  1
-rw-r--r--  net/atm/proc.c  2
-rw-r--r--  net/atm/pvc.c  1
-rw-r--r--  net/atm/svc.c  1
-rw-r--r--  net/ax25/af_ax25.c  4
-rw-r--r--  net/ax25/ax25_route.c  1
-rw-r--r--  net/ax25/ax25_uid.c  1
-rw-r--r--  net/dcb/dcbevent.c  1
-rw-r--r--  net/dcb/dcbnl.c  694
-rw-r--r--  net/dccp/ackvec.c  1
-rw-r--r--  net/dccp/ccid.c  4
-rw-r--r--  net/dccp/ccids/ccid2.c  193
-rw-r--r--  net/dccp/ccids/ccid2.h  31
-rw-r--r--  net/dccp/ccids/lib/tfrc.c  1
-rw-r--r--  net/dccp/dccp.h  1
-rw-r--r--  net/dccp/feat.c  202
-rw-r--r--  net/dccp/feat.h  1
-rw-r--r--  net/dccp/input.c  61
-rw-r--r--  net/dccp/ipv4.c  4
-rw-r--r--  net/dccp/ipv6.c  7
-rw-r--r--  net/dccp/output.c  14
-rw-r--r--  net/dccp/proto.c  1
-rw-r--r--  net/dccp/timer.c  1
-rw-r--r--  net/decnet/af_decnet.c  697
-rw-r--r--  net/decnet/dn_dev.c  74
-rw-r--r--  net/decnet/dn_fib.c  79
-rw-r--r--  net/decnet/dn_neigh.c  42
-rw-r--r--  net/decnet/dn_nsp_in.c  182
-rw-r--r--  net/decnet/dn_route.c  133
-rw-r--r--  net/decnet/dn_rules.c  1
-rw-r--r--  net/decnet/dn_table.c  25
-rw-r--r--  net/decnet/dn_timer.c  19
-rw-r--r--  net/decnet/netfilter/dn_rtmsg.c  18
-rw-r--r--  net/decnet/sysctl_net_decnet.c  17
-rw-r--r--  net/dns_resolver/dns_query.c  4
-rw-r--r--  net/dsa/dsa.c  1
-rw-r--r--  net/dsa/mv88e6131.c  8
-rw-r--r--  net/dsa/slave.c  3
-rw-r--r--  net/econet/af_econet.c  172
-rw-r--r--  net/ieee802154/Kconfig  6
-rw-r--r--  net/ieee802154/Makefile  8
-rw-r--r--  net/ieee802154/af_ieee802154.c  2
-rw-r--r--  net/ieee802154/dgram.c  2
-rw-r--r--  net/ieee802154/nl-mac.c  1
-rw-r--r--  net/ieee802154/nl-phy.c  31
-rw-r--r--  net/mac80211/Kconfig  25
-rw-r--r--  net/mac80211/aes_ccm.c  37
-rw-r--r--  net/mac80211/aes_ccm.h  2
-rw-r--r--  net/mac80211/aes_cmac.c  10
-rw-r--r--  net/mac80211/aes_cmac.h  2
-rw-r--r--  net/mac80211/agg-rx.c  61
-rw-r--r--  net/mac80211/agg-tx.c  64
-rw-r--r--  net/mac80211/cfg.c  553
-rw-r--r--  net/mac80211/debugfs.c  71
-rw-r--r--  net/mac80211/debugfs_key.c  13
-rw-r--r--  net/mac80211/debugfs_netdev.c  66
-rw-r--r--  net/mac80211/debugfs_sta.c  41
-rw-r--r--  net/mac80211/driver-ops.h  150
-rw-r--r--  net/mac80211/driver-trace.h  261
-rw-r--r--  net/mac80211/ht.c  15
-rw-r--r--  net/mac80211/ibss.c  15
-rw-r--r--  net/mac80211/ieee80211_i.h  176
-rw-r--r--  net/mac80211/iface.c  40
-rw-r--r--  net/mac80211/key.c  178
-rw-r--r--  net/mac80211/key.h  32
-rw-r--r--  net/mac80211/led.c  1
-rw-r--r--  net/mac80211/main.c  93
-rw-r--r--  net/mac80211/mesh.c  213
-rw-r--r--  net/mac80211/mesh.h  38
-rw-r--r--  net/mac80211/mesh_hwmp.c  193
-rw-r--r--  net/mac80211/mesh_pathtbl.c  481
-rw-r--r--  net/mac80211/mesh_plink.c  257
-rw-r--r--  net/mac80211/mlme.c  331
-rw-r--r--  net/mac80211/offchannel.c  68
-rw-r--r--  net/mac80211/pm.c  56
-rw-r--r--  net/mac80211/rate.c  38
-rw-r--r--  net/mac80211/rc80211_minstrel.c  9
-rw-r--r--  net/mac80211/rc80211_minstrel.h  12
-rw-r--r--  net/mac80211/rc80211_minstrel_debugfs.c  1
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.c  20
-rw-r--r--  net/mac80211/rc80211_minstrel_ht_debugfs.c  1
-rw-r--r--  net/mac80211/rc80211_pid_debugfs.c  1
-rw-r--r--  net/mac80211/rx.c  320
-rw-r--r--  net/mac80211/scan.c  142
-rw-r--r--  net/mac80211/spectmgmt.c  6
-rw-r--r--  net/mac80211/sta_info.c  986
-rw-r--r--  net/mac80211/sta_info.h  186
-rw-r--r--  net/mac80211/status.c  257
-rw-r--r--  net/mac80211/tkip.c  116
-rw-r--r--  net/mac80211/tkip.h  8
-rw-r--r--  net/mac80211/tx.c  606
-rw-r--r--  net/mac80211/util.c  421
-rw-r--r--  net/mac80211/wep.c  6
-rw-r--r--  net/mac80211/wme.c  23
-rw-r--r--  net/mac80211/wme.h  8
-rw-r--r--  net/mac80211/work.c  107
-rw-r--r--  net/mac80211/wpa.c  97
-rw-r--r--  net/netlabel/Makefile  2
-rw-r--r--  net/netlabel/netlabel_addrlist.c  2
-rw-r--r--  net/netlabel/netlabel_addrlist.h  2
-rw-r--r--  net/netlabel/netlabel_cipso_v4.c  4
-rw-r--r--  net/netlabel/netlabel_cipso_v4.h  2
-rw-r--r--  net/netlabel/netlabel_domainhash.c  11
-rw-r--r--  net/netlabel/netlabel_domainhash.h  2
-rw-r--r--  net/netlabel/netlabel_kapi.c  76
-rw-r--r--  net/netlabel/netlabel_mgmt.c  4
-rw-r--r--  net/netlabel/netlabel_mgmt.h  4
-rw-r--r--  net/netlabel/netlabel_unlabeled.c  21
-rw-r--r--  net/netlabel/netlabel_unlabeled.h  2
-rw-r--r--  net/netlabel/netlabel_user.c  2
-rw-r--r--  net/netlabel/netlabel_user.h  2
-rw-r--r--  net/netrom/af_netrom.c  3
-rw-r--r--  net/netrom/nr_route.c  23
-rw-r--r--  net/rfkill/Kconfig  5
-rw-r--r--  net/rfkill/core.c  6
-rw-r--r--  net/rfkill/input.c  1
-rw-r--r--  net/rfkill/rfkill-gpio.c  11
-rw-r--r--  net/rfkill/rfkill-regulator.c  1
-rw-r--r--  net/rose/af_rose.c  27
-rw-r--r--  net/rose/rose_link.c  7
-rw-r--r--  net/rose/rose_route.c  6
-rw-r--r--  net/rxrpc/ar-output.c  1
-rw-r--r--  net/rxrpc/ar-recvmsg.c  12
-rw-r--r--  net/sctp/associola.c  13
-rw-r--r--  net/sctp/auth.c  4
-rw-r--r--  net/sctp/bind_addr.c  17
-rw-r--r--  net/sctp/input.c  3
-rw-r--r--  net/sctp/inqueue.c  33
-rw-r--r--  net/sctp/ipv6.c  6
-rw-r--r--  net/sctp/output.c  9
-rw-r--r--  net/sctp/outqueue.c  17
-rw-r--r--  net/sctp/proc.c  1
-rw-r--r--  net/sctp/protocol.c  161
-rw-r--r--  net/sctp/sm_make_chunk.c  162
-rw-r--r--  net/sctp/sm_sideeffect.c  9
-rw-r--r--  net/sctp/sm_statefuns.c  95
-rw-r--r--  net/sctp/socket.c  281
-rw-r--r--  net/sctp/sysctl.c  7
-rw-r--r--  net/sctp/ulpevent.c  122
-rw-r--r--  net/x25/af_x25.c  487
-rw-r--r--  net/x25/x25_dev.c  44
-rw-r--r--  net/x25/x25_in.c  126
-rw-r--r--  net/x25/x25_link.c  87
-rw-r--r--  net/x25/x25_proc.c  1
-rw-r--r--  net/x25/x25_subr.c  78
169 files changed, 8713 insertions(+), 4794 deletions(-)
diff --git a/net/802/fc.c b/net/802/fc.c
index 1e49f2d..bd345f3 100644
--- a/net/802/fc.c
+++ b/net/802/fc.c
@@ -27,6 +27,7 @@
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
+#include <linux/export.h>
#include <net/arp.h>
/*
diff --git a/net/802/garp.c b/net/802/garp.c
index 1610295..8e21b6d 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -15,6 +15,7 @@
#include <linux/rtnetlink.h>
#include <linux/llc.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <net/llc.h>
#include <net/llc_pdu.h>
#include <net/garp.h>
@@ -553,7 +554,7 @@ static void garp_release_port(struct net_device *dev)
if (rtnl_dereference(port->applicants[i]))
return;
}
- rcu_assign_pointer(dev->garp_port, NULL);
+ RCU_INIT_POINTER(dev->garp_port, NULL);
kfree_rcu(port, rcu);
}
@@ -605,7 +606,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
ASSERT_RTNL();
- rcu_assign_pointer(port->applicants[appl->type], NULL);
+ RCU_INIT_POINTER(port->applicants[appl->type], NULL);
/* Delete timer and generate a final TRANSMIT_PDU event to flush out
* all pending messages before the applicant is gone. */
diff --git a/net/802/stp.c b/net/802/stp.c
index 978c30b..15540b7 100644
--- a/net/802/stp.c
+++ b/net/802/stp.c
@@ -12,6 +12,7 @@
#include <linux/etherdevice.h>
#include <linux/llc.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <net/llc.h>
#include <net/llc_pdu.h>
#include <net/stp.h>
@@ -88,9 +89,9 @@ void stp_proto_unregister(const struct stp_proto *proto)
{
mutex_lock(&stp_proto_mutex);
if (is_zero_ether_addr(proto->group_address))
- rcu_assign_pointer(stp_proto, NULL);
+ RCU_INIT_POINTER(stp_proto, NULL);
else
- rcu_assign_pointer(garp_protos[proto->group_address[5] -
+ RCU_INIT_POINTER(garp_protos[proto->group_address[5] -
GARP_ADDR_MIN], NULL);
synchronize_rcu();
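
Note on the recurring substitution in the garp, stp and 8021q hunks: rcu_assign_pointer(p, NULL) becomes RCU_INIT_POINTER(p, NULL). A minimal sketch of why the cheaper form is safe when clearing a pointer (illustration only; the gp/new_obj/old_obj names are not part of this patch):

	struct foo __rcu *gp;

	/* Publishing an initialised object needs rcu_assign_pointer(),
	 * whose barrier keeps readers from seeing a half-built *gp. */
	rcu_assign_pointer(gp, new_obj);

	/* Clearing publishes NULL; there is nothing behind the pointer
	 * to order against, so no barrier is required. */
	RCU_INIT_POINTER(gp, NULL);
	synchronize_rcu();	/* wait for readers before freeing old_obj */
	kfree(old_obj);
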
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 917ecb9..963f285 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -18,6 +18,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -108,13 +110,6 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
grp = rtnl_dereference(real_dev->vlgrp);
BUG_ON(!grp);
- /* Take it out of our own structures, but be sure to interlock with
- * HW accelerating devices or SW vlan input packet processing if
- * VLAN is not 0 (leave it there for 802.1p).
- */
- if (vlan_id && (real_dev->features & NETIF_F_HW_VLAN_FILTER))
- ops->ndo_vlan_rx_kill_vid(real_dev, vlan_id);
-
grp->nr_vlans--;
if (vlan->flags & VLAN_FLAG_GVRP)
@@ -131,14 +126,19 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
if (grp->nr_vlans == 0) {
vlan_gvrp_uninit_applicant(real_dev);
- rcu_assign_pointer(real_dev->vlgrp, NULL);
- if (ops->ndo_vlan_rx_register)
- ops->ndo_vlan_rx_register(real_dev, NULL);
+ RCU_INIT_POINTER(real_dev->vlgrp, NULL);
/* Free the group, after all cpu's are done. */
call_rcu(&grp->rcu, vlan_rcu_free);
}
+ /* Take it out of our own structures, but be sure to interlock with
+ * HW accelerating devices or SW vlan input packet processing if
+ * VLAN is not 0 (leave it there for 802.1p).
+ */
+ if (vlan_id && (real_dev->features & NETIF_F_HW_VLAN_FILTER))
+ ops->ndo_vlan_rx_kill_vid(real_dev, vlan_id);
+
/* Get rid of the vlan's reference to real_dev */
dev_put(real_dev);
}
@@ -149,13 +149,13 @@ int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id)
const struct net_device_ops *ops = real_dev->netdev_ops;
if (real_dev->features & NETIF_F_VLAN_CHALLENGED) {
- pr_info("8021q: VLANs not supported on %s\n", name);
+ pr_info("VLANs not supported on %s\n", name);
return -EOPNOTSUPP;
}
if ((real_dev->features & NETIF_F_HW_VLAN_FILTER) &&
(!ops->ndo_vlan_rx_add_vid || !ops->ndo_vlan_rx_kill_vid)) {
- pr_info("8021q: Device %s has buggy VLAN hw accel\n", name);
+ pr_info("Device %s has buggy VLAN hw accel\n", name);
return -EOPNOTSUPP;
}
@@ -205,8 +205,6 @@ int register_vlan_dev(struct net_device *dev)
grp->nr_vlans++;
if (ngrp) {
- if (ops->ndo_vlan_rx_register && (real_dev->features & NETIF_F_HW_VLAN_RX))
- ops->ndo_vlan_rx_register(real_dev, ngrp);
rcu_assign_pointer(real_dev->vlgrp, ngrp);
}
if (real_dev->features & NETIF_F_HW_VLAN_FILTER)
@@ -344,13 +342,12 @@ static void __vlan_device_event(struct net_device *dev, unsigned long event)
case NETDEV_CHANGENAME:
vlan_proc_rem_dev(dev);
if (vlan_proc_add_dev(dev) < 0)
- pr_warning("8021q: failed to change proc name for %s\n",
- dev->name);
+ pr_warn("failed to change proc name for %s\n",
+ dev->name);
break;
case NETDEV_REGISTER:
if (vlan_proc_add_dev(dev) < 0)
- pr_warning("8021q: failed to add proc entry for %s\n",
- dev->name);
+ pr_warn("failed to add proc entry for %s\n", dev->name);
break;
case NETDEV_UNREGISTER:
vlan_proc_rem_dev(dev);
@@ -374,7 +371,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
if ((event == NETDEV_UP) &&
(dev->features & NETIF_F_HW_VLAN_FILTER) &&
dev->netdev_ops->ndo_vlan_rx_add_vid) {
- pr_info("8021q: adding VLAN 0 to HW filter on device %s\n",
+ pr_info("adding VLAN 0 to HW filter on device %s\n",
dev->name);
dev->netdev_ops->ndo_vlan_rx_add_vid(dev, 0);
}
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 9da07e3..9fd45f3 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -74,6 +74,37 @@ static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
return netdev_priv(dev);
}
+static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
+ u16 vlan_id)
+{
+ struct net_device **array;
+ array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
+ return array ? array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] : NULL;
+}
+
+static inline void vlan_group_set_device(struct vlan_group *vg,
+ u16 vlan_id,
+ struct net_device *dev)
+{
+ struct net_device **array;
+ if (!vg)
+ return;
+ array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
+ array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] = dev;
+}
+
+/* Must be invoked with rcu_read_lock or with RTNL. */
+static inline struct net_device *vlan_find_dev(struct net_device *real_dev,
+ u16 vlan_id)
+{
+ struct vlan_group *grp = rcu_dereference_rtnl(real_dev->vlgrp);
+
+ if (grp)
+ return vlan_group_get_device(grp, vlan_id);
+
+ return NULL;
+}
+
/* found in vlan_dev.c */
void vlan_dev_set_ingress_priority(const struct net_device *dev,
u32 skb_prio, u16 vlan_prio);
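
The helpers moved into vlan.h above index a two-level array. Assuming the usual constants from <linux/if_vlan.h> of this era (VLAN_N_VID = 4096 split into 8 parts of VLAN_GROUP_ARRAY_PART_LEN = 512 entries), the lookup works out as in this sketch:

	/* vlan_id = 1000:
	 *   part = 1000 / 512 = 1
	 *   slot = 1000 % 512 = 488
	 * i.e. the device sits at vg->vlan_devices_arrays[1][488]. */
	struct net_device *dev = vlan_group_get_device(vg, 1000);
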
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index c177f9e..77d3532 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -2,6 +2,7 @@
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
+#include <linux/export.h>
#include "vlan.h"
bool vlan_do_receive(struct sk_buff **skbp)
@@ -12,11 +13,8 @@ bool vlan_do_receive(struct sk_buff **skbp)
struct vlan_pcpu_stats *rx_stats;
vlan_dev = vlan_find_dev(skb->dev, vlan_id);
- if (!vlan_dev) {
- if (vlan_id)
- skb->pkt_type = PACKET_OTHERHOST;
+ if (!vlan_dev)
return false;
- }
skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(!skb))
@@ -63,6 +61,27 @@ bool vlan_do_receive(struct sk_buff **skbp)
return true;
}
+/* Must be invoked with rcu_read_lock or with RTNL. */
+struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
+ u16 vlan_id)
+{
+ struct vlan_group *grp = rcu_dereference_rtnl(real_dev->vlgrp);
+
+ if (grp) {
+ return vlan_group_get_device(grp, vlan_id);
+ } else {
+ /*
+ * Bonding slaves do not have grp assigned to themselves.
+ * Grp is assigned to bonding master instead.
+ */
+ if (netif_is_bond_slave(real_dev))
+ return __vlan_find_dev_deep(real_dev->master, vlan_id);
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(__vlan_find_dev_deep);
+
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
return vlan_dev_info(dev)->real_dev;
@@ -75,35 +94,13 @@ u16 vlan_dev_vlan_id(const struct net_device *dev)
}
EXPORT_SYMBOL(vlan_dev_vlan_id);
-/* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */
-int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
- u16 vlan_tci, int polling)
-{
- __vlan_hwaccel_put_tag(skb, vlan_tci);
- return polling ? netif_receive_skb(skb) : netif_rx(skb);
-}
-EXPORT_SYMBOL(__vlan_hwaccel_rx);
-
-gro_result_t vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
- unsigned int vlan_tci, struct sk_buff *skb)
-{
- __vlan_hwaccel_put_tag(skb, vlan_tci);
- return napi_gro_receive(napi, skb);
-}
-EXPORT_SYMBOL(vlan_gro_receive);
-
-gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
- unsigned int vlan_tci)
-{
- __vlan_hwaccel_put_tag(napi->skb, vlan_tci);
- return napi_gro_frags(napi);
-}
-EXPORT_SYMBOL(vlan_gro_frags);
-
static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
{
- if (skb_cow(skb, skb_headroom(skb)) < 0)
+ if (skb_cow(skb, skb_headroom(skb)) < 0) {
+ kfree_skb(skb);
return NULL;
+ }
+
memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
skb->mac_header += VLAN_HLEN;
return skb;
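
__vlan_find_dev_deep(), added and exported above, lets drivers map an incoming VLAN id back to the VLAN net_device, falling back to the bonding master when the receiving port is only a slave. A hedged caller-side sketch (the surrounding names are illustrative, not from this patch):

	rcu_read_lock();
	vlan_dev = __vlan_find_dev_deep(netdev, vid);
	if (vlan_dev)
		netdev_dbg(vlan_dev, "VLAN %u is configured\n", vid);
	rcu_read_unlock();
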
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index b172407..c43a788 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -20,6 +20,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
@@ -55,7 +57,7 @@ static int vlan_dev_rebuild_header(struct sk_buff *skb)
return arp_find(veth->h_dest, skb);
#endif
default:
- pr_debug("%s: unable to resolve type %X addresses.\n",
+ pr_debug("%s: unable to resolve type %X addresses\n",
dev->name, ntohs(veth->h_vlan_encapsulated_proto));
memcpy(veth->h_source, dev->dev_addr, ETH_ALEN);
@@ -475,10 +477,12 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
{
struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
- if (change & IFF_ALLMULTI)
- dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
- if (change & IFF_PROMISC)
- dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 1 : -1);
+ if (dev->flags & IFF_UP) {
+ if (change & IFF_ALLMULTI)
+ dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+ if (change & IFF_PROMISC)
+ dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 1 : -1);
+ }
}
static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
@@ -518,6 +522,25 @@ static const struct header_ops vlan_header_ops = {
.parse = eth_header_parse,
};
+static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type,
+ const void *daddr, const void *saddr,
+ unsigned int len)
+{
+ struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+
+ if (saddr == NULL)
+ saddr = dev->dev_addr;
+
+ return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
+}
+
+static const struct header_ops vlan_passthru_header_ops = {
+ .create = vlan_passthru_hard_header,
+ .rebuild = dev_rebuild_header,
+ .parse = eth_header_parse,
+};
+
static const struct net_device_ops vlan_netdev_ops;
static int vlan_dev_init(struct net_device *dev)
@@ -557,7 +580,7 @@ static int vlan_dev_init(struct net_device *dev)
dev->needed_headroom = real_dev->needed_headroom;
if (real_dev->features & NETIF_F_HW_VLAN_TX) {
- dev->header_ops = real_dev->header_ops;
+ dev->header_ops = &vlan_passthru_header_ops;
dev->hard_header_len = real_dev->hard_header_len;
} else {
dev->header_ops = &vlan_header_ops;
@@ -602,8 +625,7 @@ static u32 vlan_dev_fix_features(struct net_device *dev, u32 features)
features &= real_dev->features;
features &= real_dev->vlan_features;
- if (old_features & NETIF_F_SOFT_FEATURES)
- features |= old_features & NETIF_F_SOFT_FEATURES;
+ features |= old_features & NETIF_F_SOFT_FEATURES;
if (dev_ethtool_get_rx_csum(real_dev))
features |= NETIF_F_RXCSUM;
@@ -616,7 +638,8 @@ static int vlan_ethtool_get_settings(struct net_device *dev,
struct ethtool_cmd *cmd)
{
const struct vlan_dev_info *vlan = vlan_dev_info(dev);
- return dev_ethtool_get_settings(vlan->real_dev, cmd);
+
+ return __ethtool_get_settings(vlan->real_dev, cmd);
}
static void vlan_ethtool_get_drvinfo(struct net_device *dev,
@@ -680,7 +703,6 @@ static const struct net_device_ops vlan_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = vlan_dev_set_mac_address,
.ndo_set_rx_mode = vlan_dev_set_rx_mode,
- .ndo_set_multicast_list = vlan_dev_set_rx_mode,
.ndo_change_rx_flags = vlan_dev_change_rx_flags,
.ndo_do_ioctl = vlan_dev_ioctl,
.ndo_neigh_setup = vlan_dev_neigh_setup,
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index be9a5c1..c705612 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
+#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
@@ -151,7 +152,7 @@ static size_t vlan_get_size(const struct net_device *dev)
struct vlan_dev_info *vlan = vlan_dev_info(dev);
return nla_total_size(2) + /* IFLA_VLAN_ID */
- sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */
+ nla_total_size(sizeof(struct ifla_vlan_flags)) + /* IFLA_VLAN_FLAGS */
vlan_qos_map_size(vlan->nr_ingress_mappings) +
vlan_qos_map_size(vlan->nr_egress_mappings);
}
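
The vlan_get_size() fix above matters because every netlink attribute is preceded by a struct nlattr header and padded to 4-byte alignment, so sizing with the raw payload under-reserves the message. A worked example for IFLA_VLAN_FLAGS, assuming the standard NLA_HDRLEN of 4 bytes:

	/* nla_total_size(payload) = NLA_ALIGN(NLA_HDRLEN + payload)   */
	/* sizeof(struct ifla_vlan_flags) = 8                          */
	/* nla_total_size(8) = NLA_ALIGN(4 + 8) = 12 bytes reserved    */
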
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index d940c49..d34b6da 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -17,6 +17,8 @@
* Jan 20, 1998 Ben Greear Initial Version
*****************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
@@ -155,7 +157,7 @@ int __net_init vlan_proc_init(struct net *net)
return 0;
err:
- pr_err("%s: can't create entry in proc filesystem!\n", __func__);
+ pr_err("can't create entry in proc filesystem!\n");
vlan_proc_cleanup(net);
return -ENOBUFS;
}
@@ -229,7 +231,7 @@ static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos)
++*pos;
- dev = (struct net_device *)v;
+ dev = v;
if (v == SEQ_START_TOKEN)
dev = net_device_entry(&net->dev_base_head);
diff --git a/net/9p/client.c b/net/9p/client.c
index 5532710..e958178 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -38,6 +38,9 @@
#include <net/9p/transport.h>
#include "protocol.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/9p.h>
+
/*
* Client Option Parsing (code inspired by NFS code)
* - a little lazy - parse all client options
@@ -72,23 +75,22 @@ inline int p9_is_proto_dotu(struct p9_client *clnt)
EXPORT_SYMBOL(p9_is_proto_dotu);
/* Interpret mount option for protocol version */
-static int get_protocol_version(const substring_t *name)
+static int get_protocol_version(char *s)
{
int version = -EINVAL;
- if (!strncmp("9p2000", name->from, name->to-name->from)) {
+ if (!strcmp(s, "9p2000")) {
version = p9_proto_legacy;
P9_DPRINTK(P9_DEBUG_9P, "Protocol version: Legacy\n");
- } else if (!strncmp("9p2000.u", name->from, name->to-name->from)) {
+ } else if (!strcmp(s, "9p2000.u")) {
version = p9_proto_2000u;
P9_DPRINTK(P9_DEBUG_9P, "Protocol version: 9P2000.u\n");
- } else if (!strncmp("9p2000.L", name->from, name->to-name->from)) {
+ } else if (!strcmp(s, "9p2000.L")) {
version = p9_proto_2000L;
P9_DPRINTK(P9_DEBUG_9P, "Protocol version: 9P2000.L\n");
- } else {
- P9_DPRINTK(P9_DEBUG_ERROR, "Unknown protocol version %s. ",
- name->from);
- }
+ } else
+ printk(KERN_INFO "9p: Unknown protocol version %s.\n", s);
+
return version;
}
@@ -106,6 +108,7 @@ static int parse_opts(char *opts, struct p9_client *clnt)
char *p;
substring_t args[MAX_OPT_ARGS];
int option;
+ char *s;
int ret = 0;
clnt->proto_version = p9_proto_2000u;
@@ -123,40 +126,57 @@ static int parse_opts(char *opts, struct p9_client *clnt)
options = tmp_options;
while ((p = strsep(&options, ",")) != NULL) {
- int token;
+ int token, r;
if (!*p)
continue;
token = match_token(p, tokens, args);
- if (token < Opt_trans) {
- int r = match_int(&args[0], &option);
+ switch (token) {
+ case Opt_msize:
+ r = match_int(&args[0], &option);
if (r < 0) {
P9_DPRINTK(P9_DEBUG_ERROR,
- "integer field, but no integer?\n");
+ "integer field, but no integer?\n");
ret = r;
continue;
}
- }
- switch (token) {
- case Opt_msize:
clnt->msize = option;
break;
case Opt_trans:
- clnt->trans_mod = v9fs_get_trans_by_name(&args[0]);
- if(clnt->trans_mod == NULL) {
+ s = match_strdup(&args[0]);
+ if (!s) {
+ ret = -ENOMEM;
P9_DPRINTK(P9_DEBUG_ERROR,
- "Could not find request transport: %s\n",
- (char *) &args[0]);
+ "problem allocating copy of trans arg\n");
+ goto free_and_return;
+ }
+ clnt->trans_mod = v9fs_get_trans_by_name(s);
+ if (clnt->trans_mod == NULL) {
+ printk(KERN_INFO
+ "9p: Could not find "
+ "request transport: %s\n", s);
ret = -EINVAL;
+ kfree(s);
goto free_and_return;
}
+ kfree(s);
break;
case Opt_legacy:
clnt->proto_version = p9_proto_legacy;
break;
case Opt_version:
- ret = get_protocol_version(&args[0]);
- if (ret == -EINVAL)
+ s = match_strdup(&args[0]);
+ if (!s) {
+ ret = -ENOMEM;
+ P9_DPRINTK(P9_DEBUG_ERROR,
+ "problem allocating copy of version arg\n");
+ goto free_and_return;
+ }
+ ret = get_protocol_version(s);
+ if (ret == -EINVAL) {
+ kfree(s);
goto free_and_return;
+ }
+ kfree(s);
clnt->proto_version = ret;
break;
default:
@@ -184,11 +204,13 @@ free_and_return:
*
*/
-static struct p9_req_t *p9_tag_alloc(struct p9_client *c, u16 tag)
+static struct p9_req_t *
+p9_tag_alloc(struct p9_client *c, u16 tag, unsigned int max_size)
{
unsigned long flags;
int row, col;
struct p9_req_t *req;
+ int alloc_msize = min(c->msize, max_size);
/* This looks up the original request by tag so we know which
* buffer to read the data into */
@@ -226,23 +248,10 @@ static struct p9_req_t *p9_tag_alloc(struct p9_client *c, u16 tag)
return ERR_PTR(-ENOMEM);
}
init_waitqueue_head(req->wq);
- if ((c->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
- P9_TRANS_PREF_PAYLOAD_SEP) {
- int alloc_msize = min(c->msize, 4096);
- req->tc = kmalloc(sizeof(struct p9_fcall)+alloc_msize,
- GFP_NOFS);
- req->tc->capacity = alloc_msize;
- req->rc = kmalloc(sizeof(struct p9_fcall)+alloc_msize,
- GFP_NOFS);
- req->rc->capacity = alloc_msize;
- } else {
- req->tc = kmalloc(sizeof(struct p9_fcall)+c->msize,
- GFP_NOFS);
- req->tc->capacity = c->msize;
- req->rc = kmalloc(sizeof(struct p9_fcall)+c->msize,
- GFP_NOFS);
- req->rc->capacity = c->msize;
- }
+ req->tc = kmalloc(sizeof(struct p9_fcall) + alloc_msize,
+ GFP_NOFS);
+ req->rc = kmalloc(sizeof(struct p9_fcall) + alloc_msize,
+ GFP_NOFS);
if ((!req->tc) || (!req->rc)) {
printk(KERN_ERR "Couldn't grow tag array\n");
kfree(req->tc);
@@ -252,6 +261,8 @@ static struct p9_req_t *p9_tag_alloc(struct p9_client *c, u16 tag)
req->wq = NULL;
return ERR_PTR(-ENOMEM);
}
+ req->tc->capacity = alloc_msize;
+ req->rc->capacity = alloc_msize;
req->tc->sdata = (char *) req->tc + sizeof(struct p9_fcall);
req->rc->sdata = (char *) req->rc + sizeof(struct p9_fcall);
}
@@ -280,7 +291,7 @@ struct p9_req_t *p9_tag_lookup(struct p9_client *c, u16 tag)
* buffer to read the data into */
tag++;
- if(tag >= c->max_tag)
+ if(tag >= c->max_tag)
return NULL;
row = tag / P9_ROW_MAXTAG;
@@ -456,37 +467,22 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
int ecode;
err = p9_parse_header(req->rc, NULL, &type, NULL, 0);
+ /*
+ * dump the response from server
+ * This should be after check errors which populate pdu_fcall.
+ */
+ trace_9p_protocol_dump(c, req->rc);
if (err) {
P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse header %d\n", err);
return err;
}
-
if (type != P9_RERROR && type != P9_RLERROR)
return 0;
if (!p9_is_proto_dotl(c)) {
char *ename;
-
- if (req->tc->pbuf_size) {
- /* Handle user buffers */
- size_t len = req->rc->size - req->rc->offset;
- if (req->tc->pubuf) {
- /* User Buffer */
- err = copy_from_user(
- &req->rc->sdata[req->rc->offset],
- req->tc->pubuf, len);
- if (err) {
- err = -EFAULT;
- goto out_err;
- }
- } else {
- /* Kernel Buffer */
- memmove(&req->rc->sdata[req->rc->offset],
- req->tc->pkbuf, len);
- }
- }
err = p9pdu_readf(req->rc, c->proto_version, "s?d",
- &ename, &ecode);
+ &ename, &ecode);
if (err)
goto out_err;
@@ -496,11 +492,10 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
if (!err || !IS_ERR_VALUE(err)) {
err = p9_errstr2errno(ename, strlen(ename));
- P9_DPRINTK(P9_DEBUG_9P, "<<< RERROR (%d) %s\n", -ecode,
- ename);
-
- kfree(ename);
+ P9_DPRINTK(P9_DEBUG_9P, "<<< RERROR (%d) %s\n",
+ -ecode, ename);
}
+ kfree(ename);
} else {
err = p9pdu_readf(req->rc, c->proto_version, "d", &ecode);
err = -ecode;
@@ -508,7 +503,6 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
P9_DPRINTK(P9_DEBUG_9P, "<<< RLERROR (%d)\n", -ecode);
}
-
return err;
out_err:
@@ -517,6 +511,115 @@ out_err:
return err;
}
+/**
+ * p9_check_zc_errors - check 9p packet for error return and process it
+ * @c: current client instance
+ * @req: request to parse and check for error conditions
+ * @in_hdrlen: Size of response protocol buffer.
+ *
+ * returns error code if one is discovered, otherwise returns 0
+ *
+ * this will have to be more complicated if we have multiple
+ * error packet types
+ */
+
+static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
+ char *uidata, int in_hdrlen, int kern_buf)
+{
+ int err;
+ int ecode;
+ int8_t type;
+ char *ename = NULL;
+
+ err = p9_parse_header(req->rc, NULL, &type, NULL, 0);
+ /*
+ * dump the response from server
+ * This should be after parse_header which populates pdu_fcall.
+ */
+ trace_9p_protocol_dump(c, req->rc);
+ if (err) {
+ P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse header %d\n", err);
+ return err;
+ }
+
+ if (type != P9_RERROR && type != P9_RLERROR)
+ return 0;
+
+ if (!p9_is_proto_dotl(c)) {
+ /* Error is reported in string format */
+ uint16_t len;
+ /* 7 = header size for RERROR, 2 is the size of string len; */
+ int inline_len = in_hdrlen - (7 + 2);
+
+ /* Read the size of error string */
+ err = p9pdu_readf(req->rc, c->proto_version, "w", &len);
+ if (err)
+ goto out_err;
+
+ ename = kmalloc(len + 1, GFP_NOFS);
+ if (!ename) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+ if (len <= inline_len) {
+ /* We have error in protocol buffer itself */
+ if (pdu_read(req->rc, ename, len)) {
+ err = -EFAULT;
+ goto out_free;
+
+ }
+ } else {
+ /*
+ * Part of the data is in user space buffer.
+ */
+ if (pdu_read(req->rc, ename, inline_len)) {
+ err = -EFAULT;
+ goto out_free;
+
+ }
+ if (kern_buf) {
+ memcpy(ename + inline_len, uidata,
+ len - inline_len);
+ } else {
+ err = copy_from_user(ename + inline_len,
+ uidata, len - inline_len);
+ if (err) {
+ err = -EFAULT;
+ goto out_free;
+ }
+ }
+ }
+ ename[len] = 0;
+ if (p9_is_proto_dotu(c)) {
+ /* For dotu we also have error code */
+ err = p9pdu_readf(req->rc,
+ c->proto_version, "d", &ecode);
+ if (err)
+ goto out_free;
+ err = -ecode;
+ }
+ if (!err || !IS_ERR_VALUE(err)) {
+ err = p9_errstr2errno(ename, strlen(ename));
+
+ P9_DPRINTK(P9_DEBUG_9P, "<<< RERROR (%d) %s\n",
+ -ecode, ename);
+ }
+ kfree(ename);
+ } else {
+ err = p9pdu_readf(req->rc, c->proto_version, "d", &ecode);
+ err = -ecode;
+
+ P9_DPRINTK(P9_DEBUG_9P, "<<< RLERROR (%d)\n", -ecode);
+ }
+ return err;
+
+out_free:
+ kfree(ename);
+out_err:
+ P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse error%d\n", err);
+ return err;
+}
+
static struct p9_req_t *
p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...);
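
The fixed offsets used by p9_check_zc_errors() above, and the in_hdrlen of 11 passed for zero-copy reads later in this patch, come straight from the 9P wire format; a short breakdown for reference:

	/* every 9P message:  size[4] type[1] tag[2]       -> 7 byte header      */
	/* Rerror payload:    ename = len[2] + len bytes   -> inline_len =
	 *                    in_hdrlen - (7 + 2)                                */
	/* Rread/Rreaddir:    header + count[4]            -> in_hdrlen = 7 + 4
	 *                                                    = 11               */
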
@@ -560,23 +663,12 @@ static int p9_client_flush(struct p9_client *c, struct p9_req_t *oldreq)
return 0;
}
-/**
- * p9_client_rpc - issue a request and wait for a response
- * @c: client session
- * @type: type of request
- * @fmt: protocol format string (see protocol.c)
- *
- * Returns request structure (which client must free using p9_free_req)
- */
-
-static struct p9_req_t *
-p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
+static struct p9_req_t *p9_client_prepare_req(struct p9_client *c,
+ int8_t type, int req_size,
+ const char *fmt, va_list ap)
{
- va_list ap;
int tag, err;
struct p9_req_t *req;
- unsigned long flags;
- int sigpending;
P9_DPRINTK(P9_DEBUG_MUX, "client %p op %d\n", c, type);
@@ -588,12 +680,6 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
if ((c->status == BeginDisconnect) && (type != P9_TCLUNK))
return ERR_PTR(-EIO);
- if (signal_pending(current)) {
- sigpending = 1;
- clear_thread_flag(TIF_SIGPENDING);
- } else
- sigpending = 0;
-
tag = P9_NOTAG;
if (type != P9_TVERSION) {
tag = p9_idpool_get(c->tagpool);
@@ -601,18 +687,51 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
return ERR_PTR(-ENOMEM);
}
- req = p9_tag_alloc(c, tag);
+ req = p9_tag_alloc(c, tag, req_size);
if (IS_ERR(req))
return req;
/* marshall the data */
p9pdu_prepare(req->tc, tag, type);
- va_start(ap, fmt);
err = p9pdu_vwritef(req->tc, c->proto_version, fmt, ap);
- va_end(ap);
if (err)
goto reterr;
- p9pdu_finalize(req->tc);
+ p9pdu_finalize(c, req->tc);
+ trace_9p_client_req(c, type, tag);
+ return req;
+reterr:
+ p9_free_req(c, req);
+ return ERR_PTR(err);
+}
+
+/**
+ * p9_client_rpc - issue a request and wait for a response
+ * @c: client session
+ * @type: type of request
+ * @fmt: protocol format string (see protocol.c)
+ *
+ * Returns request structure (which client must free using p9_free_req)
+ */
+
+static struct p9_req_t *
+p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
+{
+ va_list ap;
+ int sigpending, err;
+ unsigned long flags;
+ struct p9_req_t *req;
+
+ va_start(ap, fmt);
+ req = p9_client_prepare_req(c, type, c->msize, fmt, ap);
+ va_end(ap);
+ if (IS_ERR(req))
+ return req;
+
+ if (signal_pending(current)) {
+ sigpending = 1;
+ clear_thread_flag(TIF_SIGPENDING);
+ } else
+ sigpending = 0;
err = c->trans_mod->request(c, req);
if (err < 0) {
@@ -620,18 +739,14 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
c->status = Disconnected;
goto reterr;
}
-
- P9_DPRINTK(P9_DEBUG_MUX, "wait %p tag: %d\n", req->wq, tag);
+ /* Wait for the response */
err = wait_event_interruptible(*req->wq,
- req->status >= REQ_STATUS_RCVD);
- P9_DPRINTK(P9_DEBUG_MUX, "wait %p tag: %d returned %d\n",
- req->wq, tag, err);
+ req->status >= REQ_STATUS_RCVD);
if (req->status == REQ_STATUS_ERROR) {
P9_DPRINTK(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
err = req->t_err;
}
-
if ((err == -ERESTARTSYS) && (c->status == Connected)) {
P9_DPRINTK(P9_DEBUG_MUX, "flushing\n");
sigpending = 1;
@@ -644,25 +759,103 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
if (req->status == REQ_STATUS_RCVD)
err = 0;
}
-
if (sigpending) {
spin_lock_irqsave(&current->sighand->siglock, flags);
recalc_sigpending();
spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
-
if (err < 0)
goto reterr;
err = p9_check_errors(c, req);
- if (!err) {
- P9_DPRINTK(P9_DEBUG_MUX, "exit: client %p op %d\n", c, type);
+ trace_9p_client_res(c, type, req->rc->tag, err);
+ if (!err)
return req;
+reterr:
+ p9_free_req(c, req);
+ return ERR_PTR(err);
+}
+
+/**
+ * p9_client_zc_rpc - issue a request and wait for a response
+ * @c: client session
+ * @type: type of request
+ * @uidata: user buffer that should be used for zero copy read
+ * @uodata: user buffer that should be used for zero copy write
+ * @inlen: read buffer size
+ * @olen: write buffer size
+ * @in_hdrlen: read header size; this is the size of the response protocol data
+ * @fmt: protocol format string (see protocol.c)
+ *
+ * Returns request structure (which client must free using p9_free_req)
+ */
+static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
+ char *uidata, char *uodata,
+ int inlen, int olen, int in_hdrlen,
+ int kern_buf, const char *fmt, ...)
+{
+ va_list ap;
+ int sigpending, err;
+ unsigned long flags;
+ struct p9_req_t *req;
+
+ va_start(ap, fmt);
+ /*
+ * We allocate an inline protocol buffer of only 4k bytes.
+ * The actual content is passed in zero-copy fashion.
+ */
+ req = p9_client_prepare_req(c, type, P9_ZC_HDR_SZ, fmt, ap);
+ va_end(ap);
+ if (IS_ERR(req))
+ return req;
+
+ if (signal_pending(current)) {
+ sigpending = 1;
+ clear_thread_flag(TIF_SIGPENDING);
+ } else
+ sigpending = 0;
+
+ /* If we are called with KERNEL_DS force kern_buf */
+ if (segment_eq(get_fs(), KERNEL_DS))
+ kern_buf = 1;
+
+ err = c->trans_mod->zc_request(c, req, uidata, uodata,
+ inlen, olen, in_hdrlen, kern_buf);
+ if (err < 0) {
+ if (err == -EIO)
+ c->status = Disconnected;
+ if (err != -ERESTARTSYS)
+ goto reterr;
+ }
+ if (req->status == REQ_STATUS_ERROR) {
+ P9_DPRINTK(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
+ err = req->t_err;
+ }
+ if ((err == -ERESTARTSYS) && (c->status == Connected)) {
+ P9_DPRINTK(P9_DEBUG_MUX, "flushing\n");
+ sigpending = 1;
+ clear_thread_flag(TIF_SIGPENDING);
+
+ if (c->trans_mod->cancel(c, req))
+ p9_client_flush(c, req);
+
+ /* if we received the response anyway, don't signal error */
+ if (req->status == REQ_STATUS_RCVD)
+ err = 0;
+ }
+ if (sigpending) {
+ spin_lock_irqsave(&current->sighand->siglock, flags);
+ recalc_sigpending();
+ spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
+ if (err < 0)
+ goto reterr;
+ err = p9_check_zc_errors(c, req, uidata, in_hdrlen, kern_buf);
+ trace_9p_client_res(c, type, req->rc->tag, err);
+ if (!err)
+ return req;
reterr:
- P9_DPRINTK(P9_DEBUG_MUX, "exit: client %p op %d error: %d\n", c, type,
- err);
p9_free_req(c, req);
return ERR_PTR(err);
}
@@ -750,7 +943,7 @@ static int p9_client_version(struct p9_client *c)
err = p9pdu_readf(req->rc, c->proto_version, "ds", &msize, &version);
if (err) {
P9_DPRINTK(P9_DEBUG_9P, "version error %d\n", err);
- p9pdu_dump(1, req->rc);
+ trace_9p_protocol_dump(c, req->rc);
goto error;
}
@@ -887,15 +1080,14 @@ EXPORT_SYMBOL(p9_client_begin_disconnect);
struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
char *uname, u32 n_uname, char *aname)
{
- int err;
+ int err = 0;
struct p9_req_t *req;
struct p9_fid *fid;
struct p9_qid qid;
- P9_DPRINTK(P9_DEBUG_9P, ">>> TATTACH afid %d uname %s aname %s\n",
- afid ? afid->fid : -1, uname, aname);
- err = 0;
+ P9_DPRINTK(P9_DEBUG_9P, ">>> TATTACH afid %d uname %s aname %s\n",
+ afid ? afid->fid : -1, uname, aname);
fid = p9_fid_create(clnt);
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
@@ -912,7 +1104,7 @@ struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
err = p9pdu_readf(req->rc, clnt->proto_version, "Q", &qid);
if (err) {
- p9pdu_dump(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
p9_free_req(clnt, req);
goto error;
}
@@ -972,7 +1164,7 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname,
err = p9pdu_readf(req->rc, clnt->proto_version, "R", &nwqids, &wqids);
if (err) {
- p9pdu_dump(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
p9_free_req(clnt, req);
goto clunk_fid;
}
@@ -1039,7 +1231,7 @@ int p9_client_open(struct p9_fid *fid, int mode)
err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", &qid, &iounit);
if (err) {
- p9pdu_dump(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto free_and_error;
}
@@ -1082,7 +1274,7 @@ int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode,
err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", qid, &iounit);
if (err) {
- p9pdu_dump(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto free_and_error;
}
@@ -1127,7 +1319,7 @@ int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode,
err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", &qid, &iounit);
if (err) {
- p9pdu_dump(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto free_and_error;
}
@@ -1166,7 +1358,7 @@ int p9_client_symlink(struct p9_fid *dfid, char *name, char *symtgt, gid_t gid,
err = p9pdu_readf(req->rc, clnt->proto_version, "Q", qid);
if (err) {
- p9pdu_dump(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto free_and_error;
}
@@ -1284,17 +1476,42 @@ error:
}
EXPORT_SYMBOL(p9_client_remove);
+int p9_client_unlinkat(struct p9_fid *dfid, const char *name, int flags)
+{
+ int err = 0;
+ struct p9_req_t *req;
+ struct p9_client *clnt;
+
+ P9_DPRINTK(P9_DEBUG_9P, ">>> TUNLINKAT fid %d %s %d\n",
+ dfid->fid, name, flags);
+
+ clnt = dfid->clnt;
+ req = p9_client_rpc(clnt, P9_TUNLINKAT, "dsd", dfid->fid, name, flags);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ goto error;
+ }
+ P9_DPRINTK(P9_DEBUG_9P, "<<< RUNLINKAT fid %d %s\n", dfid->fid, name);
+
+ p9_free_req(clnt, req);
+error:
+ return err;
+}
+EXPORT_SYMBOL(p9_client_unlinkat);
+
int
p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
u32 count)
{
- int err, rsize;
- struct p9_client *clnt;
- struct p9_req_t *req;
char *dataptr;
+ int kernel_buf = 0;
+ struct p9_req_t *req;
+ struct p9_client *clnt;
+ int err, rsize, non_zc = 0;
- P9_DPRINTK(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n", fid->fid,
- (long long unsigned) offset, count);
+
+ P9_DPRINTK(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n",
+ fid->fid, (long long unsigned) offset, count);
err = 0;
clnt = fid->clnt;
@@ -1306,13 +1523,24 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
rsize = count;
/* Don't bother zerocopy for small IO (< 1024) */
- if (((clnt->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
- P9_TRANS_PREF_PAYLOAD_SEP) && (rsize > 1024)) {
- req = p9_client_rpc(clnt, P9_TREAD, "dqE", fid->fid, offset,
- rsize, data, udata);
+ if (clnt->trans_mod->zc_request && rsize > 1024) {
+ char *indata;
+ if (data) {
+ kernel_buf = 1;
+ indata = data;
+ } else
+ indata = (char *)udata;
+ /*
+ * response header len is 11
+ * PDU Header(7) + IO Size (4)
+ */
+ req = p9_client_zc_rpc(clnt, P9_TREAD, indata, NULL, rsize, 0,
+ 11, kernel_buf, "dqd", fid->fid,
+ offset, rsize);
} else {
+ non_zc = 1;
req = p9_client_rpc(clnt, P9_TREAD, "dqd", fid->fid, offset,
- rsize);
+ rsize);
}
if (IS_ERR(req)) {
err = PTR_ERR(req);
@@ -1321,13 +1549,13 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
err = p9pdu_readf(req->rc, clnt->proto_version, "D", &count, &dataptr);
if (err) {
- p9pdu_dump(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto free_and_error;
}
P9_DPRINTK(P9_DEBUG_9P, "<<< RREAD count %d\n", count);
- if (!req->tc->pbuf_size) {
+ if (non_zc) {
if (data) {
memmove(data, dataptr, count);
} else {
@@ -1353,6 +1581,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
u64 offset, u32 count)
{
int err, rsize;
+ int kernel_buf = 0;
struct p9_client *clnt;
struct p9_req_t *req;
@@ -1368,19 +1597,24 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
if (count < rsize)
rsize = count;
- /* Don't bother zerocopy form small IO (< 1024) */
- if (((clnt->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
- P9_TRANS_PREF_PAYLOAD_SEP) && (rsize > 1024)) {
- req = p9_client_rpc(clnt, P9_TWRITE, "dqE", fid->fid, offset,
- rsize, data, udata);
+ /* Don't bother zerocopy for small IO (< 1024) */
+ if (clnt->trans_mod->zc_request && rsize > 1024) {
+ char *odata;
+ if (data) {
+ kernel_buf = 1;
+ odata = data;
+ } else
+ odata = (char *)udata;
+ req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
+ P9_ZC_HDR_SZ, kernel_buf, "dqd",
+ fid->fid, offset, rsize);
} else {
-
if (data)
req = p9_client_rpc(clnt, P9_TWRITE, "dqD", fid->fid,
- offset, rsize, data);
+ offset, rsize, data);
else
req = p9_client_rpc(clnt, P9_TWRITE, "dqU", fid->fid,
- offset, rsize, udata);
+ offset, rsize, udata);
}
if (IS_ERR(req)) {
err = PTR_ERR(req);
@@ -1389,7 +1623,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
err = p9pdu_readf(req->rc, clnt->proto_version, "d", &count);
if (err) {
- p9pdu_dump(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto free_and_error;
}
@@ -1429,7 +1663,7 @@ struct p9_wstat *p9_client_stat(struct p9_fid *fid)
err = p9pdu_readf(req->rc, clnt->proto_version, "wS", &ignored, ret);
if (err) {
- p9pdu_dump(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
p9_free_req(clnt, req);
goto error;
}
@@ -1480,7 +1714,7 @@ struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid,
err = p9pdu_readf(req->rc, clnt->proto_version, "A", ret);
if (err) {
- p9pdu_dump(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
p9_free_req(clnt, req);
goto error;
}
@@ -1628,7 +1862,7 @@ int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb)
&sb->bsize, &sb->blocks, &sb->bfree, &sb->bavail,
&sb->files, &sb->ffree, &sb->fsid, &sb->namelen);
if (err) {
- p9pdu_dump(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
p9_free_req(clnt, req);
goto error;
}
@@ -1646,7 +1880,8 @@ error:
}
EXPORT_SYMBOL(p9_client_statfs);
-int p9_client_rename(struct p9_fid *fid, struct p9_fid *newdirfid, char *name)
+int p9_client_rename(struct p9_fid *fid,
+ struct p9_fid *newdirfid, const char *name)
{
int err;
struct p9_req_t *req;
@@ -1673,6 +1908,36 @@ error:
}
EXPORT_SYMBOL(p9_client_rename);
+int p9_client_renameat(struct p9_fid *olddirfid, const char *old_name,
+ struct p9_fid *newdirfid, const char *new_name)
+{
+ int err;
+ struct p9_req_t *req;
+ struct p9_client *clnt;
+
+ err = 0;
+ clnt = olddirfid->clnt;
+
+ P9_DPRINTK(P9_DEBUG_9P, ">>> TRENAMEAT olddirfid %d old name %s"
+ " newdirfid %d new name %s\n", olddirfid->fid, old_name,
+ newdirfid->fid, new_name);
+
+ req = p9_client_rpc(clnt, P9_TRENAMEAT, "dsds", olddirfid->fid,
+ old_name, newdirfid->fid, new_name);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ goto error;
+ }
+
+ P9_DPRINTK(P9_DEBUG_9P, "<<< RRENAMEAT newdirfid %d new name %s\n",
+ newdirfid->fid, new_name);
+
+ p9_free_req(clnt, req);
+error:
+ return err;
+}
+EXPORT_SYMBOL(p9_client_renameat);
+
/*
* An xattrwalk without @attr_name gives the fid for the lisxattr namespace
*/
@@ -1704,7 +1969,7 @@ struct p9_fid *p9_client_xattrwalk(struct p9_fid *file_fid,
}
err = p9pdu_readf(req->rc, clnt->proto_version, "q", attr_size);
if (err) {
- p9pdu_dump(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
p9_free_req(clnt, req);
goto clunk_fid;
}
@@ -1750,7 +2015,7 @@ EXPORT_SYMBOL_GPL(p9_client_xattrcreate);
int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
{
- int err, rsize;
+ int err, rsize, non_zc = 0;
struct p9_client *clnt;
struct p9_req_t *req;
char *dataptr;
@@ -1768,13 +2033,18 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
if (count < rsize)
rsize = count;
- if ((clnt->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
- P9_TRANS_PREF_PAYLOAD_SEP) {
- req = p9_client_rpc(clnt, P9_TREADDIR, "dqF", fid->fid,
- offset, rsize, data);
+ /* Don't bother zerocopy for small IO (< 1024) */
+ if (clnt->trans_mod->zc_request && rsize > 1024) {
+ /*
+ * response header len is 11
+ * PDU Header(7) + IO Size (4)
+ */
+ req = p9_client_zc_rpc(clnt, P9_TREADDIR, data, NULL, rsize, 0,
+ 11, 1, "dqd", fid->fid, offset, rsize);
} else {
+ non_zc = 1;
req = p9_client_rpc(clnt, P9_TREADDIR, "dqd", fid->fid,
- offset, rsize);
+ offset, rsize);
}
if (IS_ERR(req)) {
err = PTR_ERR(req);
@@ -1783,13 +2053,13 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
err = p9pdu_readf(req->rc, clnt->proto_version, "D", &count, &dataptr);
if (err) {
- p9pdu_dump(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto free_and_error;
}
P9_DPRINTK(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count);
- if (!req->tc->pbuf_size && data)
+ if (non_zc)
memmove(data, dataptr, count);
p9_free_req(clnt, req);
@@ -1820,7 +2090,7 @@ int p9_client_mknod_dotl(struct p9_fid *fid, char *name, int mode,
err = p9pdu_readf(req->rc, clnt->proto_version, "Q", qid);
if (err) {
- p9pdu_dump(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto error;
}
P9_DPRINTK(P9_DEBUG_9P, "<<< RMKNOD qid %x.%llx.%x\n", qid->type,
@@ -1851,7 +2121,7 @@ int p9_client_mkdir_dotl(struct p9_fid *fid, char *name, int mode,
err = p9pdu_readf(req->rc, clnt->proto_version, "Q", qid);
if (err) {
- p9pdu_dump(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto error;
}
P9_DPRINTK(P9_DEBUG_9P, "<<< RMKDIR qid %x.%llx.%x\n", qid->type,
@@ -1886,7 +2156,7 @@ int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status)
err = p9pdu_readf(req->rc, clnt->proto_version, "b", status);
if (err) {
- p9pdu_dump(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto error;
}
P9_DPRINTK(P9_DEBUG_9P, "<<< RLOCK status %i\n", *status);
@@ -1919,7 +2189,7 @@ int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *glock)
&glock->start, &glock->length, &glock->proc_id,
&glock->client_id);
if (err) {
- p9pdu_dump(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto error;
}
P9_DPRINTK(P9_DEBUG_9P, "<<< RGETLOCK type %i start %lld length %lld "
@@ -1947,7 +2217,7 @@ int p9_client_readlink(struct p9_fid *fid, char **target)
err = p9pdu_readf(req->rc, clnt->proto_version, "s", target);
if (err) {
- p9pdu_dump(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto error;
}
P9_DPRINTK(P9_DEBUG_9P, "<<< RREADLINK target %s\n", *target);
diff --git a/net/9p/mod.c b/net/9p/mod.c
index 72c3982..2664d12 100644
--- a/net/9p/mod.c
+++ b/net/9p/mod.c
@@ -80,14 +80,14 @@ EXPORT_SYMBOL(v9fs_unregister_trans);
* @name: string identifying transport
*
*/
-struct p9_trans_module *v9fs_get_trans_by_name(const substring_t *name)
+struct p9_trans_module *v9fs_get_trans_by_name(char *s)
{
struct p9_trans_module *t, *found = NULL;
spin_lock(&v9fs_trans_lock);
list_for_each_entry(t, &v9fs_trans_list, list)
- if (strncmp(t->name, name->from, name->to-name->from) == 0 &&
+ if (strcmp(t->name, s) == 0 &&
try_module_get(t->owner)) {
found = t;
break;
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index a873277..55e10a9 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -37,46 +37,11 @@
#include <net/9p/client.h>
#include "protocol.h"
+#include <trace/events/9p.h>
+
static int
p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...);
-#ifdef CONFIG_NET_9P_DEBUG
-void
-p9pdu_dump(int way, struct p9_fcall *pdu)
-{
- int i, n;
- u8 *data = pdu->sdata;
- int datalen = pdu->size;
- char buf[255];
- int buflen = 255;
-
- i = n = 0;
- if (datalen > (buflen-16))
- datalen = buflen-16;
- while (i < datalen) {
- n += scnprintf(buf + n, buflen - n, "%02x ", data[i]);
- if (i%4 == 3)
- n += scnprintf(buf + n, buflen - n, " ");
- if (i%32 == 31)
- n += scnprintf(buf + n, buflen - n, "\n");
-
- i++;
- }
- n += scnprintf(buf + n, buflen - n, "\n");
-
- if (way)
- P9_DPRINTK(P9_DEBUG_PKT, "[[[(%d) %s\n", datalen, buf);
- else
- P9_DPRINTK(P9_DEBUG_PKT, "]]](%d) %s\n", datalen, buf);
-}
-#else
-void
-p9pdu_dump(int way, struct p9_fcall *pdu)
-{
-}
-#endif
-EXPORT_SYMBOL(p9pdu_dump);
-
void p9stat_free(struct p9_wstat *stbuf)
{
kfree(stbuf->name);
@@ -87,7 +52,7 @@ void p9stat_free(struct p9_wstat *stbuf)
}
EXPORT_SYMBOL(p9stat_free);
-static size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size)
+size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size)
{
size_t len = min(pdu->size - pdu->offset, size);
memcpy(data, &pdu->sdata[pdu->offset], len);
@@ -114,26 +79,6 @@ pdu_write_u(struct p9_fcall *pdu, const char __user *udata, size_t size)
return size - len;
}
-static size_t
-pdu_write_urw(struct p9_fcall *pdu, const char *kdata, const char __user *udata,
- size_t size)
-{
- BUG_ON(pdu->size > P9_IOHDRSZ);
- pdu->pubuf = (char __user *)udata;
- pdu->pkbuf = (char *)kdata;
- pdu->pbuf_size = size;
- return 0;
-}
-
-static size_t
-pdu_write_readdir(struct p9_fcall *pdu, const char *kdata, size_t size)
-{
- BUG_ON(pdu->size > P9_READDIRHDRSZ);
- pdu->pkbuf = (char *)kdata;
- pdu->pbuf_size = size;
- return 0;
-}
-
/*
b - int8_t
w - int16_t
@@ -465,26 +410,6 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
errcode = -EFAULT;
}
break;
- case 'E':{
- int32_t cnt = va_arg(ap, int32_t);
- const char *k = va_arg(ap, const void *);
- const char __user *u = va_arg(ap,
- const void __user *);
- errcode = p9pdu_writef(pdu, proto_version, "d",
- cnt);
- if (!errcode && pdu_write_urw(pdu, k, u, cnt))
- errcode = -EFAULT;
- }
- break;
- case 'F':{
- int32_t cnt = va_arg(ap, int32_t);
- const char *k = va_arg(ap, const void *);
- errcode = p9pdu_writef(pdu, proto_version, "d",
- cnt);
- if (!errcode && pdu_write_readdir(pdu, k, cnt))
- errcode = -EFAULT;
- }
- break;
case 'U':{
int32_t count = va_arg(ap, int32_t);
const char __user *udata =
@@ -597,7 +522,7 @@ p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...)
return ret;
}
-int p9stat_read(char *buf, int len, struct p9_wstat *st, int proto_version)
+int p9stat_read(struct p9_client *clnt, char *buf, int len, struct p9_wstat *st)
{
struct p9_fcall fake_pdu;
int ret;
@@ -607,10 +532,10 @@ int p9stat_read(char *buf, int len, struct p9_wstat *st, int proto_version)
fake_pdu.sdata = buf;
fake_pdu.offset = 0;
- ret = p9pdu_readf(&fake_pdu, proto_version, "S", st);
+ ret = p9pdu_readf(&fake_pdu, clnt->proto_version, "S", st);
if (ret) {
P9_DPRINTK(P9_DEBUG_9P, "<<< p9stat_read failed: %d\n", ret);
- p9pdu_dump(1, &fake_pdu);
+ trace_9p_protocol_dump(clnt, &fake_pdu);
}
return ret;
@@ -623,7 +548,7 @@ int p9pdu_prepare(struct p9_fcall *pdu, int16_t tag, int8_t type)
return p9pdu_writef(pdu, 0, "dbw", 0, type, tag);
}
-int p9pdu_finalize(struct p9_fcall *pdu)
+int p9pdu_finalize(struct p9_client *clnt, struct p9_fcall *pdu)
{
int size = pdu->size;
int err;
@@ -632,11 +557,7 @@ int p9pdu_finalize(struct p9_fcall *pdu)
err = p9pdu_writef(pdu, 0, "d", size);
pdu->size = size;
-#ifdef CONFIG_NET_9P_DEBUG
- if ((p9_debug_level & P9_DEBUG_PKT) == P9_DEBUG_PKT)
- p9pdu_dump(0, pdu);
-#endif
-
+ trace_9p_protocol_dump(clnt, pdu);
P9_DPRINTK(P9_DEBUG_9P, ">>> size=%d type: %d tag: %d\n", pdu->size,
pdu->id, pdu->tag);
@@ -647,14 +568,10 @@ void p9pdu_reset(struct p9_fcall *pdu)
{
pdu->offset = 0;
pdu->size = 0;
- pdu->private = NULL;
- pdu->pubuf = NULL;
- pdu->pkbuf = NULL;
- pdu->pbuf_size = 0;
}
-int p9dirent_read(char *buf, int len, struct p9_dirent *dirent,
- int proto_version)
+int p9dirent_read(struct p9_client *clnt, char *buf, int len,
+ struct p9_dirent *dirent)
{
struct p9_fcall fake_pdu;
int ret;
@@ -665,11 +582,11 @@ int p9dirent_read(char *buf, int len, struct p9_dirent *dirent,
fake_pdu.sdata = buf;
fake_pdu.offset = 0;
- ret = p9pdu_readf(&fake_pdu, proto_version, "Qqbs", &dirent->qid,
- &dirent->d_off, &dirent->d_type, &nameptr);
+ ret = p9pdu_readf(&fake_pdu, clnt->proto_version, "Qqbs", &dirent->qid,
+ &dirent->d_off, &dirent->d_type, &nameptr);
if (ret) {
P9_DPRINTK(P9_DEBUG_9P, "<<< p9dirent_read failed: %d\n", ret);
- p9pdu_dump(1, &fake_pdu);
+ trace_9p_protocol_dump(clnt, &fake_pdu);
goto out;
}
diff --git a/net/9p/protocol.h b/net/9p/protocol.h
index 2431c0f..2cc525fa 100644
--- a/net/9p/protocol.h
+++ b/net/9p/protocol.h
@@ -29,6 +29,6 @@ int p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
va_list ap);
int p9pdu_readf(struct p9_fcall *pdu, int proto_version, const char *fmt, ...);
int p9pdu_prepare(struct p9_fcall *pdu, int16_t tag, int8_t type);
-int p9pdu_finalize(struct p9_fcall *pdu);
-void p9pdu_dump(int, struct p9_fcall *);
+int p9pdu_finalize(struct p9_client *clnt, struct p9_fcall *pdu);
void p9pdu_reset(struct p9_fcall *pdu);
+size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size);
diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c
index 9a70ebd..2ee3879 100644
--- a/net/9p/trans_common.c
+++ b/net/9p/trans_common.c
@@ -21,30 +21,25 @@
/**
* p9_release_req_pages - Release pages after the transaction.
- * @*private: PDU's private page of struct trans_rpage_info
*/
-void
-p9_release_req_pages(struct trans_rpage_info *rpinfo)
+void p9_release_pages(struct page **pages, int nr_pages)
{
- int i = 0;
+ int i;
- while (rpinfo->rp_data[i] && rpinfo->rp_nr_pages--) {
- put_page(rpinfo->rp_data[i]);
- i++;
- }
+ for (i = 0; i < nr_pages; i++)
+ if (pages[i])
+ put_page(pages[i]);
}
-EXPORT_SYMBOL(p9_release_req_pages);
+EXPORT_SYMBOL(p9_release_pages);
/**
* p9_nr_pages - Return number of pages needed to accommodate the payload.
*/
-int
-p9_nr_pages(struct p9_req_t *req)
+int p9_nr_pages(char *data, int len)
{
unsigned long start_page, end_page;
- start_page = (unsigned long)req->tc->pubuf >> PAGE_SHIFT;
- end_page = ((unsigned long)req->tc->pubuf + req->tc->pbuf_size +
- PAGE_SIZE - 1) >> PAGE_SHIFT;
+ start_page = (unsigned long)data >> PAGE_SHIFT;
+ end_page = ((unsigned long)data + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
return end_page - start_page;
}
EXPORT_SYMBOL(p9_nr_pages);
@@ -58,35 +53,17 @@ EXPORT_SYMBOL(p9_nr_pages);
* @nr_pages: number of pages to accommodate the payload
* @rw: Indicates if the pages are for read or write.
*/
-int
-p9_payload_gup(struct p9_req_t *req, size_t *pdata_off, int *pdata_len,
- int nr_pages, u8 rw)
-{
- uint32_t first_page_bytes = 0;
- int32_t pdata_mapped_pages;
- struct trans_rpage_info *rpinfo;
-
- *pdata_off = (__force size_t)req->tc->pubuf & (PAGE_SIZE-1);
- if (*pdata_off)
- first_page_bytes = min(((size_t)PAGE_SIZE - *pdata_off),
- req->tc->pbuf_size);
+int p9_payload_gup(char *data, int *nr_pages, struct page **pages, int write)
+{
+ int nr_mapped_pages;
- rpinfo = req->tc->private;
- pdata_mapped_pages = get_user_pages_fast((unsigned long)req->tc->pubuf,
- nr_pages, rw, &rpinfo->rp_data[0]);
- if (pdata_mapped_pages <= 0)
- return pdata_mapped_pages;
+ nr_mapped_pages = get_user_pages_fast((unsigned long)data,
+ *nr_pages, write, pages);
+ if (nr_mapped_pages <= 0)
+ return nr_mapped_pages;
- rpinfo->rp_nr_pages = pdata_mapped_pages;
- if (*pdata_off) {
- *pdata_len = first_page_bytes;
- *pdata_len += min((req->tc->pbuf_size - *pdata_len),
- ((size_t)pdata_mapped_pages - 1) << PAGE_SHIFT);
- } else {
- *pdata_len = min(req->tc->pbuf_size,
- (size_t)pdata_mapped_pages << PAGE_SHIFT);
- }
+ *nr_pages = nr_mapped_pages;
return 0;
}
EXPORT_SYMBOL(p9_payload_gup);
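
The reworked trans_common helpers above give zero-copy transports a simpler pin/unpin flow around get_user_pages_fast(). A rough caller-side sketch based only on the new signatures (error handling trimmed, variable names illustrative):

	int nr_pages = p9_nr_pages(udata, len);
	struct page **pages = kmalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);

	/* last argument is the gup write flag: 1 if the kernel will
	 * write into these pages (i.e. a 9P read into a user buffer) */
	err = p9_payload_gup(udata, &nr_pages, pages, 1);
	if (err < 0)
		goto out;
	/* ... hand the pinned pages to the scatter/gather packing code ... */
	p9_release_pages(pages, nr_pages);
out:
	kfree(pages);
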
diff --git a/net/9p/trans_common.h b/net/9p/trans_common.h
index 7630922..173bb55 100644
--- a/net/9p/trans_common.h
+++ b/net/9p/trans_common.h
@@ -12,21 +12,6 @@
*
*/
-/* TRUE if it is user context */
-#define P9_IS_USER_CONTEXT (!segment_eq(get_fs(), KERNEL_DS))
-
-/**
- * struct trans_rpage_info - To store mapped page information in PDU.
- * @rp_alloc:Set if this structure is allocd, not a reuse unused space in pdu.
- * @rp_nr_pages: Number of mapped pages
- * @rp_data: Array of page pointers
- */
-struct trans_rpage_info {
- u8 rp_alloc;
- int rp_nr_pages;
- struct page *rp_data[0];
-};
-
-void p9_release_req_pages(struct trans_rpage_info *);
-int p9_payload_gup(struct p9_req_t *, size_t *, int *, int, u8);
-int p9_nr_pages(struct p9_req_t *);
+void p9_release_pages(struct page **, int);
+int p9_payload_gup(char *, int *, struct page **, int);
+int p9_nr_pages(char *, int);
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index e317583..55f0c09 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -37,6 +37,7 @@
#include <linux/inet.h>
#include <linux/idr.h>
#include <linux/file.h>
+#include <linux/highmem.h>
#include <linux/slab.h>
#include <net/9p/9p.h>
#include <linux/parser.h>
@@ -150,12 +151,10 @@ static void req_done(struct virtqueue *vq)
while (1) {
spin_lock_irqsave(&chan->lock, flags);
rc = virtqueue_get_buf(chan->vq, &len);
-
if (rc == NULL) {
spin_unlock_irqrestore(&chan->lock, flags);
break;
}
-
chan->ring_bufs_avail = 1;
spin_unlock_irqrestore(&chan->lock, flags);
/* Wakeup if anyone waiting for VirtIO ring space. */
@@ -163,17 +162,6 @@ static void req_done(struct virtqueue *vq)
P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc);
P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
req = p9_tag_lookup(chan->client, rc->tag);
- if (req->tc->private) {
- struct trans_rpage_info *rp = req->tc->private;
- int p = rp->rp_nr_pages;
- /*Release pages */
- p9_release_req_pages(rp);
- atomic_sub(p, &vp_pinned);
- wake_up(&vp_wq);
- if (rp->rp_alloc)
- kfree(rp);
- req->tc->private = NULL;
- }
req->status = REQ_STATUS_RCVD;
p9_client_cb(chan->client, req);
}
@@ -193,9 +181,8 @@ static void req_done(struct virtqueue *vq)
*
*/
-static int
-pack_sg_list(struct scatterlist *sg, int start, int limit, char *data,
- int count)
+static int pack_sg_list(struct scatterlist *sg, int start,
+ int limit, char *data, int count)
{
int s;
int index = start;
@@ -224,31 +211,36 @@ static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
* this takes a list of pages.
* @sg: scatter/gather list to pack into
* @start: which segment of the sg_list to start at
- * @pdata_off: Offset into the first page
* @**pdata: a list of pages to add into sg.
+ * @nr_pages: number of pages to pack into the scatter/gather list
+ * @data: data to pack into scatter/gather list
* @count: amount of data to pack into the scatter/gather list
*/
static int
-pack_sg_list_p(struct scatterlist *sg, int start, int limit, size_t pdata_off,
- struct page **pdata, int count)
+pack_sg_list_p(struct scatterlist *sg, int start, int limit,
+ struct page **pdata, int nr_pages, char *data, int count)
{
- int s;
- int i = 0;
+ int i = 0, s;
+ int data_off;
int index = start;
- if (pdata_off) {
- s = min((int)(PAGE_SIZE - pdata_off), count);
- sg_set_page(&sg[index++], pdata[i++], s, pdata_off);
- count -= s;
- }
-
- while (count) {
- BUG_ON(index > limit);
- s = min((int)PAGE_SIZE, count);
- sg_set_page(&sg[index++], pdata[i++], s, 0);
+ BUG_ON(nr_pages > (limit - start));
+ /*
+	 * If the data doesn't start at a page
+	 * boundary, find the offset into the first page.
+ */
+ data_off = offset_in_page(data);
+ while (nr_pages) {
+ s = rest_of_page(data);
+ if (s > count)
+ s = count;
+ sg_set_page(&sg[index++], pdata[i++], s, data_off);
+ data_off = 0;
+ data += s;
count -= s;
+ nr_pages--;
}
- return index-start;
+ return index - start;
}
/**
@@ -261,114 +253,166 @@ pack_sg_list_p(struct scatterlist *sg, int start, int limit, size_t pdata_off,
static int
p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
{
- int in, out, inp, outp;
- struct virtio_chan *chan = client->trans;
+ int err;
+ int in, out;
unsigned long flags;
- size_t pdata_off = 0;
- struct trans_rpage_info *rpinfo = NULL;
- int err, pdata_len = 0;
+ struct virtio_chan *chan = client->trans;
P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request\n");
req->status = REQ_STATUS_SENT;
+req_retry:
+ spin_lock_irqsave(&chan->lock, flags);
+
+ /* Handle out VirtIO ring buffers */
+ out = pack_sg_list(chan->sg, 0,
+ VIRTQUEUE_NUM, req->tc->sdata, req->tc->size);
- if (req->tc->pbuf_size && (req->tc->pubuf && P9_IS_USER_CONTEXT)) {
- int nr_pages = p9_nr_pages(req);
- int rpinfo_size = sizeof(struct trans_rpage_info) +
- sizeof(struct page *) * nr_pages;
+ in = pack_sg_list(chan->sg, out,
+ VIRTQUEUE_NUM, req->rc->sdata, req->rc->capacity);
- if (atomic_read(&vp_pinned) >= chan->p9_max_pages) {
- err = wait_event_interruptible(vp_wq,
- atomic_read(&vp_pinned) < chan->p9_max_pages);
+ err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc);
+ if (err < 0) {
+ if (err == -ENOSPC) {
+ chan->ring_bufs_avail = 0;
+ spin_unlock_irqrestore(&chan->lock, flags);
+ err = wait_event_interruptible(*chan->vc_wq,
+ chan->ring_bufs_avail);
if (err == -ERESTARTSYS)
return err;
- P9_DPRINTK(P9_DEBUG_TRANS, "9p: May gup pages now.\n");
- }
- if (rpinfo_size <= (req->tc->capacity - req->tc->size)) {
- /* We can use sdata */
- req->tc->private = req->tc->sdata + req->tc->size;
- rpinfo = (struct trans_rpage_info *)req->tc->private;
- rpinfo->rp_alloc = 0;
+ P9_DPRINTK(P9_DEBUG_TRANS, "9p:Retry virtio request\n");
+ goto req_retry;
} else {
- req->tc->private = kmalloc(rpinfo_size, GFP_NOFS);
- if (!req->tc->private) {
- P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: "
- "private kmalloc returned NULL");
- return -ENOMEM;
- }
- rpinfo = (struct trans_rpage_info *)req->tc->private;
- rpinfo->rp_alloc = 1;
+ spin_unlock_irqrestore(&chan->lock, flags);
+ P9_DPRINTK(P9_DEBUG_TRANS,
+ "9p debug: "
+ "virtio rpc add_buf returned failure");
+ return -EIO;
}
+ }
+ virtqueue_kick(chan->vq);
+ spin_unlock_irqrestore(&chan->lock, flags);
- err = p9_payload_gup(req, &pdata_off, &pdata_len, nr_pages,
- req->tc->id == P9_TREAD ? 1 : 0);
- if (err < 0) {
- if (rpinfo->rp_alloc)
- kfree(rpinfo);
+ P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request kicked\n");
+ return 0;
+}
+
+static int p9_get_mapped_pages(struct virtio_chan *chan,
+ struct page **pages, char *data,
+ int nr_pages, int write, int kern_buf)
+{
+ int err;
+ if (!kern_buf) {
+ /*
+	 * We allow only p9_max_pages to be pinned. We wait here for
+	 * other zc requests to finish.
+ */
+ if (atomic_read(&vp_pinned) >= chan->p9_max_pages) {
+ err = wait_event_interruptible(vp_wq,
+ (atomic_read(&vp_pinned) < chan->p9_max_pages));
+ if (err == -ERESTARTSYS)
+ return err;
+ }
+ err = p9_payload_gup(data, &nr_pages, pages, write);
+ if (err < 0)
return err;
- } else {
- atomic_add(rpinfo->rp_nr_pages, &vp_pinned);
+ atomic_add(nr_pages, &vp_pinned);
+ } else {
+ /* kernel buffer, no need to pin pages */
+ int s, index = 0;
+ int count = nr_pages;
+ while (nr_pages) {
+ s = rest_of_page(data);
+ pages[index++] = kmap_to_page(data);
+ data += s;
+ nr_pages--;
}
+ nr_pages = count;
}
+ return nr_pages;
+}
-req_retry_pinned:
- spin_lock_irqsave(&chan->lock, flags);
+/**
+ * p9_virtio_zc_request - issue a zero copy request
+ * @client: client instance issuing the request
+ * @req: request to be issued
+ * @uidata: user buffer that should be used for zero copy read
+ * @uodata: user buffer that should be used for zero copy write
+ * @inlen: read buffer size
+ * @outlen: write buffer size
+ * @in_hdr_len: read header size; this is the size of the response protocol data
+ * @kern_buf: set when uidata/uodata are kernel buffers and need no pinning
+ *
+ */
+static int
+p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
+ char *uidata, char *uodata, int inlen,
+ int outlen, int in_hdr_len, int kern_buf)
+{
+ int in, out, err;
+ unsigned long flags;
+ int in_nr_pages = 0, out_nr_pages = 0;
+ struct page **in_pages = NULL, **out_pages = NULL;
+ struct virtio_chan *chan = client->trans;
- /* Handle out VirtIO ring buffers */
- out = pack_sg_list(chan->sg, 0, VIRTQUEUE_NUM, req->tc->sdata,
- req->tc->size);
-
- if (req->tc->pbuf_size && (req->tc->id == P9_TWRITE)) {
- /* We have additional write payload buffer to take care */
- if (req->tc->pubuf && P9_IS_USER_CONTEXT) {
- outp = pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM,
- pdata_off, rpinfo->rp_data, pdata_len);
- } else {
- char *pbuf;
- if (req->tc->pubuf)
- pbuf = (__force char *) req->tc->pubuf;
- else
- pbuf = req->tc->pkbuf;
- outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf,
- req->tc->pbuf_size);
+ P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request\n");
+
+ if (uodata) {
+ out_nr_pages = p9_nr_pages(uodata, outlen);
+ out_pages = kmalloc(sizeof(struct page *) * out_nr_pages,
+ GFP_NOFS);
+ if (!out_pages) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ out_nr_pages = p9_get_mapped_pages(chan, out_pages, uodata,
+ out_nr_pages, 0, kern_buf);
+ if (out_nr_pages < 0) {
+ err = out_nr_pages;
+ kfree(out_pages);
+ out_pages = NULL;
+ goto err_out;
}
- out += outp;
}
-
- /* Handle in VirtIO ring buffers */
- if (req->tc->pbuf_size &&
- ((req->tc->id == P9_TREAD) || (req->tc->id == P9_TREADDIR))) {
- /*
- * Take care of additional Read payload.
- * 11 is the read/write header = PDU Header(7) + IO Size (4).
- * Arrange in such a way that server places header in the
- * alloced memory and payload onto the user buffer.
- */
- inp = pack_sg_list(chan->sg, out,
- VIRTQUEUE_NUM, req->rc->sdata, 11);
- /*
- * Running executables in the filesystem may result in
- * a read request with kernel buffer as opposed to user buffer.
- */
- if (req->tc->pubuf && P9_IS_USER_CONTEXT) {
- in = pack_sg_list_p(chan->sg, out+inp, VIRTQUEUE_NUM,
- pdata_off, rpinfo->rp_data, pdata_len);
- } else {
- char *pbuf;
- if (req->tc->pubuf)
- pbuf = (__force char *) req->tc->pubuf;
- else
- pbuf = req->tc->pkbuf;
-
- in = pack_sg_list(chan->sg, out+inp, VIRTQUEUE_NUM,
- pbuf, req->tc->pbuf_size);
+ if (uidata) {
+ in_nr_pages = p9_nr_pages(uidata, inlen);
+ in_pages = kmalloc(sizeof(struct page *) * in_nr_pages,
+ GFP_NOFS);
+ if (!in_pages) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ in_nr_pages = p9_get_mapped_pages(chan, in_pages, uidata,
+ in_nr_pages, 1, kern_buf);
+ if (in_nr_pages < 0) {
+ err = in_nr_pages;
+ kfree(in_pages);
+ in_pages = NULL;
+ goto err_out;
}
- in += inp;
- } else {
- in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM,
- req->rc->sdata, req->rc->capacity);
}
+ req->status = REQ_STATUS_SENT;
+req_retry_pinned:
+ spin_lock_irqsave(&chan->lock, flags);
+ /* out data */
+ out = pack_sg_list(chan->sg, 0,
+ VIRTQUEUE_NUM, req->tc->sdata, req->tc->size);
+
+ if (out_pages)
+ out += pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM,
+ out_pages, out_nr_pages, uodata, outlen);
+ /*
+	 * Take care of in data.
+	 * For example, TREAD has an 11-byte header:
+	 * the read/write header = PDU Header(7) + IO Size (4).
+	 * Arrange in such a way that the server places the header in the
+	 * allocated memory and the payload onto the user buffer.
+ */
+ in = pack_sg_list(chan->sg, out,
+ VIRTQUEUE_NUM, req->rc->sdata, in_hdr_len);
+ if (in_pages)
+ in += pack_sg_list_p(chan->sg, out + in, VIRTQUEUE_NUM,
+ in_pages, in_nr_pages, uidata, inlen);
err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc);
if (err < 0) {
@@ -376,28 +420,45 @@ req_retry_pinned:
chan->ring_bufs_avail = 0;
spin_unlock_irqrestore(&chan->lock, flags);
err = wait_event_interruptible(*chan->vc_wq,
- chan->ring_bufs_avail);
+ chan->ring_bufs_avail);
if (err == -ERESTARTSYS)
- return err;
+ goto err_out;
P9_DPRINTK(P9_DEBUG_TRANS, "9p:Retry virtio request\n");
goto req_retry_pinned;
} else {
spin_unlock_irqrestore(&chan->lock, flags);
P9_DPRINTK(P9_DEBUG_TRANS,
- "9p debug: "
- "virtio rpc add_buf returned failure");
- if (rpinfo && rpinfo->rp_alloc)
- kfree(rpinfo);
- return -EIO;
+ "9p debug: "
+ "virtio rpc add_buf returned failure");
+ err = -EIO;
+ goto err_out;
}
}
-
virtqueue_kick(chan->vq);
spin_unlock_irqrestore(&chan->lock, flags);
-
P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request kicked\n");
- return 0;
+ err = wait_event_interruptible(*req->wq,
+ req->status >= REQ_STATUS_RCVD);
+ /*
+	 * Non-kernel buffers are pinned; unpin them.
+ */
+err_out:
+ if (!kern_buf) {
+ if (in_pages) {
+ p9_release_pages(in_pages, in_nr_pages);
+ atomic_sub(in_nr_pages, &vp_pinned);
+ }
+ if (out_pages) {
+ p9_release_pages(out_pages, out_nr_pages);
+ atomic_sub(out_nr_pages, &vp_pinned);
+ }
+		/* wake up anybody waiting for slots to pin pages */
+ wake_up(&vp_wq);
+ }
+ kfree(in_pages);
+ kfree(out_pages);
+ return err;
}
static ssize_t p9_mount_tag_show(struct device *dev,
@@ -591,8 +652,8 @@ static struct p9_trans_module p9_virtio_trans = {
.create = p9_virtio_create,
.close = p9_virtio_close,
.request = p9_virtio_request,
+ .zc_request = p9_virtio_zc_request,
.cancel = p9_virtio_cancel,
-
/*
* We leave one entry for input and one entry for response
* headers. We also skip one more entry to accomodate, address
@@ -600,7 +661,6 @@ static struct p9_trans_module p9_virtio_trans = {
* page in zero copy.
*/
.maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3),
- .pref = P9_TRANS_PREF_PAYLOAD_SEP,
.def = 0,
.owner = THIS_MODULE,
};
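Note on the reworked pack_sg_list_p(): segment lengths are now derived from the data pointer itself via rest_of_page(), so only the first segment carries a non-zero page offset. The walk can be sketched in plain user-space C; offset_in_page() and rest_of_page() are re-implemented locally here purely for illustration, and 4 KiB pages are assumed.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long offset_in_page(uintptr_t addr)
{
	return addr & (PAGE_SIZE - 1);
}

static unsigned long rest_of_page(uintptr_t addr)
{
	return PAGE_SIZE - offset_in_page(addr);
}

int main(void)
{
	/* A 10000-byte payload starting 300 bytes into a page: the first
	 * segment is short, the middle one is page-sized, the last is the tail. */
	uintptr_t data = 0x10000 + 300;
	unsigned long count = 10000;

	while (count) {
		unsigned long s = rest_of_page(data);

		if (s > count)
			s = count;
		printf("segment: offset %lu, length %lu\n",
		       offset_in_page(data), s);
		data += s;
		count -= s;
	}
	return 0;
}

The kernel version performs the same walk but drops each segment into a scatterlist entry with sg_set_page(), bounded by the BUG_ON(nr_pages > (limit - start)) check added above.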
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index 50dce79..173a2e8 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -39,6 +39,7 @@
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/export.h>
int sysctl_aarp_expiry_time = AARP_EXPIRY_TIME;
int sysctl_aarp_tick_time = AARP_TICK_TIME;
@@ -779,87 +780,87 @@ static int aarp_rcv(struct sk_buff *skb, struct net_device *dev,
}
switch (function) {
- case AARP_REPLY:
- if (!unresolved_count) /* Speed up */
- break;
-
- /* Find the entry. */
- a = __aarp_find_entry(unresolved[hash], dev, &sa);
- if (!a || dev != a->dev)
- break;
+ case AARP_REPLY:
+ if (!unresolved_count) /* Speed up */
+ break;
- /* We can fill one in - this is good. */
- memcpy(a->hwaddr, ea->hw_src, ETH_ALEN);
- __aarp_resolved(&unresolved[hash], a, hash);
- if (!unresolved_count)
- mod_timer(&aarp_timer,
- jiffies + sysctl_aarp_expiry_time);
+ /* Find the entry. */
+ a = __aarp_find_entry(unresolved[hash], dev, &sa);
+ if (!a || dev != a->dev)
break;
- case AARP_REQUEST:
- case AARP_PROBE:
+ /* We can fill one in - this is good. */
+ memcpy(a->hwaddr, ea->hw_src, ETH_ALEN);
+ __aarp_resolved(&unresolved[hash], a, hash);
+ if (!unresolved_count)
+ mod_timer(&aarp_timer,
+ jiffies + sysctl_aarp_expiry_time);
+ break;
+
+ case AARP_REQUEST:
+ case AARP_PROBE:
+
+ /*
+ * If it is my address set ma to my address and reply.
+ * We can treat probe and request the same. Probe
+ * simply means we shouldn't cache the querying host,
+ * as in a probe they are proposing an address not
+ * using one.
+ *
+ * Support for proxy-AARP added. We check if the
+ * address is one of our proxies before we toss the
+ * packet out.
+ */
+
+ sa.s_node = ea->pa_dst_node;
+ sa.s_net = ea->pa_dst_net;
+
+ /* See if we have a matching proxy. */
+ ma = __aarp_proxy_find(dev, &sa);
+ if (!ma)
+ ma = &ifa->address;
+ else { /* We need to make a copy of the entry. */
+ da.s_node = sa.s_node;
+ da.s_net = sa.s_net;
+ ma = &da;
+ }
+ if (function == AARP_PROBE) {
/*
- * If it is my address set ma to my address and reply.
- * We can treat probe and request the same. Probe
- * simply means we shouldn't cache the querying host,
- * as in a probe they are proposing an address not
- * using one.
- *
- * Support for proxy-AARP added. We check if the
- * address is one of our proxies before we toss the
- * packet out.
+ * A probe implies someone trying to get an
+ * address. So as a precaution flush any
+ * entries we have for this address.
*/
+ a = __aarp_find_entry(resolved[sa.s_node %
+ (AARP_HASH_SIZE - 1)],
+ skb->dev, &sa);
- sa.s_node = ea->pa_dst_node;
- sa.s_net = ea->pa_dst_net;
-
- /* See if we have a matching proxy. */
- ma = __aarp_proxy_find(dev, &sa);
- if (!ma)
- ma = &ifa->address;
- else { /* We need to make a copy of the entry. */
- da.s_node = sa.s_node;
- da.s_net = sa.s_net;
- ma = &da;
- }
-
- if (function == AARP_PROBE) {
- /*
- * A probe implies someone trying to get an
- * address. So as a precaution flush any
- * entries we have for this address.
- */
- a = __aarp_find_entry(resolved[sa.s_node %
- (AARP_HASH_SIZE - 1)],
- skb->dev, &sa);
-
- /*
- * Make it expire next tick - that avoids us
- * getting into a probe/flush/learn/probe/
- * flush/learn cycle during probing of a slow
- * to respond host addr.
- */
- if (a) {
- a->expires_at = jiffies - 1;
- mod_timer(&aarp_timer, jiffies +
- sysctl_aarp_tick_time);
- }
+ /*
+ * Make it expire next tick - that avoids us
+ * getting into a probe/flush/learn/probe/
+ * flush/learn cycle during probing of a slow
+ * to respond host addr.
+ */
+ if (a) {
+ a->expires_at = jiffies - 1;
+ mod_timer(&aarp_timer, jiffies +
+ sysctl_aarp_tick_time);
}
+ }
- if (sa.s_node != ma->s_node)
- break;
+ if (sa.s_node != ma->s_node)
+ break;
- if (sa.s_net && ma->s_net && sa.s_net != ma->s_net)
- break;
+ if (sa.s_net && ma->s_net && sa.s_net != ma->s_net)
+ break;
- sa.s_node = ea->pa_src_node;
- sa.s_net = ea->pa_src_net;
+ sa.s_node = ea->pa_src_node;
+ sa.s_net = ea->pa_src_net;
- /* aarp_my_address has found the address to use for us.
- */
- aarp_send_reply(dev, ma, &sa, ea->hw_src);
- break;
+ /* aarp_my_address has found the address to use for us.
+ */
+ aarp_send_reply(dev, ma, &sa, ea->hw_src);
+ break;
}
unlock:
diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
index 6ef0e76..b5b1a22 100644
--- a/net/appletalk/atalk_proc.c
+++ b/net/appletalk/atalk_proc.c
@@ -14,6 +14,7 @@
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/atalk.h>
+#include <linux/export.h>
static __inline__ struct atalk_iface *atalk_get_interface_idx(loff_t pos)
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 956a530..79aaac2 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -684,192 +684,192 @@ static int atif_ioctl(int cmd, void __user *arg)
atif = atalk_find_dev(dev);
switch (cmd) {
- case SIOCSIFADDR:
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- if (sa->sat_family != AF_APPLETALK)
- return -EINVAL;
- if (dev->type != ARPHRD_ETHER &&
- dev->type != ARPHRD_LOOPBACK &&
- dev->type != ARPHRD_LOCALTLK &&
- dev->type != ARPHRD_PPP)
- return -EPROTONOSUPPORT;
-
- nr = (struct atalk_netrange *)&sa->sat_zero[0];
- add_route = 1;
-
- /*
- * if this is a point-to-point iface, and we already
- * have an iface for this AppleTalk address, then we
- * should not add a route
- */
- if ((dev->flags & IFF_POINTOPOINT) &&
- atalk_find_interface(sa->sat_addr.s_net,
- sa->sat_addr.s_node)) {
- printk(KERN_DEBUG "AppleTalk: point-to-point "
- "interface added with "
- "existing address\n");
- add_route = 0;
- }
-
- /*
- * Phase 1 is fine on LocalTalk but we don't do
- * EtherTalk phase 1. Anyone wanting to add it go ahead.
- */
- if (dev->type == ARPHRD_ETHER && nr->nr_phase != 2)
- return -EPROTONOSUPPORT;
- if (sa->sat_addr.s_node == ATADDR_BCAST ||
- sa->sat_addr.s_node == 254)
- return -EINVAL;
- if (atif) {
- /* Already setting address */
- if (atif->status & ATIF_PROBE)
- return -EBUSY;
-
- atif->address.s_net = sa->sat_addr.s_net;
- atif->address.s_node = sa->sat_addr.s_node;
- atrtr_device_down(dev); /* Flush old routes */
- } else {
- atif = atif_add_device(dev, &sa->sat_addr);
- if (!atif)
- return -ENOMEM;
- }
- atif->nets = *nr;
-
- /*
- * Check if the chosen address is used. If so we
- * error and atalkd will try another.
- */
+ case SIOCSIFADDR:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (sa->sat_family != AF_APPLETALK)
+ return -EINVAL;
+ if (dev->type != ARPHRD_ETHER &&
+ dev->type != ARPHRD_LOOPBACK &&
+ dev->type != ARPHRD_LOCALTLK &&
+ dev->type != ARPHRD_PPP)
+ return -EPROTONOSUPPORT;
+
+ nr = (struct atalk_netrange *)&sa->sat_zero[0];
+ add_route = 1;
- if (!(dev->flags & IFF_LOOPBACK) &&
- !(dev->flags & IFF_POINTOPOINT) &&
- atif_probe_device(atif) < 0) {
- atif_drop_device(dev);
- return -EADDRINUSE;
- }
-
- /* Hey it worked - add the direct routes */
- sa = (struct sockaddr_at *)&rtdef.rt_gateway;
- sa->sat_family = AF_APPLETALK;
- sa->sat_addr.s_net = atif->address.s_net;
- sa->sat_addr.s_node = atif->address.s_node;
- sa = (struct sockaddr_at *)&rtdef.rt_dst;
- rtdef.rt_flags = RTF_UP;
- sa->sat_family = AF_APPLETALK;
- sa->sat_addr.s_node = ATADDR_ANYNODE;
- if (dev->flags & IFF_LOOPBACK ||
- dev->flags & IFF_POINTOPOINT)
- rtdef.rt_flags |= RTF_HOST;
-
- /* Routerless initial state */
- if (nr->nr_firstnet == htons(0) &&
- nr->nr_lastnet == htons(0xFFFE)) {
- sa->sat_addr.s_net = atif->address.s_net;
- atrtr_create(&rtdef, dev);
- atrtr_set_default(dev);
- } else {
- limit = ntohs(nr->nr_lastnet);
- if (limit - ntohs(nr->nr_firstnet) > 4096) {
- printk(KERN_WARNING "Too many routes/"
- "iface.\n");
- return -EINVAL;
- }
- if (add_route)
- for (ct = ntohs(nr->nr_firstnet);
- ct <= limit; ct++) {
- sa->sat_addr.s_net = htons(ct);
- atrtr_create(&rtdef, dev);
- }
- }
- dev_mc_add_global(dev, aarp_mcast);
- return 0;
+ /*
+ * if this is a point-to-point iface, and we already
+ * have an iface for this AppleTalk address, then we
+ * should not add a route
+ */
+ if ((dev->flags & IFF_POINTOPOINT) &&
+ atalk_find_interface(sa->sat_addr.s_net,
+ sa->sat_addr.s_node)) {
+ printk(KERN_DEBUG "AppleTalk: point-to-point "
+ "interface added with "
+ "existing address\n");
+ add_route = 0;
+ }
- case SIOCGIFADDR:
+ /*
+ * Phase 1 is fine on LocalTalk but we don't do
+ * EtherTalk phase 1. Anyone wanting to add it go ahead.
+ */
+ if (dev->type == ARPHRD_ETHER && nr->nr_phase != 2)
+ return -EPROTONOSUPPORT;
+ if (sa->sat_addr.s_node == ATADDR_BCAST ||
+ sa->sat_addr.s_node == 254)
+ return -EINVAL;
+ if (atif) {
+ /* Already setting address */
+ if (atif->status & ATIF_PROBE)
+ return -EBUSY;
+
+ atif->address.s_net = sa->sat_addr.s_net;
+ atif->address.s_node = sa->sat_addr.s_node;
+ atrtr_device_down(dev); /* Flush old routes */
+ } else {
+ atif = atif_add_device(dev, &sa->sat_addr);
if (!atif)
- return -EADDRNOTAVAIL;
+ return -ENOMEM;
+ }
+ atif->nets = *nr;
- sa->sat_family = AF_APPLETALK;
- sa->sat_addr = atif->address;
- break;
+ /*
+ * Check if the chosen address is used. If so we
+ * error and atalkd will try another.
+ */
- case SIOCGIFBRDADDR:
- if (!atif)
- return -EADDRNOTAVAIL;
+ if (!(dev->flags & IFF_LOOPBACK) &&
+ !(dev->flags & IFF_POINTOPOINT) &&
+ atif_probe_device(atif) < 0) {
+ atif_drop_device(dev);
+ return -EADDRINUSE;
+ }
- sa->sat_family = AF_APPLETALK;
+ /* Hey it worked - add the direct routes */
+ sa = (struct sockaddr_at *)&rtdef.rt_gateway;
+ sa->sat_family = AF_APPLETALK;
+ sa->sat_addr.s_net = atif->address.s_net;
+ sa->sat_addr.s_node = atif->address.s_node;
+ sa = (struct sockaddr_at *)&rtdef.rt_dst;
+ rtdef.rt_flags = RTF_UP;
+ sa->sat_family = AF_APPLETALK;
+ sa->sat_addr.s_node = ATADDR_ANYNODE;
+ if (dev->flags & IFF_LOOPBACK ||
+ dev->flags & IFF_POINTOPOINT)
+ rtdef.rt_flags |= RTF_HOST;
+
+ /* Routerless initial state */
+ if (nr->nr_firstnet == htons(0) &&
+ nr->nr_lastnet == htons(0xFFFE)) {
sa->sat_addr.s_net = atif->address.s_net;
- sa->sat_addr.s_node = ATADDR_BCAST;
- break;
-
- case SIOCATALKDIFADDR:
- case SIOCDIFADDR:
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- if (sa->sat_family != AF_APPLETALK)
- return -EINVAL;
- atalk_dev_down(dev);
- break;
-
- case SIOCSARP:
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- if (sa->sat_family != AF_APPLETALK)
+ atrtr_create(&rtdef, dev);
+ atrtr_set_default(dev);
+ } else {
+ limit = ntohs(nr->nr_lastnet);
+ if (limit - ntohs(nr->nr_firstnet) > 4096) {
+ printk(KERN_WARNING "Too many routes/"
+ "iface.\n");
return -EINVAL;
- /*
- * for now, we only support proxy AARP on ELAP;
- * we should be able to do it for LocalTalk, too.
- */
- if (dev->type != ARPHRD_ETHER)
- return -EPROTONOSUPPORT;
-
- /*
- * atif points to the current interface on this network;
- * we aren't concerned about its current status (at
- * least for now), but it has all the settings about
- * the network we're going to probe. Consequently, it
- * must exist.
- */
- if (!atif)
- return -EADDRNOTAVAIL;
+ }
+ if (add_route)
+ for (ct = ntohs(nr->nr_firstnet);
+ ct <= limit; ct++) {
+ sa->sat_addr.s_net = htons(ct);
+ atrtr_create(&rtdef, dev);
+ }
+ }
+ dev_mc_add_global(dev, aarp_mcast);
+ return 0;
+
+ case SIOCGIFADDR:
+ if (!atif)
+ return -EADDRNOTAVAIL;
+
+ sa->sat_family = AF_APPLETALK;
+ sa->sat_addr = atif->address;
+ break;
+
+ case SIOCGIFBRDADDR:
+ if (!atif)
+ return -EADDRNOTAVAIL;
+
+ sa->sat_family = AF_APPLETALK;
+ sa->sat_addr.s_net = atif->address.s_net;
+ sa->sat_addr.s_node = ATADDR_BCAST;
+ break;
+
+ case SIOCATALKDIFADDR:
+ case SIOCDIFADDR:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (sa->sat_family != AF_APPLETALK)
+ return -EINVAL;
+ atalk_dev_down(dev);
+ break;
- nr = (struct atalk_netrange *)&(atif->nets);
- /*
- * Phase 1 is fine on Localtalk but we don't do
- * Ethertalk phase 1. Anyone wanting to add it go ahead.
- */
- if (dev->type == ARPHRD_ETHER && nr->nr_phase != 2)
- return -EPROTONOSUPPORT;
+ case SIOCSARP:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (sa->sat_family != AF_APPLETALK)
+ return -EINVAL;
+ /*
+ * for now, we only support proxy AARP on ELAP;
+ * we should be able to do it for LocalTalk, too.
+ */
+ if (dev->type != ARPHRD_ETHER)
+ return -EPROTONOSUPPORT;
- if (sa->sat_addr.s_node == ATADDR_BCAST ||
- sa->sat_addr.s_node == 254)
- return -EINVAL;
+ /*
+ * atif points to the current interface on this network;
+ * we aren't concerned about its current status (at
+ * least for now), but it has all the settings about
+ * the network we're going to probe. Consequently, it
+ * must exist.
+ */
+ if (!atif)
+ return -EADDRNOTAVAIL;
- /*
- * Check if the chosen address is used. If so we
- * error and ATCP will try another.
- */
- if (atif_proxy_probe_device(atif, &(sa->sat_addr)) < 0)
- return -EADDRINUSE;
+ nr = (struct atalk_netrange *)&(atif->nets);
+ /*
+ * Phase 1 is fine on Localtalk but we don't do
+ * Ethertalk phase 1. Anyone wanting to add it go ahead.
+ */
+ if (dev->type == ARPHRD_ETHER && nr->nr_phase != 2)
+ return -EPROTONOSUPPORT;
- /*
- * We now have an address on the local network, and
- * the AARP code will defend it for us until we take it
- * down. We don't set up any routes right now, because
- * ATCP will install them manually via SIOCADDRT.
- */
- break;
+ if (sa->sat_addr.s_node == ATADDR_BCAST ||
+ sa->sat_addr.s_node == 254)
+ return -EINVAL;
- case SIOCDARP:
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- if (sa->sat_family != AF_APPLETALK)
- return -EINVAL;
- if (!atif)
- return -EADDRNOTAVAIL;
+ /*
+ * Check if the chosen address is used. If so we
+ * error and ATCP will try another.
+ */
+ if (atif_proxy_probe_device(atif, &(sa->sat_addr)) < 0)
+ return -EADDRINUSE;
- /* give to aarp module to remove proxy entry */
- aarp_proxy_remove(atif->dev, &(sa->sat_addr));
- return 0;
+ /*
+ * We now have an address on the local network, and
+ * the AARP code will defend it for us until we take it
+ * down. We don't set up any routes right now, because
+ * ATCP will install them manually via SIOCADDRT.
+ */
+ break;
+
+ case SIOCDARP:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (sa->sat_family != AF_APPLETALK)
+ return -EINVAL;
+ if (!atif)
+ return -EADDRNOTAVAIL;
+
+ /* give to aarp module to remove proxy entry */
+ aarp_proxy_remove(atif->dev, &(sa->sat_addr));
+ return 0;
}
return copy_to_user(arg, &atreq, sizeof(atreq)) ? -EFAULT : 0;
@@ -884,25 +884,25 @@ static int atrtr_ioctl(unsigned int cmd, void __user *arg)
return -EFAULT;
switch (cmd) {
- case SIOCDELRT:
- if (rt.rt_dst.sa_family != AF_APPLETALK)
- return -EINVAL;
- return atrtr_delete(&((struct sockaddr_at *)
- &rt.rt_dst)->sat_addr);
-
- case SIOCADDRT: {
- struct net_device *dev = NULL;
- if (rt.rt_dev) {
- char name[IFNAMSIZ];
- if (copy_from_user(name, rt.rt_dev, IFNAMSIZ-1))
- return -EFAULT;
- name[IFNAMSIZ-1] = '\0';
- dev = __dev_get_by_name(&init_net, name);
- if (!dev)
- return -ENODEV;
- }
- return atrtr_create(&rt, dev);
+ case SIOCDELRT:
+ if (rt.rt_dst.sa_family != AF_APPLETALK)
+ return -EINVAL;
+ return atrtr_delete(&((struct sockaddr_at *)
+ &rt.rt_dst)->sat_addr);
+
+ case SIOCADDRT: {
+ struct net_device *dev = NULL;
+ if (rt.rt_dev) {
+ char name[IFNAMSIZ];
+ if (copy_from_user(name, rt.rt_dev, IFNAMSIZ-1))
+ return -EFAULT;
+ name[IFNAMSIZ-1] = '\0';
+ dev = __dev_get_by_name(&init_net, name);
+ if (!dev)
+ return -ENODEV;
}
+ return atrtr_create(&rt, dev);
+ }
}
return -EINVAL;
}
@@ -951,13 +951,12 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
/* checksum stuff in frags */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
-
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
WARN_ON(start > offset + len);
- end = start + skb_shinfo(skb)->frags[i].size;
+ end = start + skb_frag_size(frag);
if ((copy = end - offset) > 0) {
u8 *vaddr;
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
if (copy > len)
copy = len;
@@ -1495,8 +1494,6 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
goto drop;
/* Queue packet (standard) */
- skb->sk = sock;
-
if (sock_queue_rcv_skb(sock, skb) < 0)
goto drop;
@@ -1650,7 +1647,6 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
if (!skb)
goto out;
- skb->sk = sk;
skb_reserve(skb, ddp_dl->header_length);
skb_reserve(skb, dev->hard_header_len);
skb->dev = dev;
@@ -1741,7 +1737,6 @@ static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
size_t size, int flags)
{
struct sock *sk = sock->sk;
- struct sockaddr_at *sat = (struct sockaddr_at *)msg->msg_name;
struct ddpehdr *ddp;
int copied = 0;
int offset = 0;
@@ -1770,14 +1765,13 @@ static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
}
err = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copied);
- if (!err) {
- if (sat) {
- sat->sat_family = AF_APPLETALK;
- sat->sat_port = ddp->deh_sport;
- sat->sat_addr.s_node = ddp->deh_snode;
- sat->sat_addr.s_net = ddp->deh_snet;
- }
- msg->msg_namelen = sizeof(*sat);
+ if (!err && msg->msg_name) {
+ struct sockaddr_at *sat = msg->msg_name;
+ sat->sat_family = AF_APPLETALK;
+ sat->sat_port = ddp->deh_sport;
+ sat->sat_addr.s_node = ddp->deh_snode;
+ sat->sat_addr.s_net = ddp->deh_snet;
+ msg->msg_namelen = sizeof(*sat);
}
skb_free_datagram(sk, skb); /* Free the datagram. */
diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
index fc63526..f41f026 100644
--- a/net/atm/atm_misc.c
+++ b/net/atm/atm_misc.c
@@ -9,7 +9,7 @@
#include <linux/sonet.h>
#include <linux/bitops.h>
#include <linux/errno.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
int atm_charge(struct atm_vcc *vcc, int truesize)
{
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 5889074..8523940 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -37,7 +37,7 @@
#include <linux/uaccess.h>
#include <asm/byteorder.h> /* for htons etc. */
#include <asm/system.h> /* save/restore_flags */
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "common.h"
#include "resources.h"
@@ -271,10 +271,8 @@ static const struct neigh_ops clip_neigh_ops = {
.family = AF_INET,
.solicit = clip_neigh_solicit,
.error_report = clip_neigh_error,
- .output = dev_queue_xmit,
- .connected_output = dev_queue_xmit,
- .hh_output = dev_queue_xmit,
- .queue_xmit = dev_queue_xmit,
+ .output = neigh_direct_output,
+ .connected_output = neigh_direct_output,
};
static int clip_constructor(struct neighbour *neigh)
diff --git a/net/atm/common.c b/net/atm/common.c
index 4b263b8..0ca06e8 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -23,7 +23,7 @@
#include <linux/uaccess.h>
#include <linux/poll.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "resources.h" /* atm_find_dev */
#include "common.h" /* prototypes */
@@ -500,8 +500,6 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
struct sk_buff *skb;
int copied, error = -EINVAL;
- msg->msg_namelen = 0;
-
if (sock->state != SS_CONNECTED)
return -ENOTCONN;
if (flags & ~MSG_DONTWAIT) /* only handle MSG_DONTWAIT */
diff --git a/net/atm/lec.c b/net/atm/lec.c
index ba48daa..f1964ca 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -643,7 +643,7 @@ static const struct net_device_ops lec_netdev_ops = {
.ndo_start_xmit = lec_start_xmit,
.ndo_change_mtu = lec_change_mtu,
.ndo_tx_timeout = lec_tx_timeout,
- .ndo_set_multicast_list = lec_set_multicast_list,
+ .ndo_set_rx_mode = lec_set_multicast_list,
};
static const unsigned char lec_ctrl_magic[] = {
@@ -1335,7 +1335,7 @@ static void lane2_associate_ind(struct net_device *dev, const u8 *mac_addr,
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/param.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/inetdevice.h>
#include <net/route.h>
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 3ccca42..aa97240 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -1005,7 +1005,7 @@ static int mpoa_event_listener(struct notifier_block *mpoa_notifier,
struct mpoa_client *mpc;
struct lec_priv *priv;
- dev = (struct net_device *)dev_ptr;
+ dev = dev_ptr;
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index e9aced0..db4a11c 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -37,6 +37,7 @@
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/atm.h>
diff --git a/net/atm/proc.c b/net/atm/proc.c
index be3afde..0d020de 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -27,7 +27,7 @@
#include <net/atmclip.h>
#include <linux/uaccess.h>
#include <linux/param.h> /* for HZ */
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "resources.h"
#include "common.h" /* atm_proc_init prototype */
#include "signaling.h" /* to get sigd - ugly too */
diff --git a/net/atm/pvc.c b/net/atm/pvc.c
index db0dd47..ae03240 100644
--- a/net/atm/pvc.c
+++ b/net/atm/pvc.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
+#include <linux/export.h>
#include <net/sock.h> /* for sock_no_* */
#include "resources.h" /* devs and vccs */
diff --git a/net/atm/svc.c b/net/atm/svc.c
index 754ee47..1281049 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -20,6 +20,7 @@
#include <linux/bitops.h>
#include <net/sock.h> /* for sock_no_* */
#include <linux/uaccess.h>
+#include <linux/export.h>
#include "resources.h"
#include "common.h" /* common for PVCs and SVCs */
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 86ac37f..7b8db0e 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1635,11 +1635,11 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
- if (msg->msg_namelen != 0) {
- struct sockaddr_ax25 *sax = (struct sockaddr_ax25 *)msg->msg_name;
+ if (msg->msg_name) {
ax25_digi digi;
ax25_address src;
const unsigned char *mac = skb_mac_header(skb);
+ struct sockaddr_ax25 *sax = msg->msg_name;
memset(sax, 0, sizeof(struct full_sockaddr_ax25));
ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL,
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index a169084..87fddab 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -38,6 +38,7 @@
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/seq_file.h>
+#include <linux/export.h>
static ax25_route *ax25_route_list;
static DEFINE_RWLOCK(ax25_route_lock);
diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c
index d349be9..4c83137 100644
--- a/net/ax25/ax25_uid.c
+++ b/net/ax25/ax25_uid.c
@@ -37,6 +37,7 @@
#include <linux/stat.h>
#include <linux/netfilter.h>
#include <linux/sysctl.h>
+#include <linux/export.h>
#include <net/ip.h>
#include <net/arp.h>
diff --git a/net/dcb/dcbevent.c b/net/dcb/dcbevent.c
index 665a880..1d9eb7c 100644
--- a/net/dcb/dcbevent.c
+++ b/net/dcb/dcbevent.c
@@ -19,6 +19,7 @@
#include <linux/rtnetlink.h>
#include <linux/notifier.h>
+#include <linux/export.h>
static ATOMIC_NOTIFIER_HEAD(dcbevent_notif_chain);
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index d8f262f..2f9517d 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -25,6 +25,7 @@
#include <linux/dcbnl.h>
#include <net/dcbevent.h>
#include <linux/rtnetlink.h>
+#include <linux/module.h>
#include <net/sock.h>
/**
@@ -1167,64 +1168,6 @@ err:
return ret;
}
-/* Handle IEEE 802.1Qaz SET commands. If any requested operation can not
- * be completed the entire msg is aborted and error value is returned.
- * No attempt is made to reconcile the case where only part of the
- * cmd can be completed.
- */
-static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
-{
- const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
- struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
- int err = -EOPNOTSUPP;
-
- if (!ops)
- goto err;
-
- err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
- tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
- if (err)
- goto err;
-
- if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) {
- struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]);
- err = ops->ieee_setets(netdev, ets);
- if (err)
- goto err;
- }
-
- if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
- struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
- err = ops->ieee_setpfc(netdev, pfc);
- if (err)
- goto err;
- }
-
- if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
- struct nlattr *attr;
- int rem;
-
- nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
- struct dcb_app *app_data;
- if (nla_type(attr) != DCB_ATTR_IEEE_APP)
- continue;
- app_data = nla_data(attr);
- if (ops->ieee_setapp)
- err = ops->ieee_setapp(netdev, app_data);
- else
- err = dcb_setapp(netdev, app_data);
- if (err)
- goto err;
- }
- }
-
-err:
- dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_SET, DCB_ATTR_IEEE,
- pid, seq, flags);
- return err;
-}
-
static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb,
int app_nested_type, int app_info_type,
int app_entry_type)
@@ -1280,29 +1223,13 @@ nla_put_failure:
}
/* Handle IEEE 802.1Qaz GET commands. */
-static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
{
- struct sk_buff *skb;
- struct nlmsghdr *nlh;
- struct dcbmsg *dcb;
struct nlattr *ieee, *app;
struct dcb_app_type *itr;
const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
- int err;
-
- if (!ops)
- return -EOPNOTSUPP;
-
- skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!skb)
- return -ENOBUFS;
-
- nlh = NLMSG_NEW(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
-
- dcb = NLMSG_DATA(nlh);
- dcb->dcb_family = AF_UNSPEC;
- dcb->cmd = DCB_CMD_IEEE_GET;
+ int dcbx;
+ int err = -EMSGSIZE;
NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);
@@ -1332,7 +1259,7 @@ static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb,
spin_lock(&dcb_lock);
list_for_each_entry(itr, &dcb_app_list, list) {
- if (strncmp(itr->name, netdev->name, IFNAMSIZ) == 0) {
+ if (itr->ifindex == netdev->ifindex) {
err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
&itr->app);
if (err) {
@@ -1341,6 +1268,12 @@ static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb,
}
}
}
+
+ if (netdev->dcbnl_ops->getdcbx)
+ dcbx = netdev->dcbnl_ops->getdcbx(netdev);
+ else
+ dcbx = -EOPNOTSUPP;
+
spin_unlock(&dcb_lock);
nla_nest_end(skb, app);
@@ -1371,15 +1304,414 @@ static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb,
}
nla_nest_end(skb, ieee);
- nlmsg_end(skb, nlh);
+ if (dcbx >= 0) {
+ err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
+ if (err)
+ goto nla_put_failure;
+ }
+
+ return 0;
- return rtnl_unicast(skb, &init_net, pid);
nla_put_failure:
- nlmsg_cancel(skb, nlh);
-nlmsg_failure:
- kfree_skb(skb);
- return -1;
+ return err;
+}
+
+static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
+ int dir)
+{
+ u8 pgid, up_map, prio, tc_pct;
+ const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
+ int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG;
+ struct nlattr *pg = nla_nest_start(skb, i);
+
+ if (!pg)
+ goto nla_put_failure;
+
+ for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
+ struct nlattr *tc_nest = nla_nest_start(skb, i);
+
+ if (!tc_nest)
+ goto nla_put_failure;
+
+ pgid = DCB_ATTR_VALUE_UNDEFINED;
+ prio = DCB_ATTR_VALUE_UNDEFINED;
+ tc_pct = DCB_ATTR_VALUE_UNDEFINED;
+ up_map = DCB_ATTR_VALUE_UNDEFINED;
+
+ if (!dir)
+ ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0,
+ &prio, &pgid, &tc_pct, &up_map);
+ else
+ ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
+ &prio, &pgid, &tc_pct, &up_map);
+
+ NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_PGID, pgid);
+ NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
+ NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
+ NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct);
+ nla_nest_end(skb, tc_nest);
+ }
+
+ for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
+ tc_pct = DCB_ATTR_VALUE_UNDEFINED;
+
+ if (!dir)
+ ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0,
+ &tc_pct);
+ else
+ ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
+ &tc_pct);
+ NLA_PUT_U8(skb, i, tc_pct);
+ }
+ nla_nest_end(skb, pg);
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct nlattr *cee, *app;
+ struct dcb_app_type *itr;
+ const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
+ int dcbx, i, err = -EMSGSIZE;
+ u8 value;
+
+ NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);
+
+ cee = nla_nest_start(skb, DCB_ATTR_CEE);
+ if (!cee)
+ goto nla_put_failure;
+
+ /* local pg */
+ if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) {
+ err = dcbnl_cee_pg_fill(skb, netdev, 1);
+ if (err)
+ goto nla_put_failure;
+ }
+
+ if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) {
+ err = dcbnl_cee_pg_fill(skb, netdev, 0);
+ if (err)
+ goto nla_put_failure;
+ }
+
+ /* local pfc */
+ if (ops->getpfccfg) {
+ struct nlattr *pfc_nest = nla_nest_start(skb, DCB_ATTR_CEE_PFC);
+
+ if (!pfc_nest)
+ goto nla_put_failure;
+
+ for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
+ ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
+ NLA_PUT_U8(skb, i, value);
+ }
+ nla_nest_end(skb, pfc_nest);
+ }
+
+ /* local app */
+ spin_lock(&dcb_lock);
+ app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE);
+ if (!app)
+ goto dcb_unlock;
+
+ list_for_each_entry(itr, &dcb_app_list, list) {
+ if (itr->ifindex == netdev->ifindex) {
+ struct nlattr *app_nest = nla_nest_start(skb,
+ DCB_ATTR_APP);
+ if (!app_nest)
+ goto dcb_unlock;
+
+ err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE,
+ itr->app.selector);
+ if (err)
+ goto dcb_unlock;
+
+ err = nla_put_u16(skb, DCB_APP_ATTR_ID,
+ itr->app.protocol);
+ if (err)
+ goto dcb_unlock;
+
+ err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY,
+ itr->app.priority);
+ if (err)
+ goto dcb_unlock;
+
+ nla_nest_end(skb, app_nest);
+ }
+ }
+ nla_nest_end(skb, app);
+
+ if (netdev->dcbnl_ops->getdcbx)
+ dcbx = netdev->dcbnl_ops->getdcbx(netdev);
+ else
+ dcbx = -EOPNOTSUPP;
+
+ spin_unlock(&dcb_lock);
+
+ /* features flags */
+ if (ops->getfeatcfg) {
+ struct nlattr *feat = nla_nest_start(skb, DCB_ATTR_CEE_FEAT);
+ if (!feat)
+ goto nla_put_failure;
+
+ for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
+ i++)
+ if (!ops->getfeatcfg(netdev, i, &value))
+ NLA_PUT_U8(skb, i, value);
+
+ nla_nest_end(skb, feat);
+ }
+
+ /* peer info if available */
+ if (ops->cee_peer_getpg) {
+ struct cee_pg pg;
+ memset(&pg, 0, sizeof(pg));
+ err = ops->cee_peer_getpg(netdev, &pg);
+ if (!err)
+ NLA_PUT(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg);
+ }
+
+ if (ops->cee_peer_getpfc) {
+ struct cee_pfc pfc;
+ memset(&pfc, 0, sizeof(pfc));
+ err = ops->cee_peer_getpfc(netdev, &pfc);
+ if (!err)
+ NLA_PUT(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc);
+ }
+
+ if (ops->peer_getappinfo && ops->peer_getapptable) {
+ err = dcbnl_build_peer_app(netdev, skb,
+ DCB_ATTR_CEE_PEER_APP_TABLE,
+ DCB_ATTR_CEE_PEER_APP_INFO,
+ DCB_ATTR_CEE_PEER_APP);
+ if (err)
+ goto nla_put_failure;
+ }
+ nla_nest_end(skb, cee);
+
+ /* DCBX state */
+ if (dcbx >= 0) {
+ err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
+ if (err)
+ goto nla_put_failure;
+ }
+ return 0;
+
+dcb_unlock:
+ spin_unlock(&dcb_lock);
+nla_put_failure:
+ return err;
+}
+
+static int dcbnl_notify(struct net_device *dev, int event, int cmd,
+ u32 seq, u32 pid, int dcbx_ver)
+{
+ struct net *net = dev_net(dev);
+ struct sk_buff *skb;
+ struct nlmsghdr *nlh;
+ struct dcbmsg *dcb;
+ const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
+ int err;
+
+ if (!ops)
+ return -EOPNOTSUPP;
+
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOBUFS;
+
+ nlh = nlmsg_put(skb, pid, 0, event, sizeof(*dcb), 0);
+ if (nlh == NULL) {
+ nlmsg_free(skb);
+ return -EMSGSIZE;
+ }
+
+ dcb = NLMSG_DATA(nlh);
+ dcb->dcb_family = AF_UNSPEC;
+ dcb->cmd = cmd;
+
+ if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
+ err = dcbnl_ieee_fill(skb, dev);
+ else
+ err = dcbnl_cee_fill(skb, dev);
+
+ if (err < 0) {
+ /* Report error to broadcast listeners */
+ nlmsg_cancel(skb, nlh);
+ kfree_skb(skb);
+ rtnl_set_sk_err(net, RTNLGRP_DCB, err);
+ } else {
+ /* End nlmsg and notify broadcast listeners */
+ nlmsg_end(skb, nlh);
+ rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL);
+ }
+
+ return err;
+}
+
+int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
+ u32 seq, u32 pid)
+{
+ return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_IEEE);
}
+EXPORT_SYMBOL(dcbnl_ieee_notify);
+
+int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
+ u32 seq, u32 pid)
+{
+ return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_CEE);
+}
+EXPORT_SYMBOL(dcbnl_cee_notify);
+
+/* Handle IEEE 802.1Qaz SET commands. If any requested operation cannot
+ * be completed, the entire msg is aborted and an error value is returned.
+ * No attempt is made to reconcile the case where only part of the
+ * cmd can be completed.
+ */
+static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
+ u32 pid, u32 seq, u16 flags)
+{
+ const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
+ struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
+ int err = -EOPNOTSUPP;
+
+ if (!ops)
+ return err;
+
+ if (!tb[DCB_ATTR_IEEE])
+ return -EINVAL;
+
+ err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
+ tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
+ if (err)
+ return err;
+
+ if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) {
+ struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]);
+ err = ops->ieee_setets(netdev, ets);
+ if (err)
+ goto err;
+ }
+
+ if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
+ struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
+ err = ops->ieee_setpfc(netdev, pfc);
+ if (err)
+ goto err;
+ }
+
+ if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
+ struct nlattr *attr;
+ int rem;
+
+ nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
+ struct dcb_app *app_data;
+ if (nla_type(attr) != DCB_ATTR_IEEE_APP)
+ continue;
+ app_data = nla_data(attr);
+ if (ops->ieee_setapp)
+ err = ops->ieee_setapp(netdev, app_data);
+ else
+ err = dcb_ieee_setapp(netdev, app_data);
+ if (err)
+ goto err;
+ }
+ }
+
+err:
+ dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_SET, DCB_ATTR_IEEE,
+ pid, seq, flags);
+ dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
+ return err;
+}
+
+static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb,
+ u32 pid, u32 seq, u16 flags)
+{
+ struct net *net = dev_net(netdev);
+ struct sk_buff *skb;
+ struct nlmsghdr *nlh;
+ struct dcbmsg *dcb;
+ const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
+ int err;
+
+ if (!ops)
+ return -EOPNOTSUPP;
+
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOBUFS;
+
+ nlh = nlmsg_put(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
+ if (nlh == NULL) {
+ nlmsg_free(skb);
+ return -EMSGSIZE;
+ }
+
+ dcb = NLMSG_DATA(nlh);
+ dcb->dcb_family = AF_UNSPEC;
+ dcb->cmd = DCB_CMD_IEEE_GET;
+
+ err = dcbnl_ieee_fill(skb, netdev);
+
+ if (err < 0) {
+ nlmsg_cancel(skb, nlh);
+ kfree_skb(skb);
+ } else {
+ nlmsg_end(skb, nlh);
+ err = rtnl_unicast(skb, net, pid);
+ }
+
+ return err;
+}
+
+static int dcbnl_ieee_del(struct net_device *netdev, struct nlattr **tb,
+ u32 pid, u32 seq, u16 flags)
+{
+ const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
+ struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
+ int err = -EOPNOTSUPP;
+
+ if (!ops)
+ return -EOPNOTSUPP;
+
+ if (!tb[DCB_ATTR_IEEE])
+ return -EINVAL;
+
+ err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
+ tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
+ if (err)
+ return err;
+
+ if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
+ struct nlattr *attr;
+ int rem;
+
+ nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
+ struct dcb_app *app_data;
+
+ if (nla_type(attr) != DCB_ATTR_IEEE_APP)
+ continue;
+ app_data = nla_data(attr);
+ if (ops->ieee_delapp)
+ err = ops->ieee_delapp(netdev, app_data);
+ else
+ err = dcb_ieee_delapp(netdev, app_data);
+ if (err)
+ goto err;
+ }
+ }
+
+err:
+ dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_DEL, DCB_ATTR_IEEE,
+ pid, seq, flags);
+ dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
+ return err;
+}
+
/* DCBX configuration */
static int dcbnl_getdcbx(struct net_device *netdev, struct nlattr **tb,
@@ -1527,10 +1859,10 @@ err:
static int dcbnl_cee_get(struct net_device *netdev, struct nlattr **tb,
u32 pid, u32 seq, u16 flags)
{
+ struct net *net = dev_net(netdev);
struct sk_buff *skb;
struct nlmsghdr *nlh;
struct dcbmsg *dcb;
- struct nlattr *cee;
const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
int err;
@@ -1541,53 +1873,26 @@ static int dcbnl_cee_get(struct net_device *netdev, struct nlattr **tb,
if (!skb)
return -ENOBUFS;
- nlh = NLMSG_NEW(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
+ nlh = nlmsg_put(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
+ if (nlh == NULL) {
+ nlmsg_free(skb);
+ return -EMSGSIZE;
+ }
dcb = NLMSG_DATA(nlh);
dcb->dcb_family = AF_UNSPEC;
dcb->cmd = DCB_CMD_CEE_GET;
- NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);
-
- cee = nla_nest_start(skb, DCB_ATTR_CEE);
- if (!cee)
- goto nla_put_failure;
-
- /* get peer info if available */
- if (ops->cee_peer_getpg) {
- struct cee_pg pg;
- memset(&pg, 0, sizeof(pg));
- err = ops->cee_peer_getpg(netdev, &pg);
- if (!err)
- NLA_PUT(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg);
- }
-
- if (ops->cee_peer_getpfc) {
- struct cee_pfc pfc;
- memset(&pfc, 0, sizeof(pfc));
- err = ops->cee_peer_getpfc(netdev, &pfc);
- if (!err)
- NLA_PUT(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc);
- }
+ err = dcbnl_cee_fill(skb, netdev);
- if (ops->peer_getappinfo && ops->peer_getapptable) {
- err = dcbnl_build_peer_app(netdev, skb,
- DCB_ATTR_CEE_PEER_APP_TABLE,
- DCB_ATTR_CEE_PEER_APP_INFO,
- DCB_ATTR_CEE_PEER_APP);
- if (err)
- goto nla_put_failure;
+ if (err < 0) {
+ nlmsg_cancel(skb, nlh);
+ nlmsg_free(skb);
+ } else {
+ nlmsg_end(skb, nlh);
+ err = rtnl_unicast(skb, net, pid);
}
-
- nla_nest_end(skb, cee);
- nlmsg_end(skb, nlh);
-
- return rtnl_unicast(skb, &init_net, pid);
-nla_put_failure:
- nlmsg_cancel(skb, nlh);
-nlmsg_failure:
- kfree_skb(skb);
- return -1;
+ return err;
}
static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
@@ -1697,11 +2002,15 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
goto out;
case DCB_CMD_IEEE_SET:
ret = dcbnl_ieee_set(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
+ nlh->nlmsg_flags);
goto out;
case DCB_CMD_IEEE_GET:
ret = dcbnl_ieee_get(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
+ nlh->nlmsg_flags);
+ goto out;
+ case DCB_CMD_IEEE_DEL:
+ ret = dcbnl_ieee_del(netdev, tb, pid, nlh->nlmsg_seq,
+ nlh->nlmsg_flags);
goto out;
case DCB_CMD_GDCBX:
ret = dcbnl_getdcbx(netdev, tb, pid, nlh->nlmsg_seq,
@@ -1749,7 +2058,7 @@ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
list_for_each_entry(itr, &dcb_app_list, list) {
if (itr->app.selector == app->selector &&
itr->app.protocol == app->protocol &&
- (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) {
+ itr->ifindex == dev->ifindex) {
prio = itr->app.priority;
break;
}
@@ -1761,25 +2070,28 @@ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
EXPORT_SYMBOL(dcb_getapp);
/**
- * ixgbe_dcbnl_setapp - add dcb application data to app list
+ * dcb_setapp - add CEE dcb application data to app list
*
- * Priority 0 is the default priority this removes applications
- * from the app list if the priority is set to zero.
+ * Priority 0 is an invalid priority in the CEE spec. This routine
+ * removes applications from the app list if the priority is
+ * set to zero.
*/
-u8 dcb_setapp(struct net_device *dev, struct dcb_app *new)
+int dcb_setapp(struct net_device *dev, struct dcb_app *new)
{
struct dcb_app_type *itr;
struct dcb_app_type event;
- memcpy(&event.name, dev->name, sizeof(event.name));
+ event.ifindex = dev->ifindex;
memcpy(&event.app, new, sizeof(event.app));
+ if (dev->dcbnl_ops->getdcbx)
+ event.dcbx = dev->dcbnl_ops->getdcbx(dev);
spin_lock(&dcb_lock);
/* Search for existing match and replace */
list_for_each_entry(itr, &dcb_app_list, list) {
if (itr->app.selector == new->selector &&
itr->app.protocol == new->protocol &&
- (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) {
+ itr->ifindex == dev->ifindex) {
if (new->priority)
itr->app.priority = new->priority;
else {
@@ -1799,7 +2111,7 @@ u8 dcb_setapp(struct net_device *dev, struct dcb_app *new)
}
memcpy(&entry->app, new, sizeof(*new));
- strncpy(entry->name, dev->name, IFNAMSIZ);
+ entry->ifindex = dev->ifindex;
list_add(&entry->list, &dcb_app_list);
}
out:
@@ -1809,6 +2121,118 @@ out:
}
EXPORT_SYMBOL(dcb_setapp);
+/**
+ * dcb_ieee_getapp_mask - retrieve the IEEE DCB application priority
+ *
+ * Helper routine which, on success, returns a non-zero 802.1Qaz user
+ * priority bitmap; otherwise it returns 0 to indicate that the dcb_app
+ * was not found in the APP list.
+ */
+u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
+{
+ struct dcb_app_type *itr;
+ u8 prio = 0;
+
+ spin_lock(&dcb_lock);
+ list_for_each_entry(itr, &dcb_app_list, list) {
+ if (itr->app.selector == app->selector &&
+ itr->app.protocol == app->protocol &&
+ itr->ifindex == dev->ifindex) {
+ prio |= 1 << itr->app.priority;
+ }
+ }
+ spin_unlock(&dcb_lock);
+
+ return prio;
+}
+EXPORT_SYMBOL(dcb_ieee_getapp_mask);
+
+/**
+ * dcb_ieee_setapp - add IEEE dcb application data to app list
+ *
+ * This adds Application data to the list. Multiple application
+ * entries may exist for the same selector and protocol as long
+ * as the priorities are different.
+ */
+int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
+{
+ struct dcb_app_type *itr, *entry;
+ struct dcb_app_type event;
+ int err = 0;
+
+ event.ifindex = dev->ifindex;
+ memcpy(&event.app, new, sizeof(event.app));
+ if (dev->dcbnl_ops->getdcbx)
+ event.dcbx = dev->dcbnl_ops->getdcbx(dev);
+
+ spin_lock(&dcb_lock);
+ /* Search for existing match and abort if found */
+ list_for_each_entry(itr, &dcb_app_list, list) {
+ if (itr->app.selector == new->selector &&
+ itr->app.protocol == new->protocol &&
+ itr->app.priority == new->priority &&
+ itr->ifindex == dev->ifindex) {
+ err = -EEXIST;
+ goto out;
+ }
+ }
+
+	/* App entry does not exist; add a new entry */
+ entry = kmalloc(sizeof(struct dcb_app_type), GFP_ATOMIC);
+ if (!entry) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(&entry->app, new, sizeof(*new));
+ entry->ifindex = dev->ifindex;
+ list_add(&entry->list, &dcb_app_list);
+out:
+ spin_unlock(&dcb_lock);
+ if (!err)
+ call_dcbevent_notifiers(DCB_APP_EVENT, &event);
+ return err;
+}
+EXPORT_SYMBOL(dcb_ieee_setapp);
+
+/**
+ * dcb_ieee_delapp - delete IEEE dcb application data from list
+ *
+ * This removes a matching APP entry from the APP list.
+ */
+int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
+{
+ struct dcb_app_type *itr;
+ struct dcb_app_type event;
+ int err = -ENOENT;
+
+ event.ifindex = dev->ifindex;
+ memcpy(&event.app, del, sizeof(event.app));
+ if (dev->dcbnl_ops->getdcbx)
+ event.dcbx = dev->dcbnl_ops->getdcbx(dev);
+
+ spin_lock(&dcb_lock);
+ /* Search for existing match and remove it. */
+ list_for_each_entry(itr, &dcb_app_list, list) {
+ if (itr->app.selector == del->selector &&
+ itr->app.protocol == del->protocol &&
+ itr->app.priority == del->priority &&
+ itr->ifindex == dev->ifindex) {
+ list_del(&itr->list);
+ kfree(itr);
+ err = 0;
+ goto out;
+ }
+ }
+
+out:
+ spin_unlock(&dcb_lock);
+ if (!err)
+ call_dcbevent_notifiers(DCB_APP_EVENT, &event);
+ return err;
+}
+EXPORT_SYMBOL(dcb_ieee_delapp);
+
static void dcb_flushapp(void)
{
struct dcb_app_type *app;
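The APP-table helpers above now key every lookup on ifindex instead of the interface name, and dcb_ieee_getapp_mask() folds all matching entries into a user-priority bitmap. A minimal user-space sketch of that fold follows; the struct and table here are illustrative stand-ins for the kernel's dcb_app_list, not its actual layout.

#include <stdint.h>
#include <stdio.h>

struct app_entry {
	int ifindex;
	uint8_t selector;
	uint16_t protocol;
	uint8_t priority;	/* 0..7 */
};

/* Fold all matching entries into an 802.1Qaz user-priority bitmap. */
static uint8_t getapp_mask(const struct app_entry *tbl, int n,
			   int ifindex, uint8_t selector, uint16_t protocol)
{
	uint8_t mask = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (tbl[i].ifindex == ifindex &&
		    tbl[i].selector == selector &&
		    tbl[i].protocol == protocol)
			mask |= 1 << tbl[i].priority;
	}
	return mask;
}

int main(void)
{
	/* Two entries for the same selector/protocol but different
	 * priorities are allowed, exactly as dcb_ieee_setapp() permits. */
	struct app_entry tbl[] = {
		{ 2, 1, 0x8906, 3 },	/* e.g. FCoE ethertype -> prio 3 */
		{ 2, 1, 0x8906, 4 },
		{ 3, 1, 0x8906, 5 },	/* different interface, ignored */
	};

	printf("mask = 0x%02x\n", (unsigned)getapp_mask(tbl, 3, 2, 1, 0x8906));
	return 0;
}

With the two ifindex-2 entries above, the mask comes out as 0x18 (priorities 3 and 4), which is exactly the multi-entry case dcb_ieee_setapp() allows and the CEE variant dcb_setapp() does not.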
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index 25b7a8d..ba07824 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -12,6 +12,7 @@
#include "dccp.h"
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/export.h>
static struct kmem_cache *dccp_ackvec_slab;
static struct kmem_cache *dccp_ackvec_record_slab;
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index 36479ca..48b585a 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -118,7 +118,7 @@ static int ccid_activate(struct ccid_operations *ccid_ops)
if (ccid_ops->ccid_hc_tx_slab == NULL)
goto out_free_rx_slab;
- pr_info("CCID: Activated CCID %d (%s)\n",
+ pr_info("DCCP: Activated CCID %d (%s)\n",
ccid_ops->ccid_id, ccid_ops->ccid_name);
err = 0;
out:
@@ -136,7 +136,7 @@ static void ccid_deactivate(struct ccid_operations *ccid_ops)
ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab);
ccid_ops->ccid_hc_rx_slab = NULL;
- pr_info("CCID: Deactivated CCID %d (%s)\n",
+ pr_info("DCCP: Deactivated CCID %d (%s)\n",
ccid_ops->ccid_id, ccid_ops->ccid_name);
}
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index fadecd2..67164bb 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -85,7 +85,6 @@ static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
{
- struct dccp_sock *dp = dccp_sk(sk);
u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->tx_cwnd, 2);
/*
@@ -98,14 +97,33 @@ static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
DCCP_WARN("Limiting Ack Ratio (%u) to %u\n", val, max_ratio);
val = max_ratio;
}
- if (val > DCCPF_ACK_RATIO_MAX)
- val = DCCPF_ACK_RATIO_MAX;
+ dccp_feat_signal_nn_change(sk, DCCPF_ACK_RATIO,
+ min_t(u32, val, DCCPF_ACK_RATIO_MAX));
+}
- if (val == dp->dccps_l_ack_ratio)
- return;
+static void ccid2_check_l_ack_ratio(struct sock *sk)
+{
+ struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
- ccid2_pr_debug("changing local ack ratio to %u\n", val);
- dp->dccps_l_ack_ratio = val;
+ /*
+ * After a loss, idle period, application limited period, or RTO we
+ * need to check that the ack ratio is still less than the congestion
+ * window. Otherwise, we will send an entire congestion window of
+ * packets and get no response because we haven't sent ack ratio
+ * packets yet.
+ * If the ack ratio does need to be reduced, we reduce it to half of
+ * the congestion window (or 1 if that's zero) instead of to the
+ * congestion window. This prevents problems if one ack is lost.
+ */
+ if (dccp_feat_nn_get(sk, DCCPF_ACK_RATIO) > hc->tx_cwnd)
+ ccid2_change_l_ack_ratio(sk, hc->tx_cwnd/2 ? : 1U);
+}
+
+static void ccid2_change_l_seq_window(struct sock *sk, u64 val)
+{
+ dccp_feat_signal_nn_change(sk, DCCPF_SEQUENCE_WINDOW,
+ clamp_val(val, DCCPF_SEQ_WMIN,
+ DCCPF_SEQ_WMAX));
}
static void ccid2_hc_tx_rto_expire(unsigned long data)
@@ -153,17 +171,97 @@ out:
sock_put(sk);
}
+/*
+ * Congestion window validation (RFC 2861).
+ */
+static int ccid2_do_cwv = 1;
+module_param(ccid2_do_cwv, bool, 0644);
+MODULE_PARM_DESC(ccid2_do_cwv, "Perform RFC2861 Congestion Window Validation");
+
+/**
+ * ccid2_update_used_window - Track how much of cwnd is actually used
+ * This is done in addition to CWV. The sender needs to have an idea of how many
+ * packets may be in flight, to set the local Sequence Window value accordingly
+ * (RFC 4340, 7.5.2). The CWV mechanism is exploited to keep track of the
+ * maximum-used window. We use an EWMA low-pass filter to filter out noise.
+ */
+static void ccid2_update_used_window(struct ccid2_hc_tx_sock *hc, u32 new_wnd)
+{
+ hc->tx_expected_wnd = (3 * hc->tx_expected_wnd + new_wnd) / 4;
+}
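The filter above weights the previous estimate 3:1 against the new sample, so roughly a quarter of the remaining gap closes on each update. A standalone illustration of the convergence (plain user-space C, for illustration only, not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned int expected = 20, used = 4;	/* packets */
	int i;

	for (i = 0; i < 6; i++) {
		/* same formula as ccid2_update_used_window() */
		expected = (3 * expected + used) / 4;
		printf("%u ", expected);
	}
	printf("\n");	/* prints: 16 13 10 8 7 6 */
	return 0;
}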
+
+/* This borrows the code of tcp_cwnd_application_limited() */
+static void ccid2_cwnd_application_limited(struct sock *sk, const u32 now)
+{
+ struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
+ /* don't reduce cwnd below the initial window (IW) */
+ u32 init_win = rfc3390_bytes_to_packets(dccp_sk(sk)->dccps_mss_cache),
+ win_used = max(hc->tx_cwnd_used, init_win);
+
+ if (win_used < hc->tx_cwnd) {
+ hc->tx_ssthresh = max(hc->tx_ssthresh,
+ (hc->tx_cwnd >> 1) + (hc->tx_cwnd >> 2));
+ hc->tx_cwnd = (hc->tx_cwnd + win_used) >> 1;
+ }
+ hc->tx_cwnd_used = 0;
+ hc->tx_cwnd_stamp = now;
+
+ ccid2_check_l_ack_ratio(sk);
+}
+
+/* This borrows the code of tcp_cwnd_restart() */
+static void ccid2_cwnd_restart(struct sock *sk, const u32 now)
+{
+ struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
+ u32 cwnd = hc->tx_cwnd, restart_cwnd,
+ iwnd = rfc3390_bytes_to_packets(dccp_sk(sk)->dccps_mss_cache);
+
+ hc->tx_ssthresh = max(hc->tx_ssthresh, (cwnd >> 1) + (cwnd >> 2));
+
+ /* don't reduce cwnd below the initial window (IW) */
+ restart_cwnd = min(cwnd, iwnd);
+ cwnd >>= (now - hc->tx_lsndtime) / hc->tx_rto;
+ hc->tx_cwnd = max(cwnd, restart_cwnd);
+
+ hc->tx_cwnd_stamp = now;
+ hc->tx_cwnd_used = 0;
+
+ ccid2_check_l_ack_ratio(sk);
+}
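A worked example of the restart logic above (illustrative numbers, not taken from the patch): with cwnd = 32, an initial window of 4 and an idle gap of three RTOs, ssthresh is first raised to at least 24 (cwnd/2 + cwnd/4), cwnd is halved once per elapsed RTO (32 >> 3 = 4) and then clamped so it never drops below min(old cwnd, IW), so the sender resumes in slow start from cwnd = 4.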
+
static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len)
{
struct dccp_sock *dp = dccp_sk(sk);
struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
+ const u32 now = ccid2_time_stamp;
struct ccid2_seq *next;
- hc->tx_pipe++;
+ /* slow-start after idle periods (RFC 2581, RFC 2861) */
+ if (ccid2_do_cwv && !hc->tx_pipe &&
+ (s32)(now - hc->tx_lsndtime) >= hc->tx_rto)
+ ccid2_cwnd_restart(sk, now);
+
+ hc->tx_lsndtime = now;
+ hc->tx_pipe += 1;
+
+ /* see whether cwnd was fully used (RFC 2861), update expected window */
+ if (ccid2_cwnd_network_limited(hc)) {
+ ccid2_update_used_window(hc, hc->tx_cwnd);
+ hc->tx_cwnd_used = 0;
+ hc->tx_cwnd_stamp = now;
+ } else {
+ if (hc->tx_pipe > hc->tx_cwnd_used)
+ hc->tx_cwnd_used = hc->tx_pipe;
+
+ ccid2_update_used_window(hc, hc->tx_cwnd_used);
+
+ if (ccid2_do_cwv && (s32)(now - hc->tx_cwnd_stamp) >= hc->tx_rto)
+ ccid2_cwnd_application_limited(sk, now);
+ }
hc->tx_seqh->ccid2s_seq = dp->dccps_gss;
hc->tx_seqh->ccid2s_acked = 0;
- hc->tx_seqh->ccid2s_sent = ccid2_time_stamp;
+ hc->tx_seqh->ccid2s_sent = now;
next = hc->tx_seqh->ccid2s_next;
/* check if we need to alloc more space */
@@ -329,17 +427,37 @@ static void ccid2_new_ack(struct sock *sk, struct ccid2_seq *seqp,
unsigned int *maxincr)
{
struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
-
- if (hc->tx_cwnd < hc->tx_ssthresh) {
- if (*maxincr > 0 && ++hc->tx_packets_acked == 2) {
+ struct dccp_sock *dp = dccp_sk(sk);
+ int r_seq_used = hc->tx_cwnd / dp->dccps_l_ack_ratio;
+
+ if (hc->tx_cwnd < dp->dccps_l_seq_win &&
+ r_seq_used < dp->dccps_r_seq_win) {
+ if (hc->tx_cwnd < hc->tx_ssthresh) {
+ if (*maxincr > 0 && ++hc->tx_packets_acked >= 2) {
+ hc->tx_cwnd += 1;
+ *maxincr -= 1;
+ hc->tx_packets_acked = 0;
+ }
+ } else if (++hc->tx_packets_acked >= hc->tx_cwnd) {
hc->tx_cwnd += 1;
- *maxincr -= 1;
hc->tx_packets_acked = 0;
}
- } else if (++hc->tx_packets_acked >= hc->tx_cwnd) {
- hc->tx_cwnd += 1;
- hc->tx_packets_acked = 0;
}
+
+ /*
+ * Adjust the local sequence window and the ack ratio to allow about
+ * 5 times the number of packets in the network (RFC 4340 7.5.2)
+ */
+ if (r_seq_used * CCID2_WIN_CHANGE_FACTOR >= dp->dccps_r_seq_win)
+ ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio * 2);
+ else if (r_seq_used * CCID2_WIN_CHANGE_FACTOR < dp->dccps_r_seq_win/2)
+ ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio / 2 ? : 1U);
+
+ if (hc->tx_cwnd * CCID2_WIN_CHANGE_FACTOR >= dp->dccps_l_seq_win)
+ ccid2_change_l_seq_window(sk, dp->dccps_l_seq_win * 2);
+ else if (hc->tx_cwnd * CCID2_WIN_CHANGE_FACTOR < dp->dccps_l_seq_win/2)
+ ccid2_change_l_seq_window(sk, dp->dccps_l_seq_win / 2);
+
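To make the factor-of-5 adjustment above concrete (illustrative numbers only): with cwnd = 20, a local Ack Ratio of 2 and a remote Sequence Window of 100, r_seq_used = 20/2 = 10, and 10 * 5 = 50 lies between 100/2 and 100, so nothing changes. If cwnd later grows to 40, r_seq_used becomes 20 and 20 * 5 >= 100, so the local Ack Ratio is doubled to 4, which halves r_seq_used back to 10; symmetrically, once cwnd * 5 reaches the local Sequence Window that window is doubled. Both windows are thus kept at about five times the number of packets in flight, as RFC 4340, 7.5.2 recommends.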
/*
* FIXME: RTT is sampled several times per acknowledgment (for each
* entry in the Ack Vector), instead of once per Ack (as in TCP SACK).
@@ -365,9 +483,7 @@ static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
hc->tx_cwnd = hc->tx_cwnd / 2 ? : 1U;
hc->tx_ssthresh = max(hc->tx_cwnd, 2U);
- /* Avoid spurious timeouts resulting from Ack Ratio > cwnd */
- if (dccp_sk(sk)->dccps_l_ack_ratio > hc->tx_cwnd)
- ccid2_change_l_ack_ratio(sk, hc->tx_cwnd);
+ ccid2_check_l_ack_ratio(sk);
}
static int ccid2_hc_tx_parse_options(struct sock *sk, u8 packet_type,
@@ -418,8 +534,16 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
if (hc->tx_rpdupack >= NUMDUPACK) {
hc->tx_rpdupack = -1; /* XXX lame */
hc->tx_rpseq = 0;
-
+#ifdef __CCID2_COPES_GRACEFULLY_WITH_ACK_CONGESTION_CONTROL__
+ /*
+ * FIXME: Ack Congestion Control is broken; in
+ * its current state, instabilities occurred with
+ * Ack Ratios greater than 1, causing hang-ups
+ * and long RTO timeouts. This needs to be fixed
+ * before opening up dynamic changes. -- gerrit
+ */
ccid2_change_l_ack_ratio(sk, 2 * dp->dccps_l_ack_ratio);
+#endif
}
}
}
@@ -583,15 +707,6 @@ done:
dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
}
-/*
- * Convert RFC 3390 larger initial window into an equivalent number of packets.
- * This is based on the numbers specified in RFC 5681, 3.1.
- */
-static inline u32 rfc3390_bytes_to_packets(const u32 smss)
-{
- return smss <= 1095 ? 4 : (smss > 2190 ? 2 : 3);
-}
-
static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
{
struct ccid2_hc_tx_sock *hc = ccid_priv(ccid);
@@ -603,6 +718,7 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
/* Use larger initial windows (RFC 4341, section 5). */
hc->tx_cwnd = rfc3390_bytes_to_packets(dp->dccps_mss_cache);
+ hc->tx_expected_wnd = hc->tx_cwnd;
/* Make sure that Ack Ratio is enabled and within bounds. */
max_ratio = DIV_ROUND_UP(hc->tx_cwnd, 2);
@@ -615,7 +731,8 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
hc->tx_rto = DCCP_TIMEOUT_INIT;
hc->tx_rpdupack = -1;
- hc->tx_last_cong = ccid2_time_stamp;
+ hc->tx_last_cong = hc->tx_lsndtime = hc->tx_cwnd_stamp = ccid2_time_stamp;
+ hc->tx_cwnd_used = 0;
setup_timer(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire,
(unsigned long)sk);
INIT_LIST_HEAD(&hc->tx_av_chunks);
@@ -636,18 +753,14 @@ static void ccid2_hc_tx_exit(struct sock *sk)
static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
{
- const struct dccp_sock *dp = dccp_sk(sk);
struct ccid2_hc_rx_sock *hc = ccid2_hc_rx_sk(sk);
- switch (DCCP_SKB_CB(skb)->dccpd_type) {
- case DCCP_PKT_DATA:
- case DCCP_PKT_DATAACK:
- hc->rx_data++;
- if (hc->rx_data >= dp->dccps_r_ack_ratio) {
- dccp_send_ack(sk);
- hc->rx_data = 0;
- }
- break;
+ if (!dccp_data_packet(skb))
+ return;
+
+ if (++hc->rx_num_data_pkts >= dccp_sk(sk)->dccps_r_ack_ratio) {
+ dccp_send_ack(sk);
+ hc->rx_num_data_pkts = 0;
}
}
diff --git a/net/dccp/ccids/ccid2.h b/net/dccp/ccids/ccid2.h
index e9985da..18c9754 100644
--- a/net/dccp/ccids/ccid2.h
+++ b/net/dccp/ccids/ccid2.h
@@ -43,6 +43,12 @@ struct ccid2_seq {
#define CCID2_SEQBUF_LEN 1024
#define CCID2_SEQBUF_MAX 128
+/*
+ * Multiple of congestion window to keep the sequence window at
+ * (RFC 4340 7.5.2)
+ */
+#define CCID2_WIN_CHANGE_FACTOR 5
+
/**
* struct ccid2_hc_tx_sock - CCID2 TX half connection
* @tx_{cwnd,ssthresh,pipe}: as per RFC 4341, section 5
@@ -53,6 +59,10 @@ struct ccid2_seq {
* @tx_rttvar: moving average/maximum of @mdev_max
* @tx_rto: RTO value deriving from SRTT and RTTVAR (RFC 2988)
* @tx_rtt_seq: to decay RTTVAR at most once per flight
+ * @tx_cwnd_used: actually used cwnd, W_used of RFC 2861
+ * @tx_expected_wnd: moving average of @tx_cwnd_used
+ * @tx_cwnd_stamp: to track idle periods in CWV
+ * @tx_lsndtime: last time (in jiffies) a data packet was sent
* @tx_rpseq: last consecutive seqno
* @tx_rpdupack: dupacks since rpseq
* @tx_av_chunks: list of Ack Vectors received on current skb
@@ -76,6 +86,12 @@ struct ccid2_hc_tx_sock {
u64 tx_rtt_seq:48;
struct timer_list tx_rtotimer;
+ /* Congestion Window validation (optional, RFC 2861) */
+ u32 tx_cwnd_used,
+ tx_expected_wnd,
+ tx_cwnd_stamp,
+ tx_lsndtime;
+
u64 tx_rpseq;
int tx_rpdupack;
u32 tx_last_cong;
@@ -88,8 +104,21 @@ static inline bool ccid2_cwnd_network_limited(struct ccid2_hc_tx_sock *hc)
return hc->tx_pipe >= hc->tx_cwnd;
}
+/*
+ * Convert RFC 3390 larger initial window into an equivalent number of packets.
+ * This is based on the numbers specified in RFC 5681, 3.1.
+ */
+static inline u32 rfc3390_bytes_to_packets(const u32 smss)
+{
+ return smss <= 1095 ? 4 : (smss > 2190 ? 2 : 3);
+}
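As a quick sanity check of the mapping above (not part of the patch): smss <= 1095 gives an initial window of 4 packets, smss > 2190 gives 2, and anything in between gives 3, so a typical 1460-byte MSS starts with a 3-packet initial window.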
+
+/**
+ * struct ccid2_hc_rx_sock - Receiving end of CCID-2 half-connection
+ * @rx_num_data_pkts: number of data packets received since last feedback
+ */
struct ccid2_hc_rx_sock {
- int rx_data;
+ u32 rx_num_data_pkts;
};
static inline struct ccid2_hc_tx_sock *ccid2_hc_tx_sk(const struct sock *sk)
diff --git a/net/dccp/ccids/lib/tfrc.c b/net/dccp/ccids/lib/tfrc.c
index 4902029..1f94b7e 100644
--- a/net/dccp/ccids/lib/tfrc.c
+++ b/net/dccp/ccids/lib/tfrc.c
@@ -4,6 +4,7 @@
* Copyright (c) 2007 The University of Aberdeen, Scotland, UK
* Copyright (c) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
*/
+#include <linux/moduleparam.h>
#include "tfrc.h"
#ifdef CONFIG_IP_DCCP_TFRC_DEBUG
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 5fdb072..583490a 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -474,6 +474,7 @@ static inline int dccp_ack_pending(const struct sock *sk)
return dccp_ackvec_pending(sk) || inet_csk_ack_scheduled(sk);
}
+extern int dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val);
extern int dccp_feat_finalise_settings(struct dccp_sock *dp);
extern int dccp_feat_server_ccid_dependencies(struct dccp_request_sock *dreq);
extern int dccp_feat_insert_opts(struct dccp_sock*, struct dccp_request_sock*,
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index 568def9..23cea0e 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -12,6 +12,7 @@
* -----------
* o Feature negotiation is coordinated with connection setup (as in TCP), wild
* changes of parameters of an established connection are not supported.
+ * o Changing non-negotiable (NN) values is supported in state OPEN/PARTOPEN.
* o All currently known SP features have 1-byte quantities. If in the future
* extensions of RFCs 4340..42 define features with item lengths larger than
* one byte, a feature-specific extension of the code will be required.
@@ -343,6 +344,20 @@ static int __dccp_feat_activate(struct sock *sk, const int idx,
return dccp_feat_table[idx].activation_hdlr(sk, val, rx);
}
+/**
+ * dccp_feat_activate - Activate feature value on socket
+ * @sk: fully connected DCCP socket (after handshake is complete)
+ * @feat_num: feature to activate, one of %dccp_feature_numbers
+ * @local: whether local (1) or remote (0) @feat_num is meant
+ * @fval: the value (SP or NN) to activate, or NULL to use the default value
+ * For general use this function is preferable over __dccp_feat_activate().
+ */
+static int dccp_feat_activate(struct sock *sk, u8 feat_num, bool local,
+ dccp_feat_val const *fval)
+{
+ return __dccp_feat_activate(sk, dccp_feat_index(feat_num), local, fval);
+}
+
/* Test for "Req'd" feature (RFC 4340, 6.4) */
static inline int dccp_feat_must_be_understood(u8 feat_num)
{
@@ -650,11 +665,22 @@ int dccp_feat_insert_opts(struct dccp_sock *dp, struct dccp_request_sock *dreq,
return -1;
if (pos->needs_mandatory && dccp_insert_option_mandatory(skb))
return -1;
- /*
- * Enter CHANGING after transmitting the Change option (6.6.2).
- */
- if (pos->state == FEAT_INITIALISING)
- pos->state = FEAT_CHANGING;
+
+ if (skb->sk->sk_state == DCCP_OPEN &&
+ (opt == DCCPO_CONFIRM_R || opt == DCCPO_CONFIRM_L)) {
+ /*
+ * Confirms don't get retransmitted (6.6.3) once the
+ * connection is in state OPEN
+ */
+ dccp_feat_list_pop(pos);
+ } else {
+ /*
+ * Enter CHANGING after transmitting the Change
+ * option (6.6.2).
+ */
+ if (pos->state == FEAT_INITIALISING)
+ pos->state = FEAT_CHANGING;
+ }
}
return 0;
}
@@ -730,6 +756,70 @@ int dccp_feat_register_sp(struct sock *sk, u8 feat, u8 is_local,
0, list, len);
}
+/**
+ * dccp_feat_nn_get - Query current/pending value of NN feature
+ * @sk: DCCP socket of an established connection
+ * @feat: NN feature number from %dccp_feature_numbers
+ * For a known NN feature, returns the value currently being negotiated, or
+ * the current (confirmed) value if no negotiation is going on.
+ */
+u64 dccp_feat_nn_get(struct sock *sk, u8 feat)
+{
+ if (dccp_feat_type(feat) == FEAT_NN) {
+ struct dccp_sock *dp = dccp_sk(sk);
+ struct dccp_feat_entry *entry;
+
+ entry = dccp_feat_list_lookup(&dp->dccps_featneg, feat, 1);
+ if (entry != NULL)
+ return entry->val.nn;
+
+ switch (feat) {
+ case DCCPF_ACK_RATIO:
+ return dp->dccps_l_ack_ratio;
+ case DCCPF_SEQUENCE_WINDOW:
+ return dp->dccps_l_seq_win;
+ }
+ }
+ DCCP_BUG("attempt to look up unsupported feature %u", feat);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dccp_feat_nn_get);
+
+/**
+ * dccp_feat_signal_nn_change - Update NN values for an established connection
+ * @sk: DCCP socket of an established connection
+ * @feat: NN feature number from %dccp_feature_numbers
+ * @nn_val: the new value to use
+ * This function is used to communicate NN updates out-of-band.
+ */
+int dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val)
+{
+ struct list_head *fn = &dccp_sk(sk)->dccps_featneg;
+ dccp_feat_val fval = { .nn = nn_val };
+ struct dccp_feat_entry *entry;
+
+ if (sk->sk_state != DCCP_OPEN && sk->sk_state != DCCP_PARTOPEN)
+ return 0;
+
+ if (dccp_feat_type(feat) != FEAT_NN ||
+ !dccp_feat_is_valid_nn_val(feat, nn_val))
+ return -EINVAL;
+
+ if (nn_val == dccp_feat_nn_get(sk, feat))
+ return 0; /* already set or negotiation under way */
+
+ entry = dccp_feat_list_lookup(fn, feat, 1);
+ if (entry != NULL) {
+ dccp_pr_debug("Clobbering existing NN entry %llu -> %llu\n",
+ (unsigned long long)entry->val.nn,
+ (unsigned long long)nn_val);
+ dccp_feat_list_pop(entry);
+ }
+
+ inet_csk_schedule_ack(sk);
+ return dccp_feat_push_change(fn, feat, 1, 0, &fval);
+}
+EXPORT_SYMBOL_GPL(dccp_feat_signal_nn_change);
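The intended caller of this pair is a CCID that wants to retune an NN feature on a live connection, as CCID-2 now does for Ack Ratio and Sequence Window. A minimal sketch of such a caller, mirroring the ccid2_change_l_seq_window() pattern introduced by this patch (the function name here is illustrative, not part of the patch):

static void example_set_seq_window(struct sock *sk, u64 new_win)
{
	/*
	 * Queues a Change(Sequence Window) option on the next packet;
	 * the new value only takes effect after the peer's Confirm.
	 * Out-of-range values are clamped rather than rejected here.
	 */
	dccp_feat_signal_nn_change(sk, DCCPF_SEQUENCE_WINDOW,
				   clamp_val(new_win, DCCPF_SEQ_WMIN,
						      DCCPF_SEQ_WMAX));
}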
/*
* Tracking features whose value depend on the choice of CCID
@@ -1187,6 +1277,100 @@ confirmation_failed:
}
/**
+ * dccp_feat_handle_nn_established - Fast-path reception of NN options
+ * @sk: socket of an established DCCP connection
+ * @mandatory: whether @opt was preceded by a Mandatory option
+ * @opt: %DCCPO_CHANGE_L | %DCCPO_CONFIRM_R (NN only)
+ * @feat: NN number, one of %dccp_feature_numbers
+ * @val: NN value
+ * @len: length of @val in bytes
+ * This function combines the functionality of change_recv/confirm_recv, with
+ * the following differences (reset codes are the same):
+ * - cleanup after receiving the Confirm;
+ * - values are directly activated after successful parsing;
+ * - deliberately restricted to NN features.
+ * The restriction to NN features is essential since SP features can have
+ * unpredictable outcomes (depending on the remote configuration) and are
+ * interdependent (CCIDs, for instance, cause further dependencies).
+ */
+static u8 dccp_feat_handle_nn_established(struct sock *sk, u8 mandatory, u8 opt,
+ u8 feat, u8 *val, u8 len)
+{
+ struct list_head *fn = &dccp_sk(sk)->dccps_featneg;
+ const bool local = (opt == DCCPO_CONFIRM_R);
+ struct dccp_feat_entry *entry;
+ u8 type = dccp_feat_type(feat);
+ dccp_feat_val fval;
+
+ dccp_feat_print_opt(opt, feat, val, len, mandatory);
+
+ /* Ignore non-mandatory unknown and non-NN features */
+ if (type == FEAT_UNKNOWN) {
+ if (local && !mandatory)
+ return 0;
+ goto fast_path_unknown;
+ } else if (type != FEAT_NN) {
+ return 0;
+ }
+
+ /*
+ * We don't accept empty Confirms, since in fast-path feature
+ * negotiation the values are enabled immediately after sending
+ * the Change option.
+ * Empty Changes on the other hand are invalid (RFC 4340, 6.1).
+ */
+ if (len == 0 || len > sizeof(fval.nn))
+ goto fast_path_unknown;
+
+ if (opt == DCCPO_CHANGE_L) {
+ fval.nn = dccp_decode_value_var(val, len);
+ if (!dccp_feat_is_valid_nn_val(feat, fval.nn))
+ goto fast_path_unknown;
+
+ if (dccp_feat_push_confirm(fn, feat, local, &fval) ||
+ dccp_feat_activate(sk, feat, local, &fval))
+ return DCCP_RESET_CODE_TOO_BUSY;
+
+ /* set the `Ack Pending' flag to piggyback a Confirm */
+ inet_csk_schedule_ack(sk);
+
+ } else if (opt == DCCPO_CONFIRM_R) {
+ entry = dccp_feat_list_lookup(fn, feat, local);
+ if (entry == NULL || entry->state != FEAT_CHANGING)
+ return 0;
+
+ fval.nn = dccp_decode_value_var(val, len);
+ /*
+ * Just ignore a value that doesn't match our current value.
+ * If the option changes twice within two RTTs, then at least
+ * one CONFIRM will be received for the old value after a
+ * new CHANGE was sent.
+ */
+ if (fval.nn != entry->val.nn)
+ return 0;
+
+ /* Only activate after receiving the Confirm option (6.6.1). */
+ dccp_feat_activate(sk, feat, local, &fval);
+
+ /* It has been confirmed - so remove the entry */
+ dccp_feat_list_pop(entry);
+
+ } else {
+ DCCP_WARN("Received illegal option %u\n", opt);
+ goto fast_path_failed;
+ }
+ return 0;
+
+fast_path_unknown:
+ if (!mandatory)
+ return dccp_push_empty_confirm(fn, feat, local);
+
+fast_path_failed:
+ return mandatory ? DCCP_RESET_CODE_MANDATORY_ERROR
+ : DCCP_RESET_CODE_OPTION_ERROR;
+}
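Putting the two halves together, an in-flight Ack Ratio change now proceeds roughly as follows (illustrative trace, seen from host A):

  A: dccp_feat_signal_nn_change(sk, DCCPF_ACK_RATIO, 4)
       -> entry pushed onto dccps_featneg, an Ack is scheduled
  A -> B: DCCP-Ack carrying Change L(Ack Ratio, 4)
  B: dccp_feat_handle_nn_established() validates the value,
       activates it and queues Confirm R(Ack Ratio, 4)
  B -> A: DCCP-Ack carrying Confirm R(Ack Ratio, 4)
  A: matching entry found in FEAT_CHANGING state -> value activated,
       entry removed from the list

Since Confirms are not retransmitted once the connection is in OPEN state (see the dccp_feat_insert_opts() change above), a lost Confirm is recovered by A retransmitting the Change.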
+
+/**
* dccp_feat_parse_options - Process Feature-Negotiation Options
* @sk: for general use and used by the client during connection setup
* @dreq: used by the server during connection setup
@@ -1221,6 +1405,14 @@ int dccp_feat_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
return dccp_feat_confirm_recv(fn, mandatory, opt, feat,
val, len, server);
}
+ break;
+ /*
+ * Support for exchanging NN options on an established connection.
+ */
+ case DCCP_OPEN:
+ case DCCP_PARTOPEN:
+ return dccp_feat_handle_nn_established(sk, mandatory, opt, feat,
+ val, len);
}
return 0; /* ignore FN options in all other states */
}
diff --git a/net/dccp/feat.h b/net/dccp/feat.h
index e56a4e5..90b957d 100644
--- a/net/dccp/feat.h
+++ b/net/dccp/feat.h
@@ -129,6 +129,7 @@ extern int dccp_feat_clone_list(struct list_head const *, struct list_head *);
extern void dccp_encode_value_var(const u64 value, u8 *to, const u8 len);
extern u64 dccp_decode_value_var(const u8 *bf, const u8 len);
+extern u64 dccp_feat_nn_get(struct sock *sk, u8 feat);
extern int dccp_insert_option_mandatory(struct sk_buff *skb);
extern int dccp_insert_fn_opt(struct sk_buff *skb, u8 type, u8 feat,
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 4222e7a..51d5fe5 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -619,20 +619,31 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
return 1;
}
- if (sk->sk_state != DCCP_REQUESTING && sk->sk_state != DCCP_RESPOND) {
- if (dccp_check_seqno(sk, skb))
- goto discard;
-
- /*
- * Step 8: Process options and mark acknowledgeable
- */
- if (dccp_parse_options(sk, NULL, skb))
- return 1;
+ /* Step 6: Check sequence numbers (omitted in LISTEN/REQUEST state) */
+ if (sk->sk_state != DCCP_REQUESTING && dccp_check_seqno(sk, skb))
+ goto discard;
- dccp_handle_ackvec_processing(sk, skb);
- dccp_deliver_input_to_ccids(sk, skb);
+ /*
+ * Step 7: Check for unexpected packet types
+ * If (S.is_server and P.type == Response)
+ * or (S.is_client and P.type == Request)
+ * or (S.state == RESPOND and P.type == Data),
+ * Send Sync packet acknowledging P.seqno
+ * Drop packet and return
+ */
+ if ((dp->dccps_role != DCCP_ROLE_CLIENT &&
+ dh->dccph_type == DCCP_PKT_RESPONSE) ||
+ (dp->dccps_role == DCCP_ROLE_CLIENT &&
+ dh->dccph_type == DCCP_PKT_REQUEST) ||
+ (sk->sk_state == DCCP_RESPOND && dh->dccph_type == DCCP_PKT_DATA)) {
+ dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNC);
+ goto discard;
}
+ /* Step 8: Process options */
+ if (dccp_parse_options(sk, NULL, skb))
+ return 1;
+
/*
* Step 9: Process Reset
* If P.type == Reset,
@@ -640,31 +651,15 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
* S.state := TIMEWAIT
* Set TIMEWAIT timer
* Drop packet and return
- */
+ */
if (dh->dccph_type == DCCP_PKT_RESET) {
dccp_rcv_reset(sk, skb);
return 0;
- /*
- * Step 7: Check for unexpected packet types
- * If (S.is_server and P.type == Response)
- * or (S.is_client and P.type == Request)
- * or (S.state == RESPOND and P.type == Data),
- * Send Sync packet acknowledging P.seqno
- * Drop packet and return
- */
- } else if ((dp->dccps_role != DCCP_ROLE_CLIENT &&
- dh->dccph_type == DCCP_PKT_RESPONSE) ||
- (dp->dccps_role == DCCP_ROLE_CLIENT &&
- dh->dccph_type == DCCP_PKT_REQUEST) ||
- (sk->sk_state == DCCP_RESPOND &&
- dh->dccph_type == DCCP_PKT_DATA)) {
- dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNC);
- goto discard;
- } else if (dh->dccph_type == DCCP_PKT_CLOSEREQ) {
+ } else if (dh->dccph_type == DCCP_PKT_CLOSEREQ) { /* Step 13 */
if (dccp_rcv_closereq(sk, skb))
return 0;
goto discard;
- } else if (dh->dccph_type == DCCP_PKT_CLOSE) {
+ } else if (dh->dccph_type == DCCP_PKT_CLOSE) { /* Step 14 */
if (dccp_rcv_close(sk, skb))
return 0;
goto discard;
@@ -679,8 +674,12 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
__kfree_skb(skb);
return 0;
- case DCCP_RESPOND:
case DCCP_PARTOPEN:
+ /* Step 8: if using Ack Vectors, mark packet acknowledgeable */
+ dccp_handle_ackvec_processing(sk, skb);
+ dccp_deliver_input_to_ccids(sk, skb);
+ /* fall through */
+ case DCCP_RESPOND:
queued = dccp_rcv_respond_partopen_state_process(sk, skb,
dh, len);
break;
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 332639b..72416c8 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -111,6 +111,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
inet->inet_sport, inet->inet_dport, sk);
if (IS_ERR(rt)) {
+ err = PTR_ERR(rt);
rt = NULL;
goto failure;
}
@@ -433,7 +434,8 @@ exit:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
return NULL;
put_and_exit:
- sock_put(newsk);
+ inet_csk_prepare_forced_close(newsk);
+ dccp_done(newsk);
goto exit;
}
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index b74f761..592b78c 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -271,7 +271,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
&ireq6->loc_addr,
&ireq6->rmt_addr);
ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
- err = ip6_xmit(sk, skb, &fl6, opt);
+ err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
err = net_xmit_eval(err);
}
@@ -326,7 +326,7 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
if (!IS_ERR(dst)) {
skb_dst_set(skb, dst);
- ip6_xmit(ctl_sk, skb, &fl6, NULL);
+ ip6_xmit(ctl_sk, skb, &fl6, NULL, 0);
DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
return;
@@ -609,7 +609,8 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
if (__inet_inherit_port(sk, newsk) < 0) {
- sock_put(newsk);
+ inet_csk_prepare_forced_close(newsk);
+ dccp_done(newsk);
goto out;
}
__inet6_hash(newsk, NULL);
diff --git a/net/dccp/output.c b/net/dccp/output.c
index fab108e..dede3ed 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -27,11 +27,13 @@ static inline void dccp_event_ack_sent(struct sock *sk)
inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}
-static void dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
+/* enqueue @skb on sk_send_head for retransmission, return clone to send now */
+static struct sk_buff *dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
skb_set_owner_w(skb, sk);
WARN_ON(sk->sk_send_head);
sk->sk_send_head = skb;
+ return skb_clone(sk->sk_send_head, gfp_any());
}
/*
@@ -552,8 +554,7 @@ int dccp_connect(struct sock *sk)
DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;
- dccp_skb_entail(sk, skb);
- dccp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL));
+ dccp_transmit_skb(sk, dccp_skb_entail(sk, skb));
DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);
/* Timer for repeating the REQUEST until an answer. */
@@ -678,8 +679,7 @@ void dccp_send_close(struct sock *sk, const int active)
DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSE;
if (active) {
- dccp_skb_entail(sk, skb);
- dccp_transmit_skb(sk, skb_clone(skb, prio));
+ skb = dccp_skb_entail(sk, skb);
/*
* Retransmission timer for active-close: RFC 4340, 8.3 requires
* to retransmit the Close/CloseReq until the CLOSING/CLOSEREQ
@@ -692,6 +692,6 @@ void dccp_send_close(struct sock *sk, const int active)
*/
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
DCCP_TIMEOUT_INIT, DCCP_RTO_MAX);
- } else
- dccp_transmit_skb(sk, skb);
+ }
+ dccp_transmit_skb(sk, skb);
}
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 152975d..e742f90 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -184,7 +184,6 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
dp->dccps_rate_last = jiffies;
dp->dccps_role = DCCP_ROLE_UNDEFINED;
dp->dccps_service = DCCP_SERVICE_CODE_IS_ABSENT;
- dp->dccps_l_ack_ratio = dp->dccps_r_ack_ratio = 1;
dp->dccps_tx_qlen = sysctl_dccp_tx_qlen;
dccp_init_xmit_timers(sk);
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index 7587870..16f0b22 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -12,6 +12,7 @@
#include <linux/dccp.h>
#include <linux/skbuff.h>
+#include <linux/export.h>
#include "dccp.h"
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index d71f0d2..16fbf8c 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -291,23 +291,23 @@ int dn_sockaddr2username(struct sockaddr_dn *sdn, unsigned char *buf, unsigned c
*buf++ = type;
- switch(type) {
- case 0:
- *buf++ = sdn->sdn_objnum;
- break;
- case 1:
- *buf++ = 0;
- *buf++ = le16_to_cpu(sdn->sdn_objnamel);
- memcpy(buf, sdn->sdn_objname, le16_to_cpu(sdn->sdn_objnamel));
- len = 3 + le16_to_cpu(sdn->sdn_objnamel);
- break;
- case 2:
- memset(buf, 0, 5);
- buf += 5;
- *buf++ = le16_to_cpu(sdn->sdn_objnamel);
- memcpy(buf, sdn->sdn_objname, le16_to_cpu(sdn->sdn_objnamel));
- len = 7 + le16_to_cpu(sdn->sdn_objnamel);
- break;
+ switch (type) {
+ case 0:
+ *buf++ = sdn->sdn_objnum;
+ break;
+ case 1:
+ *buf++ = 0;
+ *buf++ = le16_to_cpu(sdn->sdn_objnamel);
+ memcpy(buf, sdn->sdn_objname, le16_to_cpu(sdn->sdn_objnamel));
+ len = 3 + le16_to_cpu(sdn->sdn_objnamel);
+ break;
+ case 2:
+ memset(buf, 0, 5);
+ buf += 5;
+ *buf++ = le16_to_cpu(sdn->sdn_objnamel);
+ memcpy(buf, sdn->sdn_objname, le16_to_cpu(sdn->sdn_objnamel));
+ len = 7 + le16_to_cpu(sdn->sdn_objnamel);
+ break;
}
return len;
@@ -337,23 +337,23 @@ int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *sdn,
*fmt = *data++;
type = *data++;
- switch(*fmt) {
- case 0:
- sdn->sdn_objnum = type;
- return 2;
- case 1:
- namel = 16;
- break;
- case 2:
- len -= 4;
- data += 4;
- break;
- case 4:
- len -= 8;
- data += 8;
- break;
- default:
- return -1;
+ switch (*fmt) {
+ case 0:
+ sdn->sdn_objnum = type;
+ return 2;
+ case 1:
+ namel = 16;
+ break;
+ case 2:
+ len -= 4;
+ data += 4;
+ break;
+ case 4:
+ len -= 8;
+ data += 8;
+ break;
+ default:
+ return -1;
}
len -= 1;
@@ -575,25 +575,26 @@ int dn_destroy_timer(struct sock *sk)
scp->persist = dn_nsp_persist(sk);
- switch(scp->state) {
- case DN_DI:
- dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
- if (scp->nsp_rxtshift >= decnet_di_count)
- scp->state = DN_CN;
- return 0;
+ switch (scp->state) {
+ case DN_DI:
+ dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
+ if (scp->nsp_rxtshift >= decnet_di_count)
+ scp->state = DN_CN;
+ return 0;
- case DN_DR:
- dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
- if (scp->nsp_rxtshift >= decnet_dr_count)
- scp->state = DN_DRC;
- return 0;
+ case DN_DR:
+ dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
+ if (scp->nsp_rxtshift >= decnet_dr_count)
+ scp->state = DN_DRC;
+ return 0;
- case DN_DN:
- if (scp->nsp_rxtshift < decnet_dn_count) {
- /* printk(KERN_DEBUG "dn_destroy_timer: DN\n"); */
- dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC, GFP_ATOMIC);
- return 0;
- }
+ case DN_DN:
+ if (scp->nsp_rxtshift < decnet_dn_count) {
+ /* printk(KERN_DEBUG "dn_destroy_timer: DN\n"); */
+ dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC,
+ GFP_ATOMIC);
+ return 0;
+ }
}
scp->persist = (HZ * decnet_time_wait);
@@ -623,42 +624,42 @@ static void dn_destroy_sock(struct sock *sk)
sk->sk_state = TCP_CLOSE;
- switch(scp->state) {
- case DN_DN:
- dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC,
- sk->sk_allocation);
- scp->persist_fxn = dn_destroy_timer;
- scp->persist = dn_nsp_persist(sk);
- break;
- case DN_CR:
- scp->state = DN_DR;
- goto disc_reject;
- case DN_RUN:
- scp->state = DN_DI;
- case DN_DI:
- case DN_DR:
+ switch (scp->state) {
+ case DN_DN:
+ dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC,
+ sk->sk_allocation);
+ scp->persist_fxn = dn_destroy_timer;
+ scp->persist = dn_nsp_persist(sk);
+ break;
+ case DN_CR:
+ scp->state = DN_DR;
+ goto disc_reject;
+ case DN_RUN:
+ scp->state = DN_DI;
+ case DN_DI:
+ case DN_DR:
disc_reject:
- dn_nsp_send_disc(sk, NSP_DISCINIT, 0, sk->sk_allocation);
- case DN_NC:
- case DN_NR:
- case DN_RJ:
- case DN_DIC:
- case DN_CN:
- case DN_DRC:
- case DN_CI:
- case DN_CD:
- scp->persist_fxn = dn_destroy_timer;
- scp->persist = dn_nsp_persist(sk);
- break;
- default:
- printk(KERN_DEBUG "DECnet: dn_destroy_sock passed socket in invalid state\n");
- case DN_O:
- dn_stop_slow_timer(sk);
+ dn_nsp_send_disc(sk, NSP_DISCINIT, 0, sk->sk_allocation);
+ case DN_NC:
+ case DN_NR:
+ case DN_RJ:
+ case DN_DIC:
+ case DN_CN:
+ case DN_DRC:
+ case DN_CI:
+ case DN_CD:
+ scp->persist_fxn = dn_destroy_timer;
+ scp->persist = dn_nsp_persist(sk);
+ break;
+ default:
+ printk(KERN_DEBUG "DECnet: dn_destroy_sock passed socket in invalid state\n");
+ case DN_O:
+ dn_stop_slow_timer(sk);
- dn_unhash_sock_bh(sk);
- sock_put(sk);
+ dn_unhash_sock_bh(sk);
+ sock_put(sk);
- break;
+ break;
}
}
@@ -683,15 +684,15 @@ static int dn_create(struct net *net, struct socket *sock, int protocol,
if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
- switch(sock->type) {
- case SOCK_SEQPACKET:
- if (protocol != DNPROTO_NSP)
- return -EPROTONOSUPPORT;
- break;
- case SOCK_STREAM:
- break;
- default:
- return -ESOCKTNOSUPPORT;
+ switch (sock->type) {
+ case SOCK_SEQPACKET:
+ if (protocol != DNPROTO_NSP)
+ return -EPROTONOSUPPORT;
+ break;
+ case SOCK_STREAM:
+ break;
+ default:
+ return -ESOCKTNOSUPPORT;
}
@@ -987,16 +988,16 @@ static inline int dn_check_state(struct sock *sk, struct sockaddr_dn *addr, int
{
struct dn_scp *scp = DN_SK(sk);
- switch(scp->state) {
- case DN_RUN:
- return 0;
- case DN_CR:
- return dn_confirm_accept(sk, timeo, sk->sk_allocation);
- case DN_CI:
- case DN_CC:
- return dn_wait_run(sk, timeo);
- case DN_O:
- return __dn_connect(sk, addr, addrlen, timeo, flags);
+ switch (scp->state) {
+ case DN_RUN:
+ return 0;
+ case DN_CR:
+ return dn_confirm_accept(sk, timeo, sk->sk_allocation);
+ case DN_CI:
+ case DN_CC:
+ return dn_wait_run(sk, timeo);
+ case DN_O:
+ return __dn_connect(sk, addr, addrlen, timeo, flags);
}
return -EINVAL;
@@ -1363,141 +1364,140 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char __us
if (copy_from_user(&u, optval, optlen))
return -EFAULT;
- switch(optname) {
- case DSO_CONDATA:
- if (sock->state == SS_CONNECTED)
- return -EISCONN;
- if ((scp->state != DN_O) && (scp->state != DN_CR))
- return -EINVAL;
+ switch (optname) {
+ case DSO_CONDATA:
+ if (sock->state == SS_CONNECTED)
+ return -EISCONN;
+ if ((scp->state != DN_O) && (scp->state != DN_CR))
+ return -EINVAL;
- if (optlen != sizeof(struct optdata_dn))
- return -EINVAL;
+ if (optlen != sizeof(struct optdata_dn))
+ return -EINVAL;
- if (le16_to_cpu(u.opt.opt_optl) > 16)
- return -EINVAL;
+ if (le16_to_cpu(u.opt.opt_optl) > 16)
+ return -EINVAL;
- memcpy(&scp->conndata_out, &u.opt, optlen);
- break;
-
- case DSO_DISDATA:
- if (sock->state != SS_CONNECTED && scp->accept_mode == ACC_IMMED)
- return -ENOTCONN;
-
- if (optlen != sizeof(struct optdata_dn))
- return -EINVAL;
+ memcpy(&scp->conndata_out, &u.opt, optlen);
+ break;
- if (le16_to_cpu(u.opt.opt_optl) > 16)
- return -EINVAL;
+ case DSO_DISDATA:
+ if (sock->state != SS_CONNECTED &&
+ scp->accept_mode == ACC_IMMED)
+ return -ENOTCONN;
- memcpy(&scp->discdata_out, &u.opt, optlen);
- break;
+ if (optlen != sizeof(struct optdata_dn))
+ return -EINVAL;
- case DSO_CONACCESS:
- if (sock->state == SS_CONNECTED)
- return -EISCONN;
- if (scp->state != DN_O)
- return -EINVAL;
+ if (le16_to_cpu(u.opt.opt_optl) > 16)
+ return -EINVAL;
- if (optlen != sizeof(struct accessdata_dn))
- return -EINVAL;
+ memcpy(&scp->discdata_out, &u.opt, optlen);
+ break;
- if ((u.acc.acc_accl > DN_MAXACCL) ||
- (u.acc.acc_passl > DN_MAXACCL) ||
- (u.acc.acc_userl > DN_MAXACCL))
- return -EINVAL;
+ case DSO_CONACCESS:
+ if (sock->state == SS_CONNECTED)
+ return -EISCONN;
+ if (scp->state != DN_O)
+ return -EINVAL;
- memcpy(&scp->accessdata, &u.acc, optlen);
- break;
+ if (optlen != sizeof(struct accessdata_dn))
+ return -EINVAL;
- case DSO_ACCEPTMODE:
- if (sock->state == SS_CONNECTED)
- return -EISCONN;
- if (scp->state != DN_O)
- return -EINVAL;
+ if ((u.acc.acc_accl > DN_MAXACCL) ||
+ (u.acc.acc_passl > DN_MAXACCL) ||
+ (u.acc.acc_userl > DN_MAXACCL))
+ return -EINVAL;
- if (optlen != sizeof(int))
- return -EINVAL;
+ memcpy(&scp->accessdata, &u.acc, optlen);
+ break;
- if ((u.mode != ACC_IMMED) && (u.mode != ACC_DEFER))
- return -EINVAL;
+ case DSO_ACCEPTMODE:
+ if (sock->state == SS_CONNECTED)
+ return -EISCONN;
+ if (scp->state != DN_O)
+ return -EINVAL;
- scp->accept_mode = (unsigned char)u.mode;
- break;
+ if (optlen != sizeof(int))
+ return -EINVAL;
- case DSO_CONACCEPT:
+ if ((u.mode != ACC_IMMED) && (u.mode != ACC_DEFER))
+ return -EINVAL;
- if (scp->state != DN_CR)
- return -EINVAL;
- timeo = sock_rcvtimeo(sk, 0);
- err = dn_confirm_accept(sk, &timeo, sk->sk_allocation);
- return err;
+ scp->accept_mode = (unsigned char)u.mode;
+ break;
- case DSO_CONREJECT:
+ case DSO_CONACCEPT:
+ if (scp->state != DN_CR)
+ return -EINVAL;
+ timeo = sock_rcvtimeo(sk, 0);
+ err = dn_confirm_accept(sk, &timeo, sk->sk_allocation);
+ return err;
- if (scp->state != DN_CR)
- return -EINVAL;
+ case DSO_CONREJECT:
+ if (scp->state != DN_CR)
+ return -EINVAL;
- scp->state = DN_DR;
- sk->sk_shutdown = SHUTDOWN_MASK;
- dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation);
- break;
+ scp->state = DN_DR;
+ sk->sk_shutdown = SHUTDOWN_MASK;
+ dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation);
+ break;
- default:
+ default:
#ifdef CONFIG_NETFILTER
return nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
#endif
- case DSO_LINKINFO:
- case DSO_STREAM:
- case DSO_SEQPACKET:
- return -ENOPROTOOPT;
-
- case DSO_MAXWINDOW:
- if (optlen != sizeof(unsigned long))
- return -EINVAL;
- if (u.win > NSP_MAX_WINDOW)
- u.win = NSP_MAX_WINDOW;
- if (u.win == 0)
- return -EINVAL;
- scp->max_window = u.win;
- if (scp->snd_window > u.win)
- scp->snd_window = u.win;
- break;
+ case DSO_LINKINFO:
+ case DSO_STREAM:
+ case DSO_SEQPACKET:
+ return -ENOPROTOOPT;
+
+ case DSO_MAXWINDOW:
+ if (optlen != sizeof(unsigned long))
+ return -EINVAL;
+ if (u.win > NSP_MAX_WINDOW)
+ u.win = NSP_MAX_WINDOW;
+ if (u.win == 0)
+ return -EINVAL;
+ scp->max_window = u.win;
+ if (scp->snd_window > u.win)
+ scp->snd_window = u.win;
+ break;
- case DSO_NODELAY:
- if (optlen != sizeof(int))
- return -EINVAL;
- if (scp->nonagle == 2)
- return -EINVAL;
- scp->nonagle = (u.val == 0) ? 0 : 1;
- /* if (scp->nonagle == 1) { Push pending frames } */
- break;
+ case DSO_NODELAY:
+ if (optlen != sizeof(int))
+ return -EINVAL;
+ if (scp->nonagle == 2)
+ return -EINVAL;
+ scp->nonagle = (u.val == 0) ? 0 : 1;
+ /* if (scp->nonagle == 1) { Push pending frames } */
+ break;
- case DSO_CORK:
- if (optlen != sizeof(int))
- return -EINVAL;
- if (scp->nonagle == 1)
- return -EINVAL;
- scp->nonagle = (u.val == 0) ? 0 : 2;
- /* if (scp->nonagle == 0) { Push pending frames } */
- break;
+ case DSO_CORK:
+ if (optlen != sizeof(int))
+ return -EINVAL;
+ if (scp->nonagle == 1)
+ return -EINVAL;
+ scp->nonagle = (u.val == 0) ? 0 : 2;
+ /* if (scp->nonagle == 0) { Push pending frames } */
+ break;
- case DSO_SERVICES:
- if (optlen != sizeof(unsigned char))
- return -EINVAL;
- if ((u.services & ~NSP_FC_MASK) != 0x01)
- return -EINVAL;
- if ((u.services & NSP_FC_MASK) == NSP_FC_MASK)
- return -EINVAL;
- scp->services_loc = u.services;
- break;
+ case DSO_SERVICES:
+ if (optlen != sizeof(unsigned char))
+ return -EINVAL;
+ if ((u.services & ~NSP_FC_MASK) != 0x01)
+ return -EINVAL;
+ if ((u.services & NSP_FC_MASK) == NSP_FC_MASK)
+ return -EINVAL;
+ scp->services_loc = u.services;
+ break;
- case DSO_INFO:
- if (optlen != sizeof(unsigned char))
- return -EINVAL;
- if (u.info & 0xfc)
- return -EINVAL;
- scp->info_loc = u.info;
- break;
+ case DSO_INFO:
+ if (optlen != sizeof(unsigned char))
+ return -EINVAL;
+ if (u.info & 0xfc)
+ return -EINVAL;
+ scp->info_loc = u.info;
+ break;
}
return 0;
@@ -1527,107 +1527,106 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
if(get_user(r_len , optlen))
return -EFAULT;
- switch(optname) {
- case DSO_CONDATA:
- if (r_len > sizeof(struct optdata_dn))
- r_len = sizeof(struct optdata_dn);
- r_data = &scp->conndata_in;
- break;
-
- case DSO_DISDATA:
- if (r_len > sizeof(struct optdata_dn))
- r_len = sizeof(struct optdata_dn);
- r_data = &scp->discdata_in;
- break;
+ switch (optname) {
+ case DSO_CONDATA:
+ if (r_len > sizeof(struct optdata_dn))
+ r_len = sizeof(struct optdata_dn);
+ r_data = &scp->conndata_in;
+ break;
- case DSO_CONACCESS:
- if (r_len > sizeof(struct accessdata_dn))
- r_len = sizeof(struct accessdata_dn);
- r_data = &scp->accessdata;
- break;
+ case DSO_DISDATA:
+ if (r_len > sizeof(struct optdata_dn))
+ r_len = sizeof(struct optdata_dn);
+ r_data = &scp->discdata_in;
+ break;
- case DSO_ACCEPTMODE:
- if (r_len > sizeof(unsigned char))
- r_len = sizeof(unsigned char);
- r_data = &scp->accept_mode;
- break;
+ case DSO_CONACCESS:
+ if (r_len > sizeof(struct accessdata_dn))
+ r_len = sizeof(struct accessdata_dn);
+ r_data = &scp->accessdata;
+ break;
- case DSO_LINKINFO:
- if (r_len > sizeof(struct linkinfo_dn))
- r_len = sizeof(struct linkinfo_dn);
+ case DSO_ACCEPTMODE:
+ if (r_len > sizeof(unsigned char))
+ r_len = sizeof(unsigned char);
+ r_data = &scp->accept_mode;
+ break;
- memset(&link, 0, sizeof(link));
+ case DSO_LINKINFO:
+ if (r_len > sizeof(struct linkinfo_dn))
+ r_len = sizeof(struct linkinfo_dn);
- switch(sock->state) {
- case SS_CONNECTING:
- link.idn_linkstate = LL_CONNECTING;
- break;
- case SS_DISCONNECTING:
- link.idn_linkstate = LL_DISCONNECTING;
- break;
- case SS_CONNECTED:
- link.idn_linkstate = LL_RUNNING;
- break;
- default:
- link.idn_linkstate = LL_INACTIVE;
- }
+ memset(&link, 0, sizeof(link));
- link.idn_segsize = scp->segsize_rem;
- r_data = &link;
+ switch (sock->state) {
+ case SS_CONNECTING:
+ link.idn_linkstate = LL_CONNECTING;
+ break;
+ case SS_DISCONNECTING:
+ link.idn_linkstate = LL_DISCONNECTING;
+ break;
+ case SS_CONNECTED:
+ link.idn_linkstate = LL_RUNNING;
break;
-
default:
+ link.idn_linkstate = LL_INACTIVE;
+ }
+
+ link.idn_segsize = scp->segsize_rem;
+ r_data = &link;
+ break;
+
+ default:
#ifdef CONFIG_NETFILTER
- {
- int ret, len;
+ {
+ int ret, len;
- if(get_user(len, optlen))
- return -EFAULT;
+ if (get_user(len, optlen))
+ return -EFAULT;
- ret = nf_getsockopt(sk, PF_DECnet, optname,
- optval, &len);
- if (ret >= 0)
- ret = put_user(len, optlen);
- return ret;
- }
+ ret = nf_getsockopt(sk, PF_DECnet, optname, optval, &len);
+ if (ret >= 0)
+ ret = put_user(len, optlen);
+ return ret;
+ }
#endif
- case DSO_STREAM:
- case DSO_SEQPACKET:
- case DSO_CONACCEPT:
- case DSO_CONREJECT:
- return -ENOPROTOOPT;
-
- case DSO_MAXWINDOW:
- if (r_len > sizeof(unsigned long))
- r_len = sizeof(unsigned long);
- r_data = &scp->max_window;
- break;
+ case DSO_STREAM:
+ case DSO_SEQPACKET:
+ case DSO_CONACCEPT:
+ case DSO_CONREJECT:
+ return -ENOPROTOOPT;
+
+ case DSO_MAXWINDOW:
+ if (r_len > sizeof(unsigned long))
+ r_len = sizeof(unsigned long);
+ r_data = &scp->max_window;
+ break;
- case DSO_NODELAY:
- if (r_len > sizeof(int))
- r_len = sizeof(int);
- val = (scp->nonagle == 1);
- r_data = &val;
- break;
+ case DSO_NODELAY:
+ if (r_len > sizeof(int))
+ r_len = sizeof(int);
+ val = (scp->nonagle == 1);
+ r_data = &val;
+ break;
- case DSO_CORK:
- if (r_len > sizeof(int))
- r_len = sizeof(int);
- val = (scp->nonagle == 2);
- r_data = &val;
- break;
+ case DSO_CORK:
+ if (r_len > sizeof(int))
+ r_len = sizeof(int);
+ val = (scp->nonagle == 2);
+ r_data = &val;
+ break;
- case DSO_SERVICES:
- if (r_len > sizeof(unsigned char))
- r_len = sizeof(unsigned char);
- r_data = &scp->services_rem;
- break;
+ case DSO_SERVICES:
+ if (r_len > sizeof(unsigned char))
+ r_len = sizeof(unsigned char);
+ r_data = &scp->services_rem;
+ break;
- case DSO_INFO:
- if (r_len > sizeof(unsigned char))
- r_len = sizeof(unsigned char);
- r_data = &scp->info_rem;
- break;
+ case DSO_INFO:
+ if (r_len > sizeof(unsigned char))
+ r_len = sizeof(unsigned char);
+ r_data = &scp->info_rem;
+ break;
}
if (r_data) {
@@ -2088,15 +2087,15 @@ static int dn_device_event(struct notifier_block *this, unsigned long event,
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
- switch(event) {
- case NETDEV_UP:
- dn_dev_up(dev);
- break;
- case NETDEV_DOWN:
- dn_dev_down(dev);
- break;
- default:
- break;
+ switch (event) {
+ case NETDEV_UP:
+ dn_dev_up(dev);
+ break;
+ case NETDEV_DOWN:
+ dn_dev_down(dev);
+ break;
+ default:
+ break;
}
return NOTIFY_DONE;
@@ -2209,54 +2208,54 @@ static void dn_printable_object(struct sockaddr_dn *dn, unsigned char *buf)
int i;
switch (le16_to_cpu(dn->sdn_objnamel)) {
- case 0:
- sprintf(buf, "%d", dn->sdn_objnum);
- break;
- default:
- for (i = 0; i < le16_to_cpu(dn->sdn_objnamel); i++) {
- buf[i] = dn->sdn_objname[i];
- if (IS_NOT_PRINTABLE(buf[i]))
- buf[i] = '.';
- }
- buf[i] = 0;
+ case 0:
+ sprintf(buf, "%d", dn->sdn_objnum);
+ break;
+ default:
+ for (i = 0; i < le16_to_cpu(dn->sdn_objnamel); i++) {
+ buf[i] = dn->sdn_objname[i];
+ if (IS_NOT_PRINTABLE(buf[i]))
+ buf[i] = '.';
+ }
+ buf[i] = 0;
}
}
static char *dn_state2asc(unsigned char state)
{
- switch(state) {
- case DN_O:
- return "OPEN";
- case DN_CR:
- return " CR";
- case DN_DR:
- return " DR";
- case DN_DRC:
- return " DRC";
- case DN_CC:
- return " CC";
- case DN_CI:
- return " CI";
- case DN_NR:
- return " NR";
- case DN_NC:
- return " NC";
- case DN_CD:
- return " CD";
- case DN_RJ:
- return " RJ";
- case DN_RUN:
- return " RUN";
- case DN_DI:
- return " DI";
- case DN_DIC:
- return " DIC";
- case DN_DN:
- return " DN";
- case DN_CL:
- return " CL";
- case DN_CN:
- return " CN";
+ switch (state) {
+ case DN_O:
+ return "OPEN";
+ case DN_CR:
+ return " CR";
+ case DN_DR:
+ return " DR";
+ case DN_DRC:
+ return " DRC";
+ case DN_CC:
+ return " CC";
+ case DN_CI:
+ return " CI";
+ case DN_NR:
+ return " NR";
+ case DN_NC:
+ return " NC";
+ case DN_CD:
+ return " CD";
+ case DN_RJ:
+ return " RJ";
+ case DN_RUN:
+ return " RUN";
+ case DN_DI:
+ return " DI";
+ case DN_DIC:
+ return " DIC";
+ case DN_DN:
+ return " DN";
+ case DN_CL:
+ return " CL";
+ case DN_CN:
+ return " CN";
}
return "????";
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 3780fd6..74d321a 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -437,17 +437,17 @@ int dn_dev_ioctl(unsigned int cmd, void __user *arg)
dev_load(&init_net, ifr->ifr_name);
- switch(cmd) {
- case SIOCGIFADDR:
- break;
- case SIOCSIFADDR:
- if (!capable(CAP_NET_ADMIN))
- return -EACCES;
- if (sdn->sdn_family != AF_DECnet)
- return -EINVAL;
- break;
- default:
+ switch (cmd) {
+ case SIOCGIFADDR:
+ break;
+ case SIOCSIFADDR:
+ if (!capable(CAP_NET_ADMIN))
+ return -EACCES;
+ if (sdn->sdn_family != AF_DECnet)
return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
}
rtnl_lock();
@@ -470,27 +470,27 @@ int dn_dev_ioctl(unsigned int cmd, void __user *arg)
goto done;
}
- switch(cmd) {
- case SIOCGIFADDR:
- *((__le16 *)sdn->sdn_nodeaddr) = ifa->ifa_local;
- goto rarok;
-
- case SIOCSIFADDR:
- if (!ifa) {
- if ((ifa = dn_dev_alloc_ifa()) == NULL) {
- ret = -ENOBUFS;
- break;
- }
- memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
- } else {
- if (ifa->ifa_local == dn_saddr2dn(sdn))
- break;
- dn_dev_del_ifa(dn_db, ifap, 0);
+ switch (cmd) {
+ case SIOCGIFADDR:
+ *((__le16 *)sdn->sdn_nodeaddr) = ifa->ifa_local;
+ goto rarok;
+
+ case SIOCSIFADDR:
+ if (!ifa) {
+ if ((ifa = dn_dev_alloc_ifa()) == NULL) {
+ ret = -ENOBUFS;
+ break;
}
+ memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
+ } else {
+ if (ifa->ifa_local == dn_saddr2dn(sdn))
+ break;
+ dn_dev_del_ifa(dn_db, ifap, 0);
+ }
- ifa->ifa_local = ifa->ifa_address = dn_saddr2dn(sdn);
+ ifa->ifa_local = ifa->ifa_address = dn_saddr2dn(sdn);
- ret = dn_dev_set_ifa(dev, ifa);
+ ret = dn_dev_set_ifa(dev, ifa);
}
done:
rtnl_unlock();
@@ -1101,7 +1101,7 @@ static struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
dn_db->neigh_parms = neigh_parms_alloc(dev, &dn_neigh_table);
if (!dn_db->neigh_parms) {
- rcu_assign_pointer(dev->dn_ptr, NULL);
+ RCU_INIT_POINTER(dev->dn_ptr, NULL);
kfree(dn_db);
return NULL;
}
@@ -1313,7 +1313,7 @@ static void *dn_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
++*pos;
- dev = (struct net_device *)v;
+ dev = v;
if (v == SEQ_START_TOKEN)
dev = net_device_entry(&init_net.dev_base_head);
@@ -1335,13 +1335,13 @@ static void dn_dev_seq_stop(struct seq_file *seq, void *v)
static char *dn_type2asc(char type)
{
- switch(type) {
- case DN_DEV_BCAST:
- return "B";
- case DN_DEV_UCAST:
- return "U";
- case DN_DEV_MPOINT:
- return "M";
+ switch (type) {
+ case DN_DEV_BCAST:
+ return "B";
+ case DN_DEV_UCAST:
+ return "U";
+ case DN_DEV_MPOINT:
+ return "M";
}
return "?";
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index 104324d..9e885f1 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -30,7 +30,7 @@
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
#include <net/neighbour.h>
#include <net/dst.h>
@@ -414,33 +414,34 @@ int dn_fib_semantic_match(int type, struct dn_fib_info *fi, const struct flowidn
res->fi = fi;
- switch(type) {
- case RTN_NAT:
- DN_FIB_RES_RESET(*res);
+ switch (type) {
+ case RTN_NAT:
+ DN_FIB_RES_RESET(*res);
+ atomic_inc(&fi->fib_clntref);
+ return 0;
+ case RTN_UNICAST:
+ case RTN_LOCAL:
+ for_nexthops(fi) {
+ if (nh->nh_flags & RTNH_F_DEAD)
+ continue;
+ if (!fld->flowidn_oif ||
+ fld->flowidn_oif == nh->nh_oif)
+ break;
+ }
+ if (nhsel < fi->fib_nhs) {
+ res->nh_sel = nhsel;
atomic_inc(&fi->fib_clntref);
return 0;
- case RTN_UNICAST:
- case RTN_LOCAL:
- for_nexthops(fi) {
- if (nh->nh_flags & RTNH_F_DEAD)
- continue;
- if (!fld->flowidn_oif ||
- fld->flowidn_oif == nh->nh_oif)
- break;
- }
- if (nhsel < fi->fib_nhs) {
- res->nh_sel = nhsel;
- atomic_inc(&fi->fib_clntref);
- return 0;
- }
- endfor_nexthops(fi);
- res->fi = NULL;
- return 1;
- default:
- if (net_ratelimit())
- printk("DECnet: impossible routing event : dn_fib_semantic_match type=%d\n", type);
- res->fi = NULL;
- return -EINVAL;
+ }
+ endfor_nexthops(fi);
+ res->fi = NULL;
+ return 1;
+ default:
+ if (net_ratelimit())
+ printk("DECnet: impossible routing event : dn_fib_semantic_match type=%d\n",
+ type);
+ res->fi = NULL;
+ return -EINVAL;
}
}
return err;
@@ -647,20 +648,20 @@ static int dn_fib_dnaddr_event(struct notifier_block *this, unsigned long event,
{
struct dn_ifaddr *ifa = (struct dn_ifaddr *)ptr;
- switch(event) {
- case NETDEV_UP:
- dn_fib_add_ifaddr(ifa);
- dn_fib_sync_up(ifa->ifa_dev->dev);
+ switch (event) {
+ case NETDEV_UP:
+ dn_fib_add_ifaddr(ifa);
+ dn_fib_sync_up(ifa->ifa_dev->dev);
+ dn_rt_cache_flush(-1);
+ break;
+ case NETDEV_DOWN:
+ dn_fib_del_ifaddr(ifa);
+ if (ifa->ifa_dev && ifa->ifa_dev->ifa_list == NULL) {
+ dn_fib_disable_addr(ifa->ifa_dev->dev, 1);
+ } else {
dn_rt_cache_flush(-1);
- break;
- case NETDEV_DOWN:
- dn_fib_del_ifaddr(ifa);
- if (ifa->ifa_dev && ifa->ifa_dev->ifa_list == NULL) {
- dn_fib_disable_addr(ifa->ifa_dev->dev, 1);
- } else {
- dn_rt_cache_flush(-1);
- }
- break;
+ }
+ break;
}
return NOTIFY_DONE;
}
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index 9810610..7f0eb08 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -38,7 +38,7 @@
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <linux/jhash.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/dst.h>
@@ -51,9 +51,9 @@
static int dn_neigh_construct(struct neighbour *);
static void dn_long_error_report(struct neighbour *, struct sk_buff *);
static void dn_short_error_report(struct neighbour *, struct sk_buff *);
-static int dn_long_output(struct sk_buff *);
-static int dn_short_output(struct sk_buff *);
-static int dn_phase3_output(struct sk_buff *);
+static int dn_long_output(struct neighbour *, struct sk_buff *);
+static int dn_short_output(struct neighbour *, struct sk_buff *);
+static int dn_phase3_output(struct neighbour *, struct sk_buff *);
/*
@@ -64,8 +64,6 @@ static const struct neigh_ops dn_long_ops = {
.error_report = dn_long_error_report,
.output = dn_long_output,
.connected_output = dn_long_output,
- .hh_output = dev_queue_xmit,
- .queue_xmit = dev_queue_xmit,
};
/*
@@ -76,8 +74,6 @@ static const struct neigh_ops dn_short_ops = {
.error_report = dn_short_error_report,
.output = dn_short_output,
.connected_output = dn_short_output,
- .hh_output = dev_queue_xmit,
- .queue_xmit = dev_queue_xmit,
};
/*
@@ -88,8 +84,6 @@ static const struct neigh_ops dn_phase3_ops = {
.error_report = dn_short_error_report, /* Can use short version here */
.output = dn_phase3_output,
.connected_output = dn_phase3_output,
- .hh_output = dev_queue_xmit,
- .queue_xmit = dev_queue_xmit
};
static u32 dn_neigh_hash(const void *pkey,
@@ -215,7 +209,7 @@ static int dn_neigh_output_packet(struct sk_buff *skb)
dn_dn2eth(mac_addr, rt->rt_local_src);
if (dev_hard_header(skb, dev, ntohs(skb->protocol), neigh->ha,
mac_addr, skb->len) >= 0)
- return neigh->ops->queue_xmit(skb);
+ return dev_queue_xmit(skb);
if (net_ratelimit())
printk(KERN_DEBUG "dn_neigh_output_packet: oops, can't send packet\n");
@@ -224,10 +218,8 @@ static int dn_neigh_output_packet(struct sk_buff *skb)
return -EINVAL;
}
-static int dn_long_output(struct sk_buff *skb)
+static int dn_long_output(struct neighbour *neigh, struct sk_buff *skb)
{
- struct dst_entry *dst = skb_dst(skb);
- struct neighbour *neigh = dst_get_neighbour(dst);
struct net_device *dev = neigh->dev;
int headroom = dev->hard_header_len + sizeof(struct dn_long_packet) + 3;
unsigned char *data;
@@ -271,10 +263,8 @@ static int dn_long_output(struct sk_buff *skb)
neigh->dev, dn_neigh_output_packet);
}
-static int dn_short_output(struct sk_buff *skb)
+static int dn_short_output(struct neighbour *neigh, struct sk_buff *skb)
{
- struct dst_entry *dst = skb_dst(skb);
- struct neighbour *neigh = dst_get_neighbour(dst);
struct net_device *dev = neigh->dev;
int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2;
struct dn_short_packet *sp;
@@ -315,10 +305,8 @@ static int dn_short_output(struct sk_buff *skb)
 * Phase 3 output is the same as short output, except that
* it clears the area bits before transmission.
*/
-static int dn_phase3_output(struct sk_buff *skb)
+static int dn_phase3_output(struct neighbour *neigh, struct sk_buff *skb)
{
- struct dst_entry *dst = skb_dst(skb);
- struct neighbour *neigh = dst_get_neighbour(dst);
struct net_device *dev = neigh->dev;
int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2;
struct dn_short_packet *sp;
@@ -404,13 +392,13 @@ int dn_neigh_router_hello(struct sk_buff *skb)
dn->flags &= ~DN_NDFLAG_P3;
- switch(msg->iinfo & DN_RT_INFO_TYPE) {
- case DN_RT_INFO_L1RT:
- dn->flags &=~DN_NDFLAG_R2;
- dn->flags |= DN_NDFLAG_R1;
- break;
- case DN_RT_INFO_L2RT:
- dn->flags |= DN_NDFLAG_R2;
+ switch (msg->iinfo & DN_RT_INFO_TYPE) {
+ case DN_RT_INFO_L1RT:
+ dn->flags &=~DN_NDFLAG_R2;
+ dn->flags |= DN_NDFLAG_R1;
+ break;
+ case DN_RT_INFO_L2RT:
+ dn->flags |= DN_NDFLAG_R2;
}
}
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
index b430549..73fa268 100644
--- a/net/decnet/dn_nsp_in.c
+++ b/net/decnet/dn_nsp_in.c
@@ -101,23 +101,27 @@ static void dn_ack(struct sock *sk, struct sk_buff *skb, unsigned short ack)
unsigned short type = ((ack >> 12) & 0x0003);
int wakeup = 0;
- switch(type) {
- case 0: /* ACK - Data */
- if (dn_after(ack, scp->ackrcv_dat)) {
- scp->ackrcv_dat = ack & 0x0fff;
- wakeup |= dn_nsp_check_xmit_queue(sk, skb, &scp->data_xmit_queue, ack);
- }
- break;
- case 1: /* NAK - Data */
- break;
- case 2: /* ACK - OtherData */
- if (dn_after(ack, scp->ackrcv_oth)) {
- scp->ackrcv_oth = ack & 0x0fff;
- wakeup |= dn_nsp_check_xmit_queue(sk, skb, &scp->other_xmit_queue, ack);
- }
- break;
- case 3: /* NAK - OtherData */
- break;
+ switch (type) {
+ case 0: /* ACK - Data */
+ if (dn_after(ack, scp->ackrcv_dat)) {
+ scp->ackrcv_dat = ack & 0x0fff;
+ wakeup |= dn_nsp_check_xmit_queue(sk, skb,
+ &scp->data_xmit_queue,
+ ack);
+ }
+ break;
+ case 1: /* NAK - Data */
+ break;
+ case 2: /* ACK - OtherData */
+ if (dn_after(ack, scp->ackrcv_oth)) {
+ scp->ackrcv_oth = ack & 0x0fff;
+ wakeup |= dn_nsp_check_xmit_queue(sk, skb,
+ &scp->other_xmit_queue,
+ ack);
+ }
+ break;
+ case 3: /* NAK - OtherData */
+ break;
}
if (wakeup && !sock_flag(sk, SOCK_DEAD))
@@ -417,19 +421,19 @@ static void dn_nsp_disc_init(struct sock *sk, struct sk_buff *skb)
scp->addrrem = cb->src_port;
sk->sk_state = TCP_CLOSE;
- switch(scp->state) {
- case DN_CI:
- case DN_CD:
- scp->state = DN_RJ;
- sk->sk_err = ECONNREFUSED;
- break;
- case DN_RUN:
- sk->sk_shutdown |= SHUTDOWN_MASK;
- scp->state = DN_DN;
- break;
- case DN_DI:
- scp->state = DN_DIC;
- break;
+ switch (scp->state) {
+ case DN_CI:
+ case DN_CD:
+ scp->state = DN_RJ;
+ sk->sk_err = ECONNREFUSED;
+ break;
+ case DN_RUN:
+ sk->sk_shutdown |= SHUTDOWN_MASK;
+ scp->state = DN_DN;
+ break;
+ case DN_DI:
+ scp->state = DN_DIC;
+ break;
}
if (!sock_flag(sk, SOCK_DEAD)) {
@@ -470,23 +474,23 @@ static void dn_nsp_disc_conf(struct sock *sk, struct sk_buff *skb)
sk->sk_state = TCP_CLOSE;
- switch(scp->state) {
- case DN_CI:
- scp->state = DN_NR;
- break;
- case DN_DR:
- if (reason == NSP_REASON_DC)
- scp->state = DN_DRC;
- if (reason == NSP_REASON_NL)
- scp->state = DN_CN;
- break;
- case DN_DI:
- scp->state = DN_DIC;
- break;
- case DN_RUN:
- sk->sk_shutdown |= SHUTDOWN_MASK;
- case DN_CC:
+ switch (scp->state) {
+ case DN_CI:
+ scp->state = DN_NR;
+ break;
+ case DN_DR:
+ if (reason == NSP_REASON_DC)
+ scp->state = DN_DRC;
+ if (reason == NSP_REASON_NL)
scp->state = DN_CN;
+ break;
+ case DN_DI:
+ scp->state = DN_DIC;
+ break;
+ case DN_RUN:
+ sk->sk_shutdown |= SHUTDOWN_MASK;
+ case DN_CC:
+ scp->state = DN_CN;
}
if (!sock_flag(sk, SOCK_DEAD)) {
@@ -692,16 +696,16 @@ static int dn_nsp_no_socket(struct sk_buff *skb, unsigned short reason)
goto out;
if ((reason != NSP_REASON_OK) && ((cb->nsp_flags & 0x0c) == 0x08)) {
- switch(cb->nsp_flags & 0x70) {
- case 0x10:
- case 0x60: /* (Retransmitted) Connect Init */
- dn_nsp_return_disc(skb, NSP_DISCINIT, reason);
- ret = NET_RX_SUCCESS;
- break;
- case 0x20: /* Connect Confirm */
- dn_nsp_return_disc(skb, NSP_DISCCONF, reason);
- ret = NET_RX_SUCCESS;
- break;
+ switch (cb->nsp_flags & 0x70) {
+ case 0x10:
+ case 0x60: /* (Retransmitted) Connect Init */
+ dn_nsp_return_disc(skb, NSP_DISCINIT, reason);
+ ret = NET_RX_SUCCESS;
+ break;
+ case 0x20: /* Connect Confirm */
+ dn_nsp_return_disc(skb, NSP_DISCCONF, reason);
+ ret = NET_RX_SUCCESS;
+ break;
}
}
@@ -733,17 +737,17 @@ static int dn_nsp_rx_packet(struct sk_buff *skb)
* Filter out conninits and useless packet types
*/
if ((cb->nsp_flags & 0x0c) == 0x08) {
- switch(cb->nsp_flags & 0x70) {
- case 0x00: /* NOP */
- case 0x70: /* Reserved */
- case 0x50: /* Reserved, Phase II node init */
+ switch (cb->nsp_flags & 0x70) {
+ case 0x00: /* NOP */
+ case 0x70: /* Reserved */
+ case 0x50: /* Reserved, Phase II node init */
+ goto free_out;
+ case 0x10:
+ case 0x60:
+ if (unlikely(cb->rt_flags & DN_RT_F_RTS))
goto free_out;
- case 0x10:
- case 0x60:
- if (unlikely(cb->rt_flags & DN_RT_F_RTS))
- goto free_out;
- sk = dn_find_listener(skb, &reason);
- goto got_it;
+ sk = dn_find_listener(skb, &reason);
+ goto got_it;
}
}
@@ -836,20 +840,20 @@ int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
* Control packet.
*/
if ((cb->nsp_flags & 0x0c) == 0x08) {
- switch(cb->nsp_flags & 0x70) {
- case 0x10:
- case 0x60:
- dn_nsp_conn_init(sk, skb);
- break;
- case 0x20:
- dn_nsp_conn_conf(sk, skb);
- break;
- case 0x30:
- dn_nsp_disc_init(sk, skb);
- break;
- case 0x40:
- dn_nsp_disc_conf(sk, skb);
- break;
+ switch (cb->nsp_flags & 0x70) {
+ case 0x10:
+ case 0x60:
+ dn_nsp_conn_init(sk, skb);
+ break;
+ case 0x20:
+ dn_nsp_conn_conf(sk, skb);
+ break;
+ case 0x30:
+ dn_nsp_disc_init(sk, skb);
+ break;
+ case 0x40:
+ dn_nsp_disc_conf(sk, skb);
+ break;
}
} else if (cb->nsp_flags == 0x24) {
@@ -890,15 +894,15 @@ int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
if (scp->state != DN_RUN)
goto free_out;
- switch(cb->nsp_flags) {
- case 0x10: /* LS */
- dn_nsp_linkservice(sk, skb);
- break;
- case 0x30: /* OD */
- dn_nsp_otherdata(sk, skb);
- break;
- default:
- dn_nsp_data(sk, skb);
+ switch (cb->nsp_flags) {
+ case 0x10: /* LS */
+ dn_nsp_linkservice(sk, skb);
+ break;
+ case 0x30: /* OD */
+ dn_nsp_otherdata(sk, skb);
+ break;
+ default:
+ dn_nsp_data(sk, skb);
}
} else { /* Ack, chuck it out here */
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 82d6250..94f4ec0 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -77,6 +77,7 @@
#include <linux/netfilter_decnet.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
+#include <linux/export.h>
#include <asm/errno.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
@@ -111,11 +112,12 @@ static unsigned long dn_rt_deadline;
static int dn_dst_gc(struct dst_ops *ops);
static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
-static unsigned int dn_dst_default_mtu(const struct dst_entry *dst);
+static unsigned int dn_dst_mtu(const struct dst_entry *dst);
static void dn_dst_destroy(struct dst_entry *);
static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
static void dn_dst_link_failure(struct sk_buff *);
static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu);
+static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, const void *daddr);
static int dn_route_input(struct sk_buff *);
static void dn_run_flush(unsigned long dummy);
@@ -133,12 +135,13 @@ static struct dst_ops dn_dst_ops = {
.gc = dn_dst_gc,
.check = dn_dst_check,
.default_advmss = dn_dst_default_advmss,
- .default_mtu = dn_dst_default_mtu,
+ .mtu = dn_dst_mtu,
.cow_metrics = dst_cow_metrics_generic,
.destroy = dn_dst_destroy,
.negative_advice = dn_dst_negative_advice,
.link_failure = dn_dst_link_failure,
.update_pmtu = dn_dst_update_pmtu,
+ .neigh_lookup = dn_dst_neigh_lookup,
};
static void dn_dst_destroy(struct dst_entry *dst)
@@ -497,11 +500,11 @@ static int dn_route_rx_packet(struct sk_buff *skb)
}
if ((skb->pkt_type == PACKET_HOST) && (cb->rt_flags & DN_RT_F_RQR)) {
- switch(cb->rt_flags & DN_RT_PKT_MSK) {
- case DN_RT_PKT_SHORT:
- return dn_return_short(skb);
- case DN_RT_PKT_LONG:
- return dn_return_long(skb);
+ switch (cb->rt_flags & DN_RT_PKT_MSK) {
+ case DN_RT_PKT_SHORT:
+ return dn_return_short(skb);
+ case DN_RT_PKT_LONG:
+ return dn_return_long(skb);
}
}
@@ -654,38 +657,38 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type
if (unlikely(skb_linearize(skb)))
goto dump_it;
- switch(flags & DN_RT_CNTL_MSK) {
- case DN_RT_PKT_INIT:
- dn_dev_init_pkt(skb);
- break;
- case DN_RT_PKT_VERI:
- dn_dev_veri_pkt(skb);
- break;
+ switch (flags & DN_RT_CNTL_MSK) {
+ case DN_RT_PKT_INIT:
+ dn_dev_init_pkt(skb);
+ break;
+ case DN_RT_PKT_VERI:
+ dn_dev_veri_pkt(skb);
+ break;
}
if (dn->parms.state != DN_DEV_S_RU)
goto dump_it;
- switch(flags & DN_RT_CNTL_MSK) {
- case DN_RT_PKT_HELO:
- return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
- skb, skb->dev, NULL,
- dn_route_ptp_hello);
-
- case DN_RT_PKT_L1RT:
- case DN_RT_PKT_L2RT:
- return NF_HOOK(NFPROTO_DECNET, NF_DN_ROUTE,
- skb, skb->dev, NULL,
- dn_route_discard);
- case DN_RT_PKT_ERTH:
- return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
- skb, skb->dev, NULL,
- dn_neigh_router_hello);
-
- case DN_RT_PKT_EEDH:
- return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
- skb, skb->dev, NULL,
- dn_neigh_endnode_hello);
+ switch (flags & DN_RT_CNTL_MSK) {
+ case DN_RT_PKT_HELO:
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
+ skb, skb->dev, NULL,
+ dn_route_ptp_hello);
+
+ case DN_RT_PKT_L1RT:
+ case DN_RT_PKT_L2RT:
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_ROUTE,
+ skb, skb->dev, NULL,
+ dn_route_discard);
+ case DN_RT_PKT_ERTH:
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
+ skb, skb->dev, NULL,
+ dn_neigh_router_hello);
+
+ case DN_RT_PKT_EEDH:
+ return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
+ skb, skb->dev, NULL,
+ dn_neigh_endnode_hello);
}
} else {
if (dn->parms.state != DN_DEV_S_RU)
@@ -693,11 +696,11 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type
skb_pull(skb, 1); /* Pull flags */
- switch(flags & DN_RT_PKT_MSK) {
- case DN_RT_PKT_LONG:
- return dn_route_rx_long(skb);
- case DN_RT_PKT_SHORT:
- return dn_route_rx_short(skb);
+ switch (flags & DN_RT_PKT_MSK) {
+ case DN_RT_PKT_LONG:
+ return dn_route_rx_long(skb);
+ case DN_RT_PKT_SHORT:
+ return dn_route_rx_short(skb);
}
}
@@ -707,6 +710,14 @@ out:
return NET_RX_DROP;
}
+static int dn_to_neigh_output(struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+ struct neighbour *n = dst_get_neighbour(dst);
+
+ return n->output(n, skb);
+}
+
static int dn_output(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
@@ -735,7 +746,7 @@ static int dn_output(struct sk_buff *skb)
cb->hops = 0;
return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_OUT, skb, NULL, dev,
- neigh->output);
+ dn_to_neigh_output);
error:
if (net_ratelimit())
@@ -752,7 +763,6 @@ static int dn_forward(struct sk_buff *skb)
struct dst_entry *dst = skb_dst(skb);
struct dn_dev *dn_db = rcu_dereference(dst->dev->dn_ptr);
struct dn_route *rt;
- struct neighbour *neigh = dst_get_neighbour(dst);
int header_len;
#ifdef CONFIG_NETFILTER
struct net_device *dev = skb->dev;
@@ -785,7 +795,7 @@ static int dn_forward(struct sk_buff *skb)
cb->rt_flags |= DN_RT_F_IE;
return NF_HOOK(NFPROTO_DECNET, NF_DN_FORWARD, skb, dev, skb->dev,
- neigh->output);
+ dn_to_neigh_output);
drop:
kfree_skb(skb);
@@ -815,9 +825,16 @@ static unsigned int dn_dst_default_advmss(const struct dst_entry *dst)
return dn_mss_from_pmtu(dst->dev, dst_mtu(dst));
}
-static unsigned int dn_dst_default_mtu(const struct dst_entry *dst)
+static unsigned int dn_dst_mtu(const struct dst_entry *dst)
+{
+ unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+ return mtu ? : dst->dev->mtu;
+}
+
+static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
{
- return dst->dev->mtu;
+ return __neigh_lookup_errno(&dn_neigh_table, daddr, dst->dev);
}
static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
@@ -1421,20 +1438,20 @@ make_route:
dst_set_neighbour(&rt->dst, neigh);
rt->dst.lastuse = jiffies;
rt->dst.output = dn_rt_bug;
- switch(res.type) {
- case RTN_UNICAST:
- rt->dst.input = dn_forward;
- break;
- case RTN_LOCAL:
- rt->dst.output = dn_output;
- rt->dst.input = dn_nsp_rx;
- rt->dst.dev = in_dev;
- flags |= RTCF_LOCAL;
- break;
- default:
- case RTN_UNREACHABLE:
- case RTN_BLACKHOLE:
- rt->dst.input = dst_discard;
+ switch (res.type) {
+ case RTN_UNICAST:
+ rt->dst.input = dn_forward;
+ break;
+ case RTN_LOCAL:
+ rt->dst.output = dn_output;
+ rt->dst.input = dn_nsp_rx;
+ rt->dst.dev = in_dev;
+ flags |= RTCF_LOCAL;
+ break;
+ default:
+ case RTN_UNREACHABLE:
+ case RTN_BLACKHOLE:
+ rt->dst.input = dst_discard;
}
rt->rt_flags = flags;
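The dn_dst_mtu() hunk above falls back to the device MTU only when no RTAX_MTU metric is stored, using GNU C's two-operand conditional (a ?: b). A minimal userspace sketch of that pattern follows; effective_mtu(), mtu_metric and dev_mtu are illustrative names, not kernel symbols.

/* Sketch of the "stored metric overrides device default" pattern adopted by
 * dn_dst_mtu().  The ?: form returns its first operand when that operand is
 * non-zero (a GNU C extension, as used in the hunk above). */
#include <stdio.h>

static unsigned int effective_mtu(unsigned int mtu_metric, unsigned int dev_mtu)
{
	return mtu_metric ?: dev_mtu;
}

int main(void)
{
	printf("%u\n", effective_mtu(0, 1500));   /* no metric stored: 1500 */
	printf("%u\n", effective_mtu(576, 1500)); /* stored metric wins: 576 */
	return 0;
}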
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index f0efb0c..f65c9dd 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -23,6 +23,7 @@
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
+#include <linux/export.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/flow.h>
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index bd0a52d..a9a62f2 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -25,7 +25,7 @@
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
#include <linux/route.h> /* RTF_xxx */
#include <net/neighbour.h>
@@ -147,17 +147,18 @@ static void dn_rehash_zone(struct dn_zone *dz)
old_divisor = dz->dz_divisor;
- switch(old_divisor) {
- case 16:
- new_divisor = 256;
- new_hashmask = 0xFF;
- break;
- default:
- printk(KERN_DEBUG "DECnet: dn_rehash_zone: BUG! %d\n", old_divisor);
- case 256:
- new_divisor = 1024;
- new_hashmask = 0x3FF;
- break;
+ switch (old_divisor) {
+ case 16:
+ new_divisor = 256;
+ new_hashmask = 0xFF;
+ break;
+ default:
+ printk(KERN_DEBUG "DECnet: dn_rehash_zone: BUG! %d\n",
+ old_divisor);
+ case 256:
+ new_divisor = 1024;
+ new_hashmask = 0x3FF;
+ break;
}
ht = kcalloc(new_divisor, sizeof(struct dn_fib_node*), GFP_KERNEL);
diff --git a/net/decnet/dn_timer.c b/net/decnet/dn_timer.c
index 0982571..d9c150c 100644
--- a/net/decnet/dn_timer.c
+++ b/net/decnet/dn_timer.c
@@ -22,7 +22,7 @@
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <net/sock.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <net/flow.h>
#include <net/dn.h>
@@ -36,16 +36,13 @@ static void dn_slow_timer(unsigned long arg);
void dn_start_slow_timer(struct sock *sk)
{
- sk->sk_timer.expires = jiffies + SLOW_INTERVAL;
- sk->sk_timer.function = dn_slow_timer;
- sk->sk_timer.data = (unsigned long)sk;
-
- add_timer(&sk->sk_timer);
+ setup_timer(&sk->sk_timer, dn_slow_timer, (unsigned long)sk);
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
}
void dn_stop_slow_timer(struct sock *sk)
{
- del_timer(&sk->sk_timer);
+ sk_stop_timer(sk, &sk->sk_timer);
}
static void dn_slow_timer(unsigned long arg)
@@ -53,12 +50,10 @@ static void dn_slow_timer(unsigned long arg)
struct sock *sk = (struct sock *)arg;
struct dn_scp *scp = DN_SK(sk);
- sock_hold(sk);
bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
- sk->sk_timer.expires = jiffies + HZ / 10;
- add_timer(&sk->sk_timer);
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 10);
goto out;
}
@@ -100,9 +95,7 @@ static void dn_slow_timer(unsigned long arg)
scp->keepalive_fxn(sk);
}
- sk->sk_timer.expires = jiffies + SLOW_INTERVAL;
-
- add_timer(&sk->sk_timer);
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
out:
bh_unlock_sock(sk);
sock_put(sk);
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index 64a7f39..69975e0 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -69,15 +69,15 @@ static void dnrmg_send_peer(struct sk_buff *skb)
int group = 0;
unsigned char flags = *skb->data;
- switch(flags & DN_RT_CNTL_MSK) {
- case DN_RT_PKT_L1RT:
- group = DNRNG_NLGRP_L1;
- break;
- case DN_RT_PKT_L2RT:
- group = DNRNG_NLGRP_L2;
- break;
- default:
- return;
+ switch (flags & DN_RT_CNTL_MSK) {
+ case DN_RT_PKT_L1RT:
+ group = DNRNG_NLGRP_L1;
+ break;
+ case DN_RT_PKT_L2RT:
+ group = DNRNG_NLGRP_L2;
+ break;
+ default:
+ return;
}
skb2 = dnrmg_build_message(skb, &status);
diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
index d1cc2fd..d50a13c 100644
--- a/net/decnet/sysctl_net_decnet.c
+++ b/net/decnet/sysctl_net_decnet.c
@@ -69,14 +69,15 @@ static struct ctl_table_header *dn_skeleton_table_header = NULL;
static void strip_it(char *str)
{
for(;;) {
- switch(*str) {
- case ' ':
- case '\n':
- case '\r':
- case ':':
- *str = 0;
- case 0:
- return;
+ switch (*str) {
+ case ' ':
+ case '\n':
+ case '\r':
+ case ':':
+ *str = 0;
+ /* Fallthrough */
+ case 0:
+ return;
}
str++;
}
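The strip_it() hunk only adds an explicit fall-through annotation: the terminator cases overwrite the byte and then deliberately continue into the case-0 return. A self-contained sketch of the same control flow is below; strip_trailing() and the sample input are illustrative, not kernel code.

#include <stdio.h>

/* Truncate the string at the first terminator character; the write then
 * intentionally falls through into the NUL case that returns. */
static void strip_trailing(char *str)
{
	for (;;) {
		switch (*str) {
		case ' ':
		case '\n':
		case '\r':
		case ':':
			*str = 0;
			/* Fallthrough */
		case 0:
			return;
		}
		str++;
	}
}

int main(void)
{
	char buf[] = "mynode:\n";

	strip_trailing(buf);
	printf("[%s]\n", buf);	/* prints [mynode] */
	return 0;
}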
diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
index c32be29..2022b46 100644
--- a/net/dns_resolver/dns_query.c
+++ b/net/dns_resolver/dns_query.c
@@ -150,7 +150,9 @@ int dns_query(const char *type, const char *name, size_t namelen,
if (!*_result)
goto put;
- memcpy(*_result, upayload->data, len + 1);
+ memcpy(*_result, upayload->data, len);
+ (*_result)[len] = '\0';
+
if (_expiry)
*_expiry = rkey->expiry;
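The dns_query() hunk replaces a copy of len + 1 bytes with a copy of len bytes followed by explicit NUL termination, so the returned string no longer depends on the key payload carrying its own terminator. A minimal userspace sketch of that copy-then-terminate pattern; copy_result() and its arguments are illustrative names, not the kernel API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Copy exactly len payload bytes and terminate the string ourselves,
 * instead of trusting the source buffer to hold a trailing NUL. */
static char *copy_result(const void *payload, size_t len)
{
	char *result = malloc(len + 1);

	if (!result)
		return NULL;
	memcpy(result, payload, len);
	result[len] = '\0';
	return result;
}

int main(void)
{
	char raw[4] = { 'h', 'o', 's', 't' };	/* deliberately not NUL-terminated */
	char *s = copy_result(raw, sizeof(raw));

	if (s) {
		puts(s);	/* prints "host" */
		free(s);
	}
	return 0;
}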
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 3fb14b7..0dc1589 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -12,6 +12,7 @@
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <net/dsa.h>
#include "dsa_priv.h"
diff --git a/net/dsa/mv88e6131.c b/net/dsa/mv88e6131.c
index 45f7411..9bd1061 100644
--- a/net/dsa/mv88e6131.c
+++ b/net/dsa/mv88e6131.c
@@ -118,10 +118,14 @@ static int mv88e6131_setup_global(struct dsa_switch *ds)
REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1100) | 0x00f0);
/*
- * Disable cascade port functionality, and set the switch's
+ * Disable cascade port functionality unless this device
+ * is used in a cascade configuration, and set the switch's
* DSA device number.
*/
- REG_WRITE(REG_GLOBAL, 0x1c, 0xe000 | (ds->index & 0x1f));
+ if (ds->dst->pd->nr_chips > 1)
+ REG_WRITE(REG_GLOBAL, 0x1c, 0xf000 | (ds->index & 0x1f));
+ else
+ REG_WRITE(REG_GLOBAL, 0x1c, 0xe000 | (ds->index & 0x1f));
/*
* Send all frames with destination addresses matching
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 0a47b6c..56cf9b8 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -301,7 +301,6 @@ static const struct net_device_ops dsa_netdev_ops = {
.ndo_start_xmit = dsa_xmit,
.ndo_change_rx_flags = dsa_slave_change_rx_flags,
.ndo_set_rx_mode = dsa_slave_set_rx_mode,
- .ndo_set_multicast_list = dsa_slave_set_rx_mode,
.ndo_set_mac_address = dsa_slave_set_mac_address,
.ndo_do_ioctl = dsa_slave_ioctl,
};
@@ -314,7 +313,6 @@ static const struct net_device_ops edsa_netdev_ops = {
.ndo_start_xmit = edsa_xmit,
.ndo_change_rx_flags = dsa_slave_change_rx_flags,
.ndo_set_rx_mode = dsa_slave_set_rx_mode,
- .ndo_set_multicast_list = dsa_slave_set_rx_mode,
.ndo_set_mac_address = dsa_slave_set_mac_address,
.ndo_do_ioctl = dsa_slave_ioctl,
};
@@ -327,7 +325,6 @@ static const struct net_device_ops trailer_netdev_ops = {
.ndo_start_xmit = trailer_xmit,
.ndo_change_rx_flags = dsa_slave_change_rx_flags,
.ndo_set_rx_mode = dsa_slave_set_rx_mode,
- .ndo_set_multicast_list = dsa_slave_set_rx_mode,
.ndo_set_mac_address = dsa_slave_set_mac_address,
.ndo_do_ioctl = dsa_slave_ioctl,
};
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index a1d9f37..1c1f26c 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -9,6 +9,8 @@
*
*/
+#define pr_fmt(fmt) fmt
+
#include <linux/module.h>
#include <linux/types.h>
@@ -44,7 +46,7 @@
#include <linux/bitops.h>
#include <linux/mutex.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/system.h>
static const struct proto_ops econet_ops;
@@ -63,9 +65,7 @@ static DEFINE_SPINLOCK(aun_queue_lock);
static struct socket *udpsock;
#define AUN_PORT 0x8000
-
-struct aunhdr
-{
+struct aunhdr {
unsigned char code; /* AUN magic protocol byte */
unsigned char port;
unsigned char cb;
@@ -82,8 +82,7 @@ static struct timer_list ab_cleanup_timer;
#endif /* CONFIG_ECONET_AUNUDP */
/* Per-packet information */
-struct ec_cb
-{
+struct ec_cb {
struct sockaddr_ec sec;
unsigned long cookie; /* Supplied by user. */
#ifdef CONFIG_ECONET_AUNUDP
@@ -137,7 +136,7 @@ static int econet_recvmsg(struct kiocb *iocb, struct socket *sock,
* but then it will block.
*/
- skb=skb_recv_datagram(sk,flags,flags&MSG_DONTWAIT,&err);
+ skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
/*
* An error occurred so return it. Because skb_recv_datagram()
@@ -145,7 +144,7 @@ static int econet_recvmsg(struct kiocb *iocb, struct socket *sock,
* retries.
*/
- if(skb==NULL)
+ if (skb == NULL)
goto out;
/*
@@ -154,10 +153,9 @@ static int econet_recvmsg(struct kiocb *iocb, struct socket *sock,
*/
copied = skb->len;
- if (copied > len)
- {
- copied=len;
- msg->msg_flags|=MSG_TRUNC;
+ if (copied > len) {
+ copied = len;
+ msg->msg_flags |= MSG_TRUNC;
}
/* We can't use skb_copy_datagram here */
@@ -186,7 +184,8 @@ out:
* Bind an Econet socket.
*/
-static int econet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+static int econet_bind(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len)
{
struct sockaddr_ec *sec = (struct sockaddr_ec *)uaddr;
struct sock *sk;
@@ -226,9 +225,8 @@ static void tx_result(struct sock *sk, unsigned long cookie, int result)
struct ec_cb *eb;
struct sockaddr_ec *sec;
- if (skb == NULL)
- {
- printk(KERN_DEBUG "ec: memory squeeze, transmit result dropped.\n");
+ if (skb == NULL) {
+ pr_debug("econet: memory squeeze, transmit result dropped\n");
return;
}
@@ -265,7 +263,7 @@ static void ec_tx_done(struct sk_buff *skb, int result)
static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
- struct sockaddr_ec *saddr=(struct sockaddr_ec *)msg->msg_name;
+ struct sockaddr_ec *saddr = (struct sockaddr_ec *)msg->msg_name;
struct net_device *dev;
struct ec_addr addr;
int err;
@@ -298,14 +296,14 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
mutex_lock(&econet_mutex);
- if (saddr == NULL || msg->msg_namelen < sizeof(struct sockaddr_ec)) {
- mutex_unlock(&econet_mutex);
- return -EINVAL;
- }
- addr.station = saddr->addr.station;
- addr.net = saddr->addr.net;
- port = saddr->port;
- cb = saddr->cb;
+ if (saddr == NULL || msg->msg_namelen < sizeof(struct sockaddr_ec)) {
+ mutex_unlock(&econet_mutex);
+ return -EINVAL;
+ }
+ addr.station = saddr->addr.station;
+ addr.net = saddr->addr.net;
+ port = saddr->port;
+ cb = saddr->cb;
/* Look for a device with the right network number. */
dev = net2dev_map[addr.net];
@@ -333,9 +331,9 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
dev_hold(dev);
- skb = sock_alloc_send_skb(sk, len+LL_ALLOCATED_SPACE(dev),
+ skb = sock_alloc_send_skb(sk, len + LL_ALLOCATED_SPACE(dev),
msg->msg_flags & MSG_DONTWAIT, &err);
- if (skb==NULL)
+ if (skb == NULL)
goto out_unlock;
skb_reserve(skb, LL_RESERVED_SPACE(dev));
@@ -355,7 +353,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
struct ec_framehdr *fh;
/* Poke in our control byte and
port number. Hack, hack. */
- fh = (struct ec_framehdr *)(skb->data);
+ fh = (struct ec_framehdr *)skb->data;
fh->cb = cb;
fh->port = port;
if (sock->type != SOCK_DGRAM) {
@@ -365,7 +363,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
}
/* Copy the data. Returns -EFAULT on error */
- err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
+ err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
skb->protocol = proto;
skb->dev = dev;
skb->priority = sk->sk_priority;
@@ -385,9 +383,9 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
mutex_unlock(&econet_mutex);
return len;
- out_free:
+out_free:
kfree_skb(skb);
- out_unlock:
+out_unlock:
if (dev)
dev_put(dev);
#else
@@ -458,15 +456,14 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
goto error_free_buf;
/* Get a skbuff (no data, just holds our cb information) */
- if ((skb = sock_alloc_send_skb(sk, 0,
- msg->msg_flags & MSG_DONTWAIT,
- &err)) == NULL)
+ skb = sock_alloc_send_skb(sk, 0, msg->msg_flags & MSG_DONTWAIT, &err);
+ if (skb == NULL)
goto error_free_buf;
eb = (struct ec_cb *)&skb->cb;
eb->cookie = saddr->cookie;
- eb->timeout = (5*HZ);
+ eb->timeout = 5 * HZ;
eb->start = jiffies;
ah.handle = aun_seq;
eb->seq = (aun_seq++);
@@ -480,9 +477,10 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
udpmsg.msg_iovlen = 2;
udpmsg.msg_control = NULL;
udpmsg.msg_controllen = 0;
- udpmsg.msg_flags=0;
+ udpmsg.msg_flags = 0;
- oldfs = get_fs(); set_fs(KERNEL_DS); /* More privs :-) */
+ oldfs = get_fs();
+ set_fs(KERNEL_DS); /* More privs :-) */
err = sock_sendmsg(udpsock, &udpmsg, size);
set_fs(oldfs);
@@ -530,7 +528,7 @@ static int econet_getname(struct socket *sock, struct sockaddr *uaddr,
static void econet_destroy_timer(unsigned long data)
{
- struct sock *sk=(struct sock *)data;
+ struct sock *sk = (struct sock *)data;
if (!sk_has_allocations(sk)) {
sk_free(sk);
@@ -539,7 +537,7 @@ static void econet_destroy_timer(unsigned long data)
sk->sk_timer.expires = jiffies + 10 * HZ;
add_timer(&sk->sk_timer);
- printk(KERN_DEBUG "econet socket destroy delayed\n");
+ pr_debug("econet: socket destroy delayed\n");
}
/*
@@ -651,7 +649,8 @@ static int ec_dev_ioctl(struct socket *sock, unsigned int cmd, void __user *arg)
if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
return -EFAULT;
- if ((dev = dev_get_by_name(&init_net, ifr.ifr_name)) == NULL)
+ dev = dev_get_by_name(&init_net, ifr.ifr_name);
+ if (dev == NULL)
return -ENODEV;
sec = (struct sockaddr_ec *)&ifr.ifr_addr;
@@ -715,28 +714,26 @@ static int ec_dev_ioctl(struct socket *sock, unsigned int cmd, void __user *arg)
* Handle generic ioctls
*/
-static int econet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+static int econet_ioctl(struct socket *sock, unsigned int cmd,
+ unsigned long arg)
{
struct sock *sk = sock->sk;
void __user *argp = (void __user *)arg;
- switch(cmd) {
- case SIOCGSTAMP:
- return sock_get_timestamp(sk, argp);
+ switch (cmd) {
+ case SIOCGSTAMP:
+ return sock_get_timestamp(sk, argp);
- case SIOCGSTAMPNS:
- return sock_get_timestampns(sk, argp);
+ case SIOCGSTAMPNS:
+ return sock_get_timestampns(sk, argp);
- case SIOCSIFADDR:
- case SIOCGIFADDR:
- return ec_dev_ioctl(sock, cmd, argp);
- break;
+ case SIOCSIFADDR:
+ case SIOCGIFADDR:
+ return ec_dev_ioctl(sock, cmd, argp);
- default:
- return -ENOIOCTLCMD;
}
- /*NOTREACHED*/
- return 0;
+
+ return -ENOIOCTLCMD;
}
static const struct net_proto_family econet_family_ops = {
@@ -836,7 +833,7 @@ static void aun_send_response(__u32 addr, unsigned long seq, int code, int cb)
udpmsg.msg_namelen = sizeof(sin);
udpmsg.msg_control = NULL;
udpmsg.msg_controllen = 0;
- udpmsg.msg_flags=0;
+ udpmsg.msg_flags = 0;
kernel_sendmsg(udpsock, &udpmsg, &iov, 1, sizeof(ah));
}
@@ -859,26 +856,25 @@ static void aun_incoming(struct sk_buff *skb, struct aunhdr *ah, size_t len)
if (dst)
edev = dst->dev->ec_ptr;
- if (! edev)
+ if (!edev)
goto bad;
- if ((sk = ec_listening_socket(ah->port, stn, edev->net)) == NULL)
+ sk = ec_listening_socket(ah->port, stn, edev->net);
+ if (sk == NULL)
goto bad; /* Nobody wants it */
newskb = alloc_skb((len - sizeof(struct aunhdr) + 15) & ~15,
GFP_ATOMIC);
- if (newskb == NULL)
- {
- printk(KERN_DEBUG "AUN: memory squeeze, dropping packet.\n");
+ if (newskb == NULL) {
+ pr_debug("AUN: memory squeeze, dropping packet\n");
/* Send nack and hope sender tries again */
goto bad;
}
- memcpy(skb_put(newskb, len - sizeof(struct aunhdr)), (void *)(ah+1),
+ memcpy(skb_put(newskb, len - sizeof(struct aunhdr)), (void *)(ah + 1),
len - sizeof(struct aunhdr));
- if (ec_queue_packet(sk, newskb, stn, edev->net, ah->cb, ah->port))
- {
+ if (ec_queue_packet(sk, newskb, stn, edev->net, ah->cb, ah->port)) {
/* Socket is bankrupt. */
kfree_skb(newskb);
goto bad;
@@ -914,7 +910,7 @@ static void aun_tx_ack(unsigned long seq, int result)
goto foundit;
}
spin_unlock_irqrestore(&aun_queue_lock, flags);
- printk(KERN_DEBUG "AUN: unknown sequence %ld\n", seq);
+ pr_debug("AUN: unknown sequence %ld\n", seq);
return;
foundit:
@@ -939,18 +935,17 @@ static void aun_data_available(struct sock *sk, int slen)
while ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL) {
if (err == -EAGAIN) {
- printk(KERN_ERR "AUN: no data available?!");
+ pr_err("AUN: no data available?!\n");
return;
}
- printk(KERN_DEBUG "AUN: recvfrom() error %d\n", -err);
+ pr_debug("AUN: recvfrom() error %d\n", -err);
}
data = skb_transport_header(skb) + sizeof(struct udphdr);
ah = (struct aunhdr *)data;
len = skb->len - sizeof(struct udphdr);
- switch (ah->code)
- {
+ switch (ah->code) {
case 2:
aun_incoming(skb, ah, len);
break;
@@ -961,7 +956,7 @@ static void aun_data_available(struct sock *sk, int slen)
aun_tx_ack(ah->handle, ECTYPE_TRANSMIT_NOT_LISTENING);
break;
default:
- printk(KERN_DEBUG "unknown AUN packet (type %d)\n", data[0]);
+ pr_debug("AUN: unknown packet type: %d\n", data[0]);
}
skb_free_datagram(sk, skb);
@@ -991,7 +986,7 @@ static void ab_cleanup(unsigned long h)
}
spin_unlock_irqrestore(&aun_queue_lock, flags);
- mod_timer(&ab_cleanup_timer, jiffies + (HZ*2));
+ mod_timer(&ab_cleanup_timer, jiffies + (HZ * 2));
}
static int __init aun_udp_initialise(void)
@@ -1001,7 +996,7 @@ static int __init aun_udp_initialise(void)
skb_queue_head_init(&aun_queue);
setup_timer(&ab_cleanup_timer, ab_cleanup, 0);
- ab_cleanup_timer.expires = jiffies + (HZ*2);
+ ab_cleanup_timer.expires = jiffies + (HZ * 2);
add_timer(&ab_cleanup_timer);
memset(&sin, 0, sizeof(sin));
@@ -1009,9 +1004,9 @@ static int __init aun_udp_initialise(void)
/* We can count ourselves lucky Acorn machines are too dim to
speak IPv6. :-) */
- if ((error = sock_create_kern(PF_INET, SOCK_DGRAM, 0, &udpsock)) < 0)
- {
- printk("AUN: socket error %d\n", -error);
+ error = sock_create_kern(PF_INET, SOCK_DGRAM, 0, &udpsock);
+ if (error < 0) {
+ pr_err("AUN: socket error %d\n", -error);
return error;
}
@@ -1020,10 +1015,9 @@ static int __init aun_udp_initialise(void)
from interrupts */
error = udpsock->ops->bind(udpsock, (struct sockaddr *)&sin,
- sizeof(sin));
- if (error < 0)
- {
- printk("AUN: bind error %d\n", -error);
+ sizeof(sin));
+ if (error < 0) {
+ pr_err("AUN: bind error %d\n", -error);
goto release;
}
@@ -1044,7 +1038,8 @@ release:
* Receive an Econet frame from a device.
*/
-static int econet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
+static int econet_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
{
struct ec_framehdr *hdr;
struct sock *sk = NULL;
@@ -1059,13 +1054,14 @@ static int econet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet
if (!edev)
goto drop;
- if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (skb == NULL)
return NET_RX_DROP;
if (!pskb_may_pull(skb, sizeof(struct ec_framehdr)))
goto drop;
- hdr = (struct ec_framehdr *) skb->data;
+ hdr = (struct ec_framehdr *)skb->data;
/* First check for encapsulated IP */
if (hdr->port == EC_PORT_IP) {
@@ -1093,8 +1089,8 @@ drop:
}
static struct packet_type econet_packet_type __read_mostly = {
- .type = cpu_to_be16(ETH_P_ECONET),
- .func = econet_rcv,
+ .type = cpu_to_be16(ETH_P_ECONET),
+ .func = econet_rcv,
};
static void econet_hw_initialise(void)
@@ -1104,9 +1100,10 @@ static void econet_hw_initialise(void)
#endif
-static int econet_notifier(struct notifier_block *this, unsigned long msg, void *data)
+static int econet_notifier(struct notifier_block *this, unsigned long msg,
+ void *data)
{
- struct net_device *dev = (struct net_device *)data;
+ struct net_device *dev = data;
struct ec_device *edev;
if (!net_eq(dev_net(dev), &init_net))
@@ -1116,8 +1113,7 @@ static int econet_notifier(struct notifier_block *this, unsigned long msg, void
case NETDEV_UNREGISTER:
/* A device has gone down - kill any data we hold for it. */
edev = dev->ec_ptr;
- if (edev)
- {
+ if (edev) {
if (net2dev_map[0] == dev)
net2dev_map[0] = NULL;
net2dev_map[edev->net] = NULL;
@@ -1131,7 +1127,7 @@ static int econet_notifier(struct notifier_block *this, unsigned long msg, void
}
static struct notifier_block econet_netdev_notifier = {
- .notifier_call =econet_notifier,
+ .notifier_call = econet_notifier,
};
static void __exit econet_proto_exit(void)
diff --git a/net/ieee802154/Kconfig b/net/ieee802154/Kconfig
index 1c1de97..7dee650 100644
--- a/net/ieee802154/Kconfig
+++ b/net/ieee802154/Kconfig
@@ -10,3 +10,9 @@ config IEEE802154
Say Y here to compile LR-WPAN support into the kernel or say M to
compile it as modules.
+
+config IEEE802154_6LOWPAN
+ tristate "6lowpan support over IEEE 802.15.4"
+ depends on IEEE802154 && IPV6
+ ---help---
+ IPv6 compression over IEEE 802.15.4.
diff --git a/net/ieee802154/Makefile b/net/ieee802154/Makefile
index 5761185..d7716d6 100644
--- a/net/ieee802154/Makefile
+++ b/net/ieee802154/Makefile
@@ -1,3 +1,5 @@
-obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o
-ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o
-af_802154-y := af_ieee802154.o raw.o dgram.o
+obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o
+obj-$(CONFIG_IEEE802154_6LOWPAN) += 6lowpan.o
+
+ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o
+af_802154-y := af_ieee802154.o raw.o dgram.o
diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c
index 6df6ecf..40e606f 100644
--- a/net/ieee802154/af_ieee802154.c
+++ b/net/ieee802154/af_ieee802154.c
@@ -302,7 +302,7 @@ static int ieee802154_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
if (!netif_running(dev))
- return -ENODEV;
+ goto drop;
pr_debug("got frame, type %d, dev %p\n", dev->type, dev);
#ifdef DEBUG
print_hex_dump_bytes("ieee802154_rcv ", DUMP_PREFIX_NONE, skb->data, skb->len);
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index 1a3334c..faecf64 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -1,5 +1,5 @@
/*
- * ZigBee socket interface
+ * IEEE 802.15.4 dgram socket interface
*
* Copyright 2007, 2008 Siemens AG
*
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
index 71ee110..adaf462 100644
--- a/net/ieee802154/nl-mac.c
+++ b/net/ieee802154/nl-mac.c
@@ -30,6 +30,7 @@
#include <net/genetlink.h>
#include <net/sock.h>
#include <linux/nl802154.h>
+#include <linux/export.h>
#include <net/af_ieee802154.h>
#include <net/nl802154.h>
#include <net/ieee802154.h>
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index 02548b2..c64a38d 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -24,6 +24,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/if_arp.h>
#include <net/netlink.h>
#include <net/genetlink.h>
#include <net/wpan-phy.h>
@@ -213,12 +214,37 @@ static int ieee802154_add_iface(struct sk_buff *skb,
goto nla_put_failure;
}
+ if (info->attrs[IEEE802154_ATTR_HW_ADDR] &&
+ nla_len(info->attrs[IEEE802154_ATTR_HW_ADDR]) !=
+ IEEE802154_ADDR_LEN) {
+ rc = -EINVAL;
+ goto nla_put_failure;
+ }
+
dev = phy->add_iface(phy, devname);
if (IS_ERR(dev)) {
rc = PTR_ERR(dev);
goto nla_put_failure;
}
+ if (info->attrs[IEEE802154_ATTR_HW_ADDR]) {
+ struct sockaddr addr;
+
+ addr.sa_family = ARPHRD_IEEE802154;
+ nla_memcpy(&addr.sa_data, info->attrs[IEEE802154_ATTR_HW_ADDR],
+ IEEE802154_ADDR_LEN);
+
+ /*
+ * strangely enough, some callbacks (inetdev_event) from
+ * dev_set_mac_address require RTNL_LOCK
+ */
+ rtnl_lock();
+ rc = dev_set_mac_address(dev, &addr);
+ rtnl_unlock();
+ if (rc)
+ goto dev_unregister;
+ }
+
NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy));
NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
@@ -228,6 +254,11 @@ static int ieee802154_add_iface(struct sk_buff *skb,
return ieee802154_nl_reply(msg, info);
+dev_unregister:
+ rtnl_lock(); /* del_iface must be called with RTNL lock */
+ phy->del_iface(phy, dev);
+ dev_put(dev);
+ rtnl_unlock();
nla_put_failure:
nlmsg_free(msg);
out_dev:
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index f5fdfcb..7d3b438 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -199,6 +199,19 @@ config MAC80211_VERBOSE_MPL_DEBUG
Do not select this option.
+config MAC80211_VERBOSE_MPATH_DEBUG
+ bool "Verbose mesh path debugging"
+ depends on MAC80211_DEBUG_MENU
+ depends on MAC80211_MESH
+ ---help---
+ Selecting this option causes mac80211 to print out very
+ verbose mesh path selection debugging messages (when mac80211
+ is taking part in a mesh network).
+ It should not be selected on production systems as those
+ messages are remotely triggerable.
+
+ Do not select this option.
+
config MAC80211_VERBOSE_MHWMP_DEBUG
bool "Verbose mesh HWMP routing debugging"
depends on MAC80211_DEBUG_MENU
@@ -212,6 +225,18 @@ config MAC80211_VERBOSE_MHWMP_DEBUG
Do not select this option.
+config MAC80211_VERBOSE_TDLS_DEBUG
+ bool "Verbose TDLS debugging"
+ depends on MAC80211_DEBUG_MENU
+ ---help---
+ Selecting this option causes mac80211 to print out very
+ verbose TDLS selection debugging messages (when mac80211
+ is a TDLS STA).
+ It should not be selected on production systems as those
+ messages are remotely triggerable.
+
+ Do not select this option.
+
config MAC80211_DEBUG_COUNTERS
bool "Extra statistics for TX/RX debugging"
depends on MAC80211_DEBUG_MENU
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c
index b9b595c..0785e95 100644
--- a/net/mac80211/aes_ccm.c
+++ b/net/mac80211/aes_ccm.c
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
+#include <crypto/aes.h>
#include <net/mac80211.h>
#include "key.h"
@@ -21,21 +22,21 @@ static void aes_ccm_prepare(struct crypto_cipher *tfm, u8 *scratch, u8 *a)
int i;
u8 *b_0, *aad, *b, *s_0;
- b_0 = scratch + 3 * AES_BLOCK_LEN;
- aad = scratch + 4 * AES_BLOCK_LEN;
+ b_0 = scratch + 3 * AES_BLOCK_SIZE;
+ aad = scratch + 4 * AES_BLOCK_SIZE;
b = scratch;
- s_0 = scratch + AES_BLOCK_LEN;
+ s_0 = scratch + AES_BLOCK_SIZE;
crypto_cipher_encrypt_one(tfm, b, b_0);
/* Extra Authenticate-only data (always two AES blocks) */
- for (i = 0; i < AES_BLOCK_LEN; i++)
+ for (i = 0; i < AES_BLOCK_SIZE; i++)
aad[i] ^= b[i];
crypto_cipher_encrypt_one(tfm, b, aad);
- aad += AES_BLOCK_LEN;
+ aad += AES_BLOCK_SIZE;
- for (i = 0; i < AES_BLOCK_LEN; i++)
+ for (i = 0; i < AES_BLOCK_SIZE; i++)
aad[i] ^= b[i];
crypto_cipher_encrypt_one(tfm, a, aad);
@@ -57,12 +58,12 @@ void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch,
u8 *pos, *cpos, *b, *s_0, *e, *b_0;
b = scratch;
- s_0 = scratch + AES_BLOCK_LEN;
- e = scratch + 2 * AES_BLOCK_LEN;
- b_0 = scratch + 3 * AES_BLOCK_LEN;
+ s_0 = scratch + AES_BLOCK_SIZE;
+ e = scratch + 2 * AES_BLOCK_SIZE;
+ b_0 = scratch + 3 * AES_BLOCK_SIZE;
- num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN);
- last_len = data_len % AES_BLOCK_LEN;
+ num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_SIZE);
+ last_len = data_len % AES_BLOCK_SIZE;
aes_ccm_prepare(tfm, scratch, b);
/* Process payload blocks */
@@ -70,7 +71,7 @@ void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch,
cpos = cdata;
for (j = 1; j <= num_blocks; j++) {
int blen = (j == num_blocks && last_len) ?
- last_len : AES_BLOCK_LEN;
+ last_len : AES_BLOCK_SIZE;
/* Authentication followed by encryption */
for (i = 0; i < blen; i++)
@@ -96,12 +97,12 @@ int ieee80211_aes_ccm_decrypt(struct crypto_cipher *tfm, u8 *scratch,
u8 *pos, *cpos, *b, *s_0, *a, *b_0;
b = scratch;
- s_0 = scratch + AES_BLOCK_LEN;
- a = scratch + 2 * AES_BLOCK_LEN;
- b_0 = scratch + 3 * AES_BLOCK_LEN;
+ s_0 = scratch + AES_BLOCK_SIZE;
+ a = scratch + 2 * AES_BLOCK_SIZE;
+ b_0 = scratch + 3 * AES_BLOCK_SIZE;
- num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN);
- last_len = data_len % AES_BLOCK_LEN;
+ num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_SIZE);
+ last_len = data_len % AES_BLOCK_SIZE;
aes_ccm_prepare(tfm, scratch, a);
/* Process payload blocks */
@@ -109,7 +110,7 @@ int ieee80211_aes_ccm_decrypt(struct crypto_cipher *tfm, u8 *scratch,
pos = data;
for (j = 1; j <= num_blocks; j++) {
int blen = (j == num_blocks && last_len) ?
- last_len : AES_BLOCK_LEN;
+ last_len : AES_BLOCK_SIZE;
/* Decryption followed by authentication */
b_0[14] = (j >> 8) & 0xff;
diff --git a/net/mac80211/aes_ccm.h b/net/mac80211/aes_ccm.h
index 6e7820e..5b7d744 100644
--- a/net/mac80211/aes_ccm.h
+++ b/net/mac80211/aes_ccm.h
@@ -12,8 +12,6 @@
#include <linux/crypto.h>
-#define AES_BLOCK_LEN 16
-
struct crypto_cipher *ieee80211_aes_key_setup_encrypt(const u8 key[]);
void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch,
u8 *data, size_t data_len,
diff --git a/net/mac80211/aes_cmac.c b/net/mac80211/aes_cmac.c
index d502b26..8dfd70d 100644
--- a/net/mac80211/aes_cmac.c
+++ b/net/mac80211/aes_cmac.c
@@ -11,12 +11,12 @@
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
+#include <crypto/aes.h>
#include <net/mac80211.h>
#include "key.h"
#include "aes_cmac.h"
-#define AES_BLOCK_SIZE 16
#define AES_CMAC_KEY_LEN 16
#define CMAC_TLEN 8 /* CMAC TLen = 64 bits (8 octets) */
#define AAD_LEN 20
@@ -35,10 +35,10 @@ static void gf_mulx(u8 *pad)
}
-static void aes_128_cmac_vector(struct crypto_cipher *tfm, u8 *scratch,
- size_t num_elem,
+static void aes_128_cmac_vector(struct crypto_cipher *tfm, size_t num_elem,
const u8 *addr[], const size_t *len, u8 *mac)
{
+ u8 scratch[2 * AES_BLOCK_SIZE];
u8 *cbc, *pad;
const u8 *pos, *end;
size_t i, e, left, total_len;
@@ -95,7 +95,7 @@ static void aes_128_cmac_vector(struct crypto_cipher *tfm, u8 *scratch,
}
-void ieee80211_aes_cmac(struct crypto_cipher *tfm, u8 *scratch, const u8 *aad,
+void ieee80211_aes_cmac(struct crypto_cipher *tfm, const u8 *aad,
const u8 *data, size_t data_len, u8 *mic)
{
const u8 *addr[3];
@@ -110,7 +110,7 @@ void ieee80211_aes_cmac(struct crypto_cipher *tfm, u8 *scratch, const u8 *aad,
addr[2] = zero;
len[2] = CMAC_TLEN;
- aes_128_cmac_vector(tfm, scratch, 3, addr, len, mic);
+ aes_128_cmac_vector(tfm, 3, addr, len, mic);
}
diff --git a/net/mac80211/aes_cmac.h b/net/mac80211/aes_cmac.h
index 0eb9a48..20785a6 100644
--- a/net/mac80211/aes_cmac.h
+++ b/net/mac80211/aes_cmac.h
@@ -12,7 +12,7 @@
#include <linux/crypto.h>
struct crypto_cipher * ieee80211_aes_cmac_key_setup(const u8 key[]);
-void ieee80211_aes_cmac(struct crypto_cipher *tfm, u8 *scratch, const u8 *aad,
+void ieee80211_aes_cmac(struct crypto_cipher *tfm, const u8 *aad,
const u8 *data, size_t data_len, u8 *mic);
void ieee80211_aes_cmac_key_free(struct crypto_cipher *tfm);
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 1a41b14..6174785 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -38,6 +38,7 @@
#include <linux/ieee80211.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
@@ -48,8 +49,6 @@ static void ieee80211_free_tid_rx(struct rcu_head *h)
container_of(h, struct tid_ampdu_rx, rcu_head);
int i;
- del_timer_sync(&tid_rx->reorder_timer);
-
for (i = 0; i < tid_rx->buf_size; i++)
dev_kfree_skb(tid_rx->reorder_buf[i]);
kfree(tid_rx->reorder_buf);
@@ -71,7 +70,7 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
if (!tid_rx)
return;
- rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], NULL);
+ RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], NULL);
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n",
@@ -90,6 +89,12 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
del_timer_sync(&tid_rx->session_timer);
+ /* make sure ieee80211_sta_reorder_release() doesn't re-arm the timer */
+ spin_lock_bh(&tid_rx->reorder_lock);
+ tid_rx->removed = true;
+ spin_unlock_bh(&tid_rx->reorder_lock);
+ del_timer_sync(&tid_rx->reorder_timer);
+
call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx);
}
@@ -101,6 +106,29 @@ void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
mutex_unlock(&sta->ampdu_mlme.mtx);
}
+void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap,
+ const u8 *addr)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct sta_info *sta;
+ int i;
+
+ rcu_read_lock();
+ sta = sta_info_get(sdata, addr);
+ if (!sta) {
+ rcu_read_unlock();
+ return;
+ }
+
+ for (i = 0; i < STA_TID_NUM; i++)
+ if (ba_rx_bitmap & BIT(i))
+ set_bit(i, sta->ampdu_mlme.tid_rx_stop_requested);
+
+ ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work);
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(ieee80211_stop_rx_ba_session);
+
/*
* After accepting the AddBA Request we activated a timer,
* resetting it after each frame that arrives from the originator.
@@ -145,12 +173,8 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d
u16 capab;
skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
-
- if (!skb) {
- printk(KERN_DEBUG "%s: failed to allocate buffer "
- "for addba resp frame\n", sdata->name);
+ if (!skb)
return;
- }
skb_reserve(skb, local->hw.extra_tx_headroom);
mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
@@ -205,7 +229,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
status = WLAN_STATUS_REQUEST_DECLINED;
- if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) {
+ if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Suspend in progress. "
"Denying ADDBA request\n");
@@ -248,19 +272,17 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
"%pM on tid %u\n",
mgmt->sa, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
- goto end;
+
+ /* delete existing Rx BA session on the same tid */
+ ___ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT,
+ WLAN_STATUS_UNSPECIFIED_QOS,
+ false);
}
/* prepare A-MPDU MLME for Rx aggregation */
tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_KERNEL);
- if (!tid_agg_rx) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- if (net_ratelimit())
- printk(KERN_ERR "allocate rx mlme to tid %d failed\n",
- tid);
-#endif
+ if (!tid_agg_rx)
goto end;
- }
spin_lock_init(&tid_agg_rx->reorder_lock);
@@ -280,11 +302,6 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
tid_agg_rx->reorder_time =
kcalloc(buf_size, sizeof(unsigned long), GFP_KERNEL);
if (!tid_agg_rx->reorder_buf || !tid_agg_rx->reorder_time) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- if (net_ratelimit())
- printk(KERN_ERR "can not allocate reordering buffer "
- "to tid %d\n", tid);
-#endif
kfree(tid_agg_rx->reorder_buf);
kfree(tid_agg_rx->reorder_time);
kfree(tid_agg_rx);
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index b7f4f5c..2e4b961 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -15,6 +15,7 @@
#include <linux/ieee80211.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
@@ -68,11 +69,9 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
- if (!skb) {
- printk(KERN_ERR "%s: failed to allocate buffer "
- "for addba request frame\n", sdata->name);
+ if (!skb)
return;
- }
+
skb_reserve(skb, local->hw.extra_tx_headroom);
mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
memset(mgmt, 0, 24);
@@ -106,19 +105,18 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
ieee80211_tx_skb(sdata, skb);
}
-void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn)
+void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn)
{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
struct ieee80211_bar *bar;
u16 bar_control = 0;
skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
- if (!skb) {
- printk(KERN_ERR "%s: failed to allocate buffer for "
- "bar frame\n", sdata->name);
+ if (!skb)
return;
- }
+
skb_reserve(skb, local->hw.extra_tx_headroom);
bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar));
memset(bar, 0, sizeof(*bar));
@@ -128,13 +126,14 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
memcpy(bar->ta, sdata->vif.addr, ETH_ALEN);
bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
- bar_control |= (u16)(tid << 12);
+ bar_control |= (u16)(tid << IEEE80211_BAR_CTRL_TID_INFO_SHIFT);
bar->control = cpu_to_le16(bar_control);
bar->start_seq_num = cpu_to_le16(ssn);
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
ieee80211_tx_skb(sdata, skb);
}
+EXPORT_SYMBOL(ieee80211_send_bar);
void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
struct tid_ampdu_tx *tid_tx)
@@ -412,7 +411,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
return -EINVAL;
if ((tid >= STA_TID_NUM) ||
- !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION))
+ !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) ||
+ (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW))
return -EINVAL;
#ifdef CONFIG_MAC80211_HT_DEBUG
@@ -431,7 +431,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
sdata->vif.type != NL80211_IFTYPE_AP)
return -EINVAL;
- if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) {
+ if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "BA sessions blocked. "
"Denying BA session request\n");
@@ -461,11 +461,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
/* prepare A-MPDU MLME for Tx aggregation */
tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
if (!tid_tx) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- if (net_ratelimit())
- printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
- tid);
-#endif
ret = -ENOMEM;
goto err_unlock_sta;
}
@@ -590,14 +585,9 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
struct ieee80211_ra_tid *ra_tid;
struct sk_buff *skb = dev_alloc_skb(0);
- if (unlikely(!skb)) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- if (net_ratelimit())
- printk(KERN_WARNING "%s: Not enough memory, "
- "dropping start BA session", sdata->name);
-#endif
+ if (unlikely(!skb))
return;
- }
+
ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
memcpy(&ra_tid->ra, ra, ETH_ALEN);
ra_tid->tid = tid;
@@ -743,14 +733,9 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
struct ieee80211_ra_tid *ra_tid;
struct sk_buff *skb = dev_alloc_skb(0);
- if (unlikely(!skb)) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- if (net_ratelimit())
- printk(KERN_WARNING "%s: Not enough memory, "
- "dropping stop BA session", sdata->name);
-#endif
+ if (unlikely(!skb))
return;
- }
+
ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
memcpy(&ra_tid->ra, ra, ETH_ALEN);
ra_tid->tid = tid;
@@ -809,17 +794,14 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
goto out;
}
+ /*
+ * IEEE 802.11-2007 7.3.1.14:
+ * In an ADDBA Response frame, when the Status Code field
+ * is set to 0, the Buffer Size subfield is set to a value
+ * of at least 1.
+ */
if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
- == WLAN_STATUS_SUCCESS) {
- /*
- * IEEE 802.11-2007 7.3.1.14:
- * In an ADDBA Response frame, when the Status Code field
- * is set to 0, the Buffer Size subfield is set to a value
- * of at least 1.
- */
- if (!buf_size)
- goto out;
-
+ == WLAN_STATUS_SUCCESS && buf_size) {
if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED,
&tid_tx->state)) {
/* ignore duplicate response */
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 143a006..11cee76 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <linux/rcupdate.h>
+#include <linux/if_ether.h>
#include <net/cfg80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
@@ -62,7 +63,7 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
if (type == NL80211_IFTYPE_AP_VLAN &&
params && params->use_4addr == 0)
- rcu_assign_pointer(sdata->u.vlan.sta, NULL);
+ RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);
else if (type == NL80211_IFTYPE_STATION &&
params && params->use_4addr >= 0)
sdata->u.mgd.use_4addr = params->use_4addr;
@@ -209,6 +210,7 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
u8 seq[6] = {0};
struct key_params params;
struct ieee80211_key *key = NULL;
+ u64 pn64;
u32 iv32;
u16 iv16;
int err = -ENOENT;
@@ -256,22 +258,24 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
params.seq_len = 6;
break;
case WLAN_CIPHER_SUITE_CCMP:
- seq[0] = key->u.ccmp.tx_pn[5];
- seq[1] = key->u.ccmp.tx_pn[4];
- seq[2] = key->u.ccmp.tx_pn[3];
- seq[3] = key->u.ccmp.tx_pn[2];
- seq[4] = key->u.ccmp.tx_pn[1];
- seq[5] = key->u.ccmp.tx_pn[0];
+ pn64 = atomic64_read(&key->u.ccmp.tx_pn);
+ seq[0] = pn64;
+ seq[1] = pn64 >> 8;
+ seq[2] = pn64 >> 16;
+ seq[3] = pn64 >> 24;
+ seq[4] = pn64 >> 32;
+ seq[5] = pn64 >> 40;
params.seq = seq;
params.seq_len = 6;
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
- seq[0] = key->u.aes_cmac.tx_pn[5];
- seq[1] = key->u.aes_cmac.tx_pn[4];
- seq[2] = key->u.aes_cmac.tx_pn[3];
- seq[3] = key->u.aes_cmac.tx_pn[2];
- seq[4] = key->u.aes_cmac.tx_pn[1];
- seq[5] = key->u.aes_cmac.tx_pn[0];
+ pn64 = atomic64_read(&key->u.aes_cmac.tx_pn);
+ seq[0] = pn64;
+ seq[1] = pn64 >> 8;
+ seq[2] = pn64 >> 16;
+ seq[3] = pn64 >> 24;
+ seq[4] = pn64 >> 32;
+ seq[5] = pn64 >> 40;
params.seq = seq;
params.seq_len = 6;
break;
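The two hunks above stop reading a per-byte tx_pn[] array and instead read an atomic 64-bit packet number, emitting its low 48 bits least-significant byte first. A minimal sketch of that byte extraction follows; pn64_to_seq() is an illustrative name, not a mac80211 function.

#include <stdint.h>
#include <stdio.h>

/* Write the low 48 bits of a 64-bit packet number into seq[],
 * least-significant byte first, matching the unrolled shifts above. */
static void pn64_to_seq(uint64_t pn64, uint8_t seq[6])
{
	int i;

	for (i = 0; i < 6; i++)
		seq[i] = pn64 >> (8 * i);
}

int main(void)
{
	uint8_t seq[6];

	pn64_to_seq(0x0000010203040506ULL, seq);
	printf("%02x %02x\n", seq[0], seq[5]);	/* prints "06 01" */
	return 0;
}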
@@ -340,7 +344,8 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
STATION_INFO_RX_BITRATE |
STATION_INFO_RX_DROP_MISC |
STATION_INFO_BSS_PARAM |
- STATION_INFO_CONNECTED_TIME;
+ STATION_INFO_CONNECTED_TIME |
+ STATION_INFO_STA_FLAGS;
do_posix_clock_monotonic_gettime(&uptime);
sinfo->connected_time = uptime.tv_sec - sta->last_connected;
@@ -400,6 +405,23 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME;
sinfo->bss_param.dtim_period = sdata->local->hw.conf.ps_dtim_period;
sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int;
+
+ sinfo->sta_flags.set = 0;
+ sinfo->sta_flags.mask = BIT(NL80211_STA_FLAG_AUTHORIZED) |
+ BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) |
+ BIT(NL80211_STA_FLAG_WME) |
+ BIT(NL80211_STA_FLAG_MFP) |
+ BIT(NL80211_STA_FLAG_AUTHENTICATED);
+ if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+ sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHORIZED);
+ if (test_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE))
+ sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_SHORT_PREAMBLE);
+ if (test_sta_flag(sta, WLAN_STA_WME))
+ sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_WME);
+ if (test_sta_flag(sta, WLAN_STA_MFP))
+ sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_MFP);
+ if (test_sta_flag(sta, WLAN_STA_AUTH))
+ sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHENTICATED);
}
@@ -452,6 +474,20 @@ static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev,
return ret;
}
+static void ieee80211_config_ap_ssid(struct ieee80211_sub_if_data *sdata,
+ struct beacon_parameters *params)
+{
+ struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
+
+ bss_conf->ssid_len = params->ssid_len;
+
+ if (params->ssid_len)
+ memcpy(bss_conf->ssid, params->ssid, params->ssid_len);
+
+ bss_conf->hidden_ssid =
+ (params->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE);
+}
+
/*
* This handles both adding a beacon and setting new beacon info
*/
@@ -545,8 +581,11 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
kfree(old);
+ ieee80211_config_ap_ssid(sdata, params);
+
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
- BSS_CHANGED_BEACON);
+ BSS_CHANGED_BEACON |
+ BSS_CHANGED_SSID);
return 0;
}
@@ -591,7 +630,7 @@ static int ieee80211_del_beacon(struct wiphy *wiphy, struct net_device *dev)
if (!old)
return -ENOENT;
- rcu_assign_pointer(sdata->u.ap.beacon, NULL);
+ RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
synchronize_rcu();
kfree(old);
@@ -647,7 +686,6 @@ static void sta_apply_parameters(struct ieee80211_local *local,
struct sta_info *sta,
struct station_parameters *params)
{
- unsigned long flags;
u32 rates;
int i, j;
struct ieee80211_supported_band *sband;
@@ -656,40 +694,58 @@ static void sta_apply_parameters(struct ieee80211_local *local,
sband = local->hw.wiphy->bands[local->oper_channel->band];
- spin_lock_irqsave(&sta->flaglock, flags);
mask = params->sta_flags_mask;
set = params->sta_flags_set;
if (mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) {
- sta->flags &= ~WLAN_STA_AUTHORIZED;
if (set & BIT(NL80211_STA_FLAG_AUTHORIZED))
- sta->flags |= WLAN_STA_AUTHORIZED;
+ set_sta_flag(sta, WLAN_STA_AUTHORIZED);
+ else
+ clear_sta_flag(sta, WLAN_STA_AUTHORIZED);
}
if (mask & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE)) {
- sta->flags &= ~WLAN_STA_SHORT_PREAMBLE;
if (set & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE))
- sta->flags |= WLAN_STA_SHORT_PREAMBLE;
+ set_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE);
+ else
+ clear_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE);
}
if (mask & BIT(NL80211_STA_FLAG_WME)) {
- sta->flags &= ~WLAN_STA_WME;
- if (set & BIT(NL80211_STA_FLAG_WME))
- sta->flags |= WLAN_STA_WME;
+ if (set & BIT(NL80211_STA_FLAG_WME)) {
+ set_sta_flag(sta, WLAN_STA_WME);
+ sta->sta.wme = true;
+ } else {
+ clear_sta_flag(sta, WLAN_STA_WME);
+ sta->sta.wme = false;
+ }
}
if (mask & BIT(NL80211_STA_FLAG_MFP)) {
- sta->flags &= ~WLAN_STA_MFP;
if (set & BIT(NL80211_STA_FLAG_MFP))
- sta->flags |= WLAN_STA_MFP;
+ set_sta_flag(sta, WLAN_STA_MFP);
+ else
+ clear_sta_flag(sta, WLAN_STA_MFP);
}
if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED)) {
- sta->flags &= ~WLAN_STA_AUTH;
if (set & BIT(NL80211_STA_FLAG_AUTHENTICATED))
- sta->flags |= WLAN_STA_AUTH;
+ set_sta_flag(sta, WLAN_STA_AUTH);
+ else
+ clear_sta_flag(sta, WLAN_STA_AUTH);
+ }
+
+ if (mask & BIT(NL80211_STA_FLAG_TDLS_PEER)) {
+ if (set & BIT(NL80211_STA_FLAG_TDLS_PEER))
+ set_sta_flag(sta, WLAN_STA_TDLS_PEER);
+ else
+ clear_sta_flag(sta, WLAN_STA_TDLS_PEER);
+ }
+
+ if (params->sta_modify_mask & STATION_PARAM_APPLY_UAPSD) {
+ sta->sta.uapsd_queues = params->uapsd_queues;
+ sta->sta.max_sp = params->max_sp;
}
- spin_unlock_irqrestore(&sta->flaglock, flags);
/*
* cfg80211 validates this (1-2007) and allows setting the AID
@@ -776,11 +832,18 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
if (is_multicast_ether_addr(mac))
return -EINVAL;
+ /* Only TDLS-supporting stations can add TDLS peers */
+ if ((params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
+ !((wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
+ sdata->vif.type == NL80211_IFTYPE_STATION))
+ return -ENOTSUPP;
+
sta = sta_info_alloc(sdata, mac, GFP_KERNEL);
if (!sta)
return -ENOMEM;
- sta->flags = WLAN_STA_AUTH | WLAN_STA_ASSOC;
+ set_sta_flag(sta, WLAN_STA_AUTH);
+ set_sta_flag(sta, WLAN_STA_ASSOC);
sta_apply_parameters(local, sta, params);
@@ -836,6 +899,14 @@ static int ieee80211_change_station(struct wiphy *wiphy,
return -ENOENT;
}
+ /* The TDLS bit cannot be toggled after the STA was added */
+ if ((params->sta_flags_mask & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
+ !!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) !=
+ !!test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
if (params->vlan && params->vlan != sta->sdata->dev) {
vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
@@ -912,7 +983,7 @@ static int ieee80211_del_mpath(struct wiphy *wiphy, struct net_device *dev,
if (dst)
return mesh_path_del(dst, sdata);
- mesh_path_flush(sdata);
+ mesh_path_flush_by_iface(sdata);
return 0;
}
@@ -1131,6 +1202,22 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
conf->dot11MeshHWMPRootMode = nconf->dot11MeshHWMPRootMode;
ieee80211_mesh_root_setup(ifmsh);
}
+ if (_chg_mesh_attr(NL80211_MESHCONF_GATE_ANNOUNCEMENTS, mask)) {
+ /* our current gate announcement implementation rides on root
+ * announcements, so require this ifmsh to also be a root node
+ */
+ if (nconf->dot11MeshGateAnnouncementProtocol &&
+ !conf->dot11MeshHWMPRootMode) {
+ conf->dot11MeshHWMPRootMode = 1;
+ ieee80211_mesh_root_setup(ifmsh);
+ }
+ conf->dot11MeshGateAnnouncementProtocol =
+ nconf->dot11MeshGateAnnouncementProtocol;
+ }
+ if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_RANN_INTERVAL, mask)) {
+ conf->dot11MeshHWMPRannInterval =
+ nconf->dot11MeshHWMPRannInterval;
+ }
return 0;
}
@@ -1229,9 +1316,11 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
}
static int ieee80211_set_txq_params(struct wiphy *wiphy,
+ struct net_device *dev,
struct ieee80211_txq_params *params)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_tx_queue_params p;
if (!local->ops->conf_tx)
@@ -1249,7 +1338,11 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
*/
p.uapsd = false;
- if (drv_conf_tx(local, params->queue, &p)) {
+ if (params->queue >= local->hw.queues)
+ return -EINVAL;
+
+ sdata->tx_conf[params->queue] = p;
+ if (drv_conf_tx(local, sdata, params->queue, &p)) {
wiphy_debug(local->hw.wiphy,
"failed to set TX queue parameters for queue %d\n",
params->queue);
@@ -1554,6 +1647,19 @@ static int ieee80211_testmode_cmd(struct wiphy *wiphy, void *data, int len)
return local->ops->testmode_cmd(&local->hw, data, len);
}
+
+static int ieee80211_testmode_dump(struct wiphy *wiphy,
+ struct sk_buff *skb,
+ struct netlink_callback *cb,
+ void *data, int len)
+{
+ struct ieee80211_local *local = wiphy_priv(wiphy);
+
+ if (!local->ops->testmode_dump)
+ return -EOPNOTSUPP;
+
+ return local->ops->testmode_dump(&local->hw, skb, cb, data, len);
+}
#endif
int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
@@ -1810,7 +1916,8 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_channel *chan, bool offchan,
enum nl80211_channel_type channel_type,
bool channel_type_valid, unsigned int wait,
- const u8 *buf, size_t len, u64 *cookie)
+ const u8 *buf, size_t len, bool no_cck,
+ u64 *cookie)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
@@ -1837,6 +1944,9 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
flags |= IEEE80211_TX_CTL_TX_OFFCHAN;
}
+ if (no_cck)
+ flags |= IEEE80211_TX_CTL_NO_CCK_RATE;
+
if (is_offchan && !offchan)
return -EBUSY;
@@ -1875,33 +1985,6 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
*cookie = (unsigned long) skb;
- if (is_offchan && local->ops->offchannel_tx) {
- int ret;
-
- IEEE80211_SKB_CB(skb)->band = chan->band;
-
- mutex_lock(&local->mtx);
-
- if (local->hw_offchan_tx_cookie) {
- mutex_unlock(&local->mtx);
- return -EBUSY;
- }
-
- /* TODO: bitrate control, TX processing? */
- ret = drv_offchannel_tx(local, skb, chan, channel_type, wait);
-
- if (ret == 0)
- local->hw_offchan_tx_cookie = *cookie;
- mutex_unlock(&local->mtx);
-
- /*
- * Allow driver to return 1 to indicate it wants to have the
- * frame transmitted with a remain_on_channel + regular TX.
- */
- if (ret != 1)
- return ret;
- }
-
if (is_offchan && local->ops->remain_on_channel) {
unsigned int duration;
int ret;
@@ -1988,18 +2071,6 @@ static int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
mutex_lock(&local->mtx);
- if (local->ops->offchannel_tx_cancel_wait &&
- local->hw_offchan_tx_cookie == cookie) {
- ret = drv_offchannel_tx_cancel_wait(local);
-
- if (!ret)
- local->hw_offchan_tx_cookie = 0;
-
- mutex_unlock(&local->mtx);
-
- return ret;
- }
-
if (local->ops->cancel_remain_on_channel) {
cookie ^= 2;
ret = ieee80211_cancel_remain_on_channel_hw(local, cookie);
@@ -2085,6 +2156,338 @@ static void ieee80211_get_ringparam(struct wiphy *wiphy,
drv_get_ringparam(local, tx, tx_max, rx, rx_max);
}
+static int ieee80211_set_rekey_data(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ struct ieee80211_local *local = wiphy_priv(wiphy);
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ if (!local->ops->set_rekey_data)
+ return -EOPNOTSUPP;
+
+ drv_set_rekey_data(local, sdata, data);
+
+ return 0;
+}
+
+static void ieee80211_tdls_add_ext_capab(struct sk_buff *skb)
+{
+ u8 *pos = (void *)skb_put(skb, 7);
+
+ *pos++ = WLAN_EID_EXT_CAPABILITY;
+ *pos++ = 5; /* len */
+ *pos++ = 0x0;
+ *pos++ = 0x0;
+ *pos++ = 0x0;
+ *pos++ = 0x0;
+ *pos++ = WLAN_EXT_CAPA5_TDLS_ENABLED;
+}
+
+static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ u16 capab;
+
+ capab = 0;
+ if (local->oper_channel->band != IEEE80211_BAND_2GHZ)
+ return capab;
+
+ if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
+ capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
+ if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
+ capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
+
+ return capab;
+}
+
+static void ieee80211_tdls_add_link_ie(struct sk_buff *skb, u8 *src_addr,
+ u8 *peer, u8 *bssid)
+{
+ struct ieee80211_tdls_lnkie *lnkid;
+
+ lnkid = (void *)skb_put(skb, sizeof(struct ieee80211_tdls_lnkie));
+
+ lnkid->ie_type = WLAN_EID_LINK_ID;
+ lnkid->ie_len = sizeof(struct ieee80211_tdls_lnkie) - 2;
+
+ memcpy(lnkid->bssid, bssid, ETH_ALEN);
+ memcpy(lnkid->init_sta, src_addr, ETH_ALEN);
+ memcpy(lnkid->resp_sta, peer, ETH_ALEN);
+}
+
+static int
+ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, u8 action_code, u8 dialog_token,
+ u16 status_code, struct sk_buff *skb)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_tdls_data *tf;
+
+ tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
+
+ memcpy(tf->da, peer, ETH_ALEN);
+ memcpy(tf->sa, sdata->vif.addr, ETH_ALEN);
+ tf->ether_type = cpu_to_be16(ETH_P_TDLS);
+ tf->payload_type = WLAN_TDLS_SNAP_RFTYPE;
+
+ switch (action_code) {
+ case WLAN_TDLS_SETUP_REQUEST:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_SETUP_REQUEST;
+
+ skb_put(skb, sizeof(tf->u.setup_req));
+ tf->u.setup_req.dialog_token = dialog_token;
+ tf->u.setup_req.capability =
+ cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
+
+ ieee80211_add_srates_ie(&sdata->vif, skb);
+ ieee80211_add_ext_srates_ie(&sdata->vif, skb);
+ ieee80211_tdls_add_ext_capab(skb);
+ break;
+ case WLAN_TDLS_SETUP_RESPONSE:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_SETUP_RESPONSE;
+
+ skb_put(skb, sizeof(tf->u.setup_resp));
+ tf->u.setup_resp.status_code = cpu_to_le16(status_code);
+ tf->u.setup_resp.dialog_token = dialog_token;
+ tf->u.setup_resp.capability =
+ cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
+
+ ieee80211_add_srates_ie(&sdata->vif, skb);
+ ieee80211_add_ext_srates_ie(&sdata->vif, skb);
+ ieee80211_tdls_add_ext_capab(skb);
+ break;
+ case WLAN_TDLS_SETUP_CONFIRM:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_SETUP_CONFIRM;
+
+ skb_put(skb, sizeof(tf->u.setup_cfm));
+ tf->u.setup_cfm.status_code = cpu_to_le16(status_code);
+ tf->u.setup_cfm.dialog_token = dialog_token;
+ break;
+ case WLAN_TDLS_TEARDOWN:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_TEARDOWN;
+
+ skb_put(skb, sizeof(tf->u.teardown));
+ tf->u.teardown.reason_code = cpu_to_le16(status_code);
+ break;
+ case WLAN_TDLS_DISCOVERY_REQUEST:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_DISCOVERY_REQUEST;
+
+ skb_put(skb, sizeof(tf->u.discover_req));
+ tf->u.discover_req.dialog_token = dialog_token;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, u8 action_code, u8 dialog_token,
+ u16 status_code, struct sk_buff *skb)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_mgmt *mgmt;
+
+ mgmt = (void *)skb_put(skb, 24);
+ memset(mgmt, 0, 24);
+ memcpy(mgmt->da, peer, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
+ memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
+
+ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_ACTION);
+
+ switch (action_code) {
+ case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
+ skb_put(skb, 1 + sizeof(mgmt->u.action.u.tdls_discover_resp));
+ mgmt->u.action.category = WLAN_CATEGORY_PUBLIC;
+ mgmt->u.action.u.tdls_discover_resp.action_code =
+ WLAN_PUB_ACTION_TDLS_DISCOVER_RES;
+ mgmt->u.action.u.tdls_discover_resp.dialog_token =
+ dialog_token;
+ mgmt->u.action.u.tdls_discover_resp.capability =
+ cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
+
+ ieee80211_add_srates_ie(&sdata->vif, skb);
+ ieee80211_add_ext_srates_ie(&sdata->vif, skb);
+ ieee80211_tdls_add_ext_capab(skb);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, u8 action_code, u8 dialog_token,
+ u16 status_code, const u8 *extra_ies,
+ size_t extra_ies_len)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *skb = NULL;
+ bool send_direct;
+ int ret;
+
+ if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
+ return -ENOTSUPP;
+
+ /* make sure we are in managed mode, and associated */
+ if (sdata->vif.type != NL80211_IFTYPE_STATION ||
+ !sdata->u.mgd.associated)
+ return -EINVAL;
+
+#ifdef CONFIG_MAC80211_VERBOSE_TDLS_DEBUG
+ printk(KERN_DEBUG "TDLS mgmt action %d peer %pM\n", action_code, peer);
+#endif
+
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+ max(sizeof(struct ieee80211_mgmt),
+ sizeof(struct ieee80211_tdls_data)) +
+ 50 + /* supported rates */
+ 7 + /* ext capab */
+ extra_ies_len +
+ sizeof(struct ieee80211_tdls_lnkie));
+ if (!skb)
+ return -ENOMEM;
+
+ info = IEEE80211_SKB_CB(skb);
+ skb_reserve(skb, local->hw.extra_tx_headroom);
+
+ switch (action_code) {
+ case WLAN_TDLS_SETUP_REQUEST:
+ case WLAN_TDLS_SETUP_RESPONSE:
+ case WLAN_TDLS_SETUP_CONFIRM:
+ case WLAN_TDLS_TEARDOWN:
+ case WLAN_TDLS_DISCOVERY_REQUEST:
+ ret = ieee80211_prep_tdls_encap_data(wiphy, dev, peer,
+ action_code, dialog_token,
+ status_code, skb);
+ send_direct = false;
+ break;
+ case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
+ ret = ieee80211_prep_tdls_direct(wiphy, dev, peer, action_code,
+ dialog_token, status_code,
+ skb);
+ send_direct = true;
+ break;
+ default:
+ ret = -ENOTSUPP;
+ break;
+ }
+
+ if (ret < 0)
+ goto fail;
+
+ if (extra_ies_len)
+ memcpy(skb_put(skb, extra_ies_len), extra_ies, extra_ies_len);
+
+ /* the TDLS link IE is always added last */
+ switch (action_code) {
+ case WLAN_TDLS_SETUP_REQUEST:
+ case WLAN_TDLS_SETUP_CONFIRM:
+ case WLAN_TDLS_TEARDOWN:
+ case WLAN_TDLS_DISCOVERY_REQUEST:
+ /* we are the initiator */
+ ieee80211_tdls_add_link_ie(skb, sdata->vif.addr, peer,
+ sdata->u.mgd.bssid);
+ break;
+ case WLAN_TDLS_SETUP_RESPONSE:
+ case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
+ /* we are the responder */
+ ieee80211_tdls_add_link_ie(skb, peer, sdata->vif.addr,
+ sdata->u.mgd.bssid);
+ break;
+ default:
+ ret = -ENOTSUPP;
+ goto fail;
+ }
+
+ if (send_direct) {
+ ieee80211_tx_skb(sdata, skb);
+ return 0;
+ }
+
+ /*
+ * According to 802.11z: Setup req/resp are sent in AC_BK, otherwise
+ * we should default to AC_VI.
+ */
+ switch (action_code) {
+ case WLAN_TDLS_SETUP_REQUEST:
+ case WLAN_TDLS_SETUP_RESPONSE:
+ skb_set_queue_mapping(skb, IEEE80211_AC_BK);
+ skb->priority = 2;
+ break;
+ default:
+ skb_set_queue_mapping(skb, IEEE80211_AC_VI);
+ skb->priority = 5;
+ break;
+ }
+
+ /* disable bottom halves when entering the Tx path */
+ local_bh_disable();
+ ret = ieee80211_subif_start_xmit(skb, dev);
+ local_bh_enable();
+
+ return ret;
+
+fail:
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+static int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, enum nl80211_tdls_operation oper)
+{
+ struct sta_info *sta;
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
+ return -ENOTSUPP;
+
+ if (sdata->vif.type != NL80211_IFTYPE_STATION)
+ return -EINVAL;
+
+#ifdef CONFIG_MAC80211_VERBOSE_TDLS_DEBUG
+ printk(KERN_DEBUG "TDLS oper %d peer %pM\n", oper, peer);
+#endif
+
+ switch (oper) {
+ case NL80211_TDLS_ENABLE_LINK:
+ rcu_read_lock();
+ sta = sta_info_get(sdata, peer);
+ if (!sta) {
+ rcu_read_unlock();
+ return -ENOLINK;
+ }
+
+ set_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH);
+ rcu_read_unlock();
+ break;
+ case NL80211_TDLS_DISABLE_LINK:
+ return sta_info_destroy_addr(sdata, peer);
+ case NL80211_TDLS_TEARDOWN:
+ case NL80211_TDLS_SETUP:
+ case NL80211_TDLS_DISCOVERY_REQ:
+ /* We don't support in-driver setup/teardown/discovery */
+ return -ENOTSUPP;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
struct cfg80211_ops mac80211_config_ops = {
.add_virtual_intf = ieee80211_add_iface,
.del_virtual_intf = ieee80211_del_iface,
@@ -2134,6 +2537,7 @@ struct cfg80211_ops mac80211_config_ops = {
.set_wds_peer = ieee80211_set_wds_peer,
.rfkill_poll = ieee80211_rfkill_poll,
CFG80211_TESTMODE_CMD(ieee80211_testmode_cmd)
+ CFG80211_TESTMODE_DUMP(ieee80211_testmode_dump)
.set_power_mgmt = ieee80211_set_power_mgmt,
.set_bitrate_mask = ieee80211_set_bitrate_mask,
.remain_on_channel = ieee80211_remain_on_channel,
@@ -2146,4 +2550,7 @@ struct cfg80211_ops mac80211_config_ops = {
.get_antenna = ieee80211_get_antenna,
.set_ringparam = ieee80211_set_ringparam,
.get_ringparam = ieee80211_get_ringparam,
+ .set_rekey_data = ieee80211_set_rekey_data,
+ .tdls_oper = ieee80211_tdls_oper,
+ .tdls_mgmt = ieee80211_tdls_mgmt,
};
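For context, the tdls_mgmt/tdls_oper entry points wired up above are only reachable when a driver advertises TDLS support on its wiphy. A minimal sketch of how a driver might opt in during setup — the function name and its placement are illustrative; only WIPHY_FLAG_SUPPORTS_TDLS is taken from the patch:

#include <net/mac80211.h>

/* Hypothetical driver setup hook: opt in to mac80211 TDLS handling.
 * ieee80211_tdls_mgmt()/ieee80211_tdls_oper() above bail out with
 * -ENOTSUPP unless this wiphy flag is set.
 */
static void example_drv_enable_tdls(struct ieee80211_hw *hw)
{
	hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
}
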
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 186e02f..883996b 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -78,57 +78,6 @@ DEBUGFS_READONLY_FILE(wep_iv, "%#08x",
DEBUGFS_READONLY_FILE(rate_ctrl_alg, "%s",
local->rate_ctrl ? local->rate_ctrl->ops->name : "hw/driver");
-static ssize_t tsf_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_local *local = file->private_data;
- u64 tsf;
-
- tsf = drv_get_tsf(local);
-
- return mac80211_format_buffer(user_buf, count, ppos, "0x%016llx\n",
- (unsigned long long) tsf);
-}
-
-static ssize_t tsf_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_local *local = file->private_data;
- unsigned long long tsf;
- char buf[100];
- size_t len;
-
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
- buf[len] = '\0';
-
- if (strncmp(buf, "reset", 5) == 0) {
- if (local->ops->reset_tsf) {
- drv_reset_tsf(local);
- wiphy_info(local->hw.wiphy, "debugfs reset TSF\n");
- }
- } else {
- tsf = simple_strtoul(buf, NULL, 0);
- if (local->ops->set_tsf) {
- drv_set_tsf(local, tsf);
- wiphy_info(local->hw.wiphy,
- "debugfs set TSF to %#018llx\n", tsf);
-
- }
- }
-
- return count;
-}
-
-static const struct file_operations tsf_ops = {
- .read = tsf_read,
- .write = tsf_write,
- .open = mac80211_open_file_generic,
- .llseek = default_llseek,
-};
-
static ssize_t reset_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -195,20 +144,12 @@ static ssize_t uapsd_queues_write(struct file *file,
size_t count, loff_t *ppos)
{
struct ieee80211_local *local = file->private_data;
- unsigned long val;
- char buf[10];
- size_t len;
+ u8 val;
int ret;
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
- buf[len] = '\0';
-
- ret = strict_strtoul(buf, 0, &val);
-
+ ret = kstrtou8_from_user(user_buf, count, 0, &val);
if (ret)
- return -EINVAL;
+ return ret;
if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
return -ERANGE;
@@ -305,6 +246,9 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
char *buf = kzalloc(mxln, GFP_KERNEL);
int sf = 0; /* how many written so far */
+ if (!buf)
+ return 0;
+
sf += snprintf(buf, mxln - sf, "0x%x\n", local->hw.flags);
if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
sf += snprintf(buf + sf, mxln - sf, "HAS_RATE_CONTROL\n");
@@ -355,6 +299,8 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_PER_STA_GTK\n");
if (local->hw.flags & IEEE80211_HW_AP_LINK_PS)
sf += snprintf(buf + sf, mxln - sf, "AP_LINK_PS\n");
+ if (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW)
+ sf += snprintf(buf + sf, mxln - sf, "TX_AMPDU_SETUP_IN_HW\n");
rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
kfree(buf);
@@ -450,7 +396,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
DEBUGFS_ADD(frequency);
DEBUGFS_ADD(total_ps_buffered);
DEBUGFS_ADD(wep_iv);
- DEBUGFS_ADD(tsf);
DEBUGFS_ADD(queues);
DEBUGFS_ADD_MODE(reset, 0200);
DEBUGFS_ADD(noack);
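The uapsd_queues_write() change above trades the open-coded copy_from_user()/strict_strtoul() sequence for kstrtou8_from_user(), which copies and parses in one step. A hedged sketch of the same pattern for a generic debugfs writer — example_apply_value() is hypothetical:

#include <linux/fs.h>
#include <linux/kernel.h>

void example_apply_value(u8 val);	/* hypothetical consumer */

static ssize_t example_write(struct file *file, const char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	u8 val;
	int ret;

	/* copy from userspace and parse a u8 in one call; base 0 allows 0x.. */
	ret = kstrtou8_from_user(user_buf, count, 0, &val);
	if (ret)
		return ret;

	example_apply_value(val);
	return count;
}
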
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 33c58b8..38e6101 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -78,7 +78,7 @@ KEY_OPS(algorithm);
static ssize_t key_tx_spec_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
- const u8 *tpn;
+ u64 pn;
char buf[20];
int len;
struct ieee80211_key *key = file->private_data;
@@ -94,15 +94,16 @@ static ssize_t key_tx_spec_read(struct file *file, char __user *userbuf,
key->u.tkip.tx.iv16);
break;
case WLAN_CIPHER_SUITE_CCMP:
- tpn = key->u.ccmp.tx_pn;
+ pn = atomic64_read(&key->u.ccmp.tx_pn);
len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
- tpn[0], tpn[1], tpn[2], tpn[3], tpn[4], tpn[5]);
+ (u8)(pn >> 40), (u8)(pn >> 32), (u8)(pn >> 24),
+ (u8)(pn >> 16), (u8)(pn >> 8), (u8)pn);
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
- tpn = key->u.aes_cmac.tx_pn;
+ pn = atomic64_read(&key->u.aes_cmac.tx_pn);
len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
- tpn[0], tpn[1], tpn[2], tpn[3], tpn[4],
- tpn[5]);
+ (u8)(pn >> 40), (u8)(pn >> 32), (u8)(pn >> 24),
+ (u8)(pn >> 16), (u8)(pn >> 8), (u8)pn);
break;
default:
return 0;
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 9ea7c0d..0228ecb 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -21,6 +21,7 @@
#include "rate.h"
#include "debugfs.h"
#include "debugfs_netdev.h"
+#include "driver-ops.h"
static ssize_t ieee80211_if_read(
struct ieee80211_sub_if_data *sdata,
@@ -32,8 +33,7 @@ static ssize_t ieee80211_if_read(
ssize_t ret = -EINVAL;
read_lock(&dev_base_lock);
- if (sdata->dev->reg_state == NETREG_REGISTERED)
- ret = (*format)(sdata, buf, sizeof(buf));
+ ret = (*format)(sdata, buf, sizeof(buf));
read_unlock(&dev_base_lock);
if (ret >= 0)
@@ -61,8 +61,7 @@ static ssize_t ieee80211_if_write(
ret = -ENODEV;
rtnl_lock();
- if (sdata->dev->reg_state == NETREG_REGISTERED)
- ret = (*write)(sdata, buf, count);
+ ret = (*write)(sdata, buf, count);
rtnl_unlock();
freebuf:
@@ -331,6 +330,46 @@ static ssize_t ieee80211_if_fmt_num_buffered_multicast(
}
__IEEE80211_IF_FILE(num_buffered_multicast, NULL);
+/* IBSS attributes */
+static ssize_t ieee80211_if_fmt_tsf(
+ const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
+{
+ struct ieee80211_local *local = sdata->local;
+ u64 tsf;
+
+ tsf = drv_get_tsf(local, (struct ieee80211_sub_if_data *)sdata);
+
+ return scnprintf(buf, buflen, "0x%016llx\n", (unsigned long long) tsf);
+}
+
+static ssize_t ieee80211_if_parse_tsf(
+ struct ieee80211_sub_if_data *sdata, const char *buf, int buflen)
+{
+ struct ieee80211_local *local = sdata->local;
+ unsigned long long tsf;
+ int ret;
+
+ if (strncmp(buf, "reset", 5) == 0) {
+ if (local->ops->reset_tsf) {
+ drv_reset_tsf(local, sdata);
+ wiphy_info(local->hw.wiphy, "debugfs reset TSF\n");
+ }
+ } else {
+ ret = kstrtoull(buf, 10, &tsf);
+ if (ret < 0)
+ return -EINVAL;
+ if (local->ops->set_tsf) {
+ drv_set_tsf(local, sdata, tsf);
+ wiphy_info(local->hw.wiphy,
+ "debugfs set TSF to %#018llx\n", tsf);
+ }
+ }
+
+ return buflen;
+}
+__IEEE80211_IF_FILE_W(tsf);
+
+
/* WDS attributes */
IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC);
@@ -340,6 +379,8 @@ IEEE80211_IF_FILE(fwded_mcast, u.mesh.mshstats.fwded_mcast, DEC);
IEEE80211_IF_FILE(fwded_unicast, u.mesh.mshstats.fwded_unicast, DEC);
IEEE80211_IF_FILE(fwded_frames, u.mesh.mshstats.fwded_frames, DEC);
IEEE80211_IF_FILE(dropped_frames_ttl, u.mesh.mshstats.dropped_frames_ttl, DEC);
+IEEE80211_IF_FILE(dropped_frames_congestion,
+ u.mesh.mshstats.dropped_frames_congestion, DEC);
IEEE80211_IF_FILE(dropped_frames_no_route,
u.mesh.mshstats.dropped_frames_no_route, DEC);
IEEE80211_IF_FILE(estab_plinks, u.mesh.mshstats.estab_plinks, ATOMIC);
@@ -372,6 +413,10 @@ IEEE80211_IF_FILE(min_discovery_timeout,
u.mesh.mshcfg.min_discovery_timeout, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPRootMode,
u.mesh.mshcfg.dot11MeshHWMPRootMode, DEC);
+IEEE80211_IF_FILE(dot11MeshGateAnnouncementProtocol,
+ u.mesh.mshcfg.dot11MeshGateAnnouncementProtocol, DEC);
+IEEE80211_IF_FILE(dot11MeshHWMPRannInterval,
+ u.mesh.mshcfg.dot11MeshHWMPRannInterval, DEC);
#endif
@@ -415,6 +460,11 @@ static void add_ap_files(struct ieee80211_sub_if_data *sdata)
DEBUGFS_ADD_MODE(tkip_mic_test, 0200);
}
+static void add_ibss_files(struct ieee80211_sub_if_data *sdata)
+{
+ DEBUGFS_ADD_MODE(tsf, 0600);
+}
+
static void add_wds_files(struct ieee80211_sub_if_data *sdata)
{
DEBUGFS_ADD(drop_unencrypted);
@@ -459,6 +509,7 @@ static void add_mesh_stats(struct ieee80211_sub_if_data *sdata)
MESHSTATS_ADD(fwded_frames);
MESHSTATS_ADD(dropped_frames_ttl);
MESHSTATS_ADD(dropped_frames_no_route);
+ MESHSTATS_ADD(dropped_frames_congestion);
MESHSTATS_ADD(estab_plinks);
#undef MESHSTATS_ADD
}
@@ -485,7 +536,9 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
MESHPARAMS_ADD(dot11MeshHWMPmaxPREQretries);
MESHPARAMS_ADD(path_refresh_time);
MESHPARAMS_ADD(min_discovery_timeout);
-
+ MESHPARAMS_ADD(dot11MeshHWMPRootMode);
+ MESHPARAMS_ADD(dot11MeshHWMPRannInterval);
+ MESHPARAMS_ADD(dot11MeshGateAnnouncementProtocol);
#undef MESHPARAMS_ADD
}
#endif
@@ -506,7 +559,7 @@ static void add_files(struct ieee80211_sub_if_data *sdata)
add_sta_files(sdata);
break;
case NL80211_IFTYPE_ADHOC:
- /* XXX */
+ add_ibss_files(sdata);
break;
case NL80211_IFTYPE_AP:
add_ap_files(sdata);
@@ -545,6 +598,7 @@ void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata)
debugfs_remove_recursive(sdata->debugfs.dir);
sdata->debugfs.dir = NULL;
+ sdata->debugfs.subdir_stations = NULL;
}
void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index a01d213..3110cbd 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -56,19 +56,22 @@ STA_FILE(last_signal, last_signal, D);
static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
- char buf[100];
+ char buf[121];
struct sta_info *sta = file->private_data;
- u32 staflags = get_sta_flags(sta);
- int res = scnprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s",
- staflags & WLAN_STA_AUTH ? "AUTH\n" : "",
- staflags & WLAN_STA_ASSOC ? "ASSOC\n" : "",
- staflags & WLAN_STA_PS_STA ? "PS (sta)\n" : "",
- staflags & WLAN_STA_PS_DRIVER ? "PS (driver)\n" : "",
- staflags & WLAN_STA_AUTHORIZED ? "AUTHORIZED\n" : "",
- staflags & WLAN_STA_SHORT_PREAMBLE ? "SHORT PREAMBLE\n" : "",
- staflags & WLAN_STA_WME ? "WME\n" : "",
- staflags & WLAN_STA_WDS ? "WDS\n" : "",
- staflags & WLAN_STA_MFP ? "MFP\n" : "");
+
+#define TEST(flg) \
+ test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : ""
+
+ int res = scnprintf(buf, sizeof(buf),
+ "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+ TEST(AUTH), TEST(ASSOC), TEST(PS_STA),
+ TEST(PS_DRIVER), TEST(AUTHORIZED),
+ TEST(SHORT_PREAMBLE), TEST(ASSOC_AP),
+ TEST(WME), TEST(WDS), TEST(CLEAR_PS_FILT),
+ TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL),
+ TEST(UAPSD), TEST(SP), TEST(TDLS_PEER),
+ TEST(TDLS_PEER_AUTH));
+#undef TEST
return simple_read_from_buffer(userbuf, count, ppos, buf, res);
}
STA_OPS(flags);
@@ -78,8 +81,14 @@ static ssize_t sta_num_ps_buf_frames_read(struct file *file,
size_t count, loff_t *ppos)
{
struct sta_info *sta = file->private_data;
- return mac80211_format_buffer(userbuf, count, ppos, "%u\n",
- skb_queue_len(&sta->ps_tx_buf));
+ char buf[17*IEEE80211_NUM_ACS], *p = buf;
+ int ac;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ p += scnprintf(p, sizeof(buf)+buf-p, "AC%d: %d\n", ac,
+ skb_queue_len(&sta->ps_tx_buf[ac]) +
+ skb_queue_len(&sta->tx_filtered[ac]));
+ return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
}
STA_OPS(num_ps_buf_frames);
@@ -265,9 +274,9 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
PRINT_HT_CAP((htc->cap & BIT(10)), "HT Delayed Block Ack");
- PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: "
- "3839 bytes");
PRINT_HT_CAP(!(htc->cap & BIT(11)), "Max AMSDU length: "
+ "3839 bytes");
+ PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: "
"7935 bytes");
/*
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index eebf7a6..5f165d7 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -130,6 +130,37 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local,
trace_drv_return_void(local);
}
+static inline int drv_tx_sync(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ const u8 *bssid,
+ enum ieee80211_tx_sync_type type)
+{
+ int ret = 0;
+
+ might_sleep();
+
+ trace_drv_tx_sync(local, sdata, bssid, type);
+ if (local->ops->tx_sync)
+ ret = local->ops->tx_sync(&local->hw, &sdata->vif,
+ bssid, type);
+ trace_drv_return_int(local, ret);
+ return ret;
+}
+
+static inline void drv_finish_tx_sync(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ const u8 *bssid,
+ enum ieee80211_tx_sync_type type)
+{
+ might_sleep();
+
+ trace_drv_finish_tx_sync(local, sdata, bssid, type);
+ if (local->ops->finish_tx_sync)
+ local->ops->finish_tx_sync(&local->hw, &sdata->vif,
+ bssid, type);
+ trace_drv_return_void(local);
+}
+
static inline u64 drv_prepare_multicast(struct ieee80211_local *local,
struct netdev_hw_addr_list *mc_list)
{
@@ -218,6 +249,16 @@ static inline int drv_hw_scan(struct ieee80211_local *local,
return ret;
}
+static inline void drv_cancel_hw_scan(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata)
+{
+ might_sleep();
+
+ trace_drv_cancel_hw_scan(local, sdata);
+ local->ops->cancel_hw_scan(&local->hw, &sdata->vif);
+ trace_drv_return_void(local);
+}
+
static inline int
drv_sched_scan_start(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
@@ -372,50 +413,56 @@ static inline void drv_sta_remove(struct ieee80211_local *local,
trace_drv_return_void(local);
}
-static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue,
+static inline int drv_conf_tx(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
int ret = -EOPNOTSUPP;
might_sleep();
- trace_drv_conf_tx(local, queue, params);
+ trace_drv_conf_tx(local, sdata, queue, params);
if (local->ops->conf_tx)
- ret = local->ops->conf_tx(&local->hw, queue, params);
+ ret = local->ops->conf_tx(&local->hw, &sdata->vif,
+ queue, params);
trace_drv_return_int(local, ret);
return ret;
}
-static inline u64 drv_get_tsf(struct ieee80211_local *local)
+static inline u64 drv_get_tsf(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata)
{
u64 ret = -1ULL;
might_sleep();
- trace_drv_get_tsf(local);
+ trace_drv_get_tsf(local, sdata);
if (local->ops->get_tsf)
- ret = local->ops->get_tsf(&local->hw);
+ ret = local->ops->get_tsf(&local->hw, &sdata->vif);
trace_drv_return_u64(local, ret);
return ret;
}
-static inline void drv_set_tsf(struct ieee80211_local *local, u64 tsf)
+static inline void drv_set_tsf(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ u64 tsf)
{
might_sleep();
- trace_drv_set_tsf(local, tsf);
+ trace_drv_set_tsf(local, sdata, tsf);
if (local->ops->set_tsf)
- local->ops->set_tsf(&local->hw, tsf);
+ local->ops->set_tsf(&local->hw, &sdata->vif, tsf);
trace_drv_return_void(local);
}
-static inline void drv_reset_tsf(struct ieee80211_local *local)
+static inline void drv_reset_tsf(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata)
{
might_sleep();
- trace_drv_reset_tsf(local);
+ trace_drv_reset_tsf(local, sdata);
if (local->ops->reset_tsf)
- local->ops->reset_tsf(&local->hw);
+ local->ops->reset_tsf(&local->hw, &sdata->vif);
trace_drv_return_void(local);
}
@@ -549,37 +596,6 @@ static inline int drv_cancel_remain_on_channel(struct ieee80211_local *local)
return ret;
}
-static inline int drv_offchannel_tx(struct ieee80211_local *local,
- struct sk_buff *skb,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type,
- unsigned int wait)
-{
- int ret;
-
- might_sleep();
-
- trace_drv_offchannel_tx(local, skb, chan, channel_type, wait);
- ret = local->ops->offchannel_tx(&local->hw, skb, chan,
- channel_type, wait);
- trace_drv_return_int(local, ret);
-
- return ret;
-}
-
-static inline int drv_offchannel_tx_cancel_wait(struct ieee80211_local *local)
-{
- int ret;
-
- might_sleep();
-
- trace_drv_offchannel_tx_cancel_wait(local);
- ret = local->ops->offchannel_tx_cancel_wait(&local->hw);
- trace_drv_return_int(local, ret);
-
- return ret;
-}
-
static inline int drv_set_ringparam(struct ieee80211_local *local,
u32 tx, u32 rx)
{
@@ -637,4 +653,52 @@ static inline int drv_set_bitrate_mask(struct ieee80211_local *local,
return ret;
}
+static inline void drv_set_rekey_data(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ trace_drv_set_rekey_data(local, sdata, data);
+ if (local->ops->set_rekey_data)
+ local->ops->set_rekey_data(&local->hw, &sdata->vif, data);
+ trace_drv_return_void(local);
+}
+
+static inline void drv_rssi_callback(struct ieee80211_local *local,
+ const enum ieee80211_rssi_event event)
+{
+ trace_drv_rssi_callback(local, event);
+ if (local->ops->rssi_callback)
+ local->ops->rssi_callback(&local->hw, event);
+ trace_drv_return_void(local);
+}
+
+static inline void
+drv_release_buffered_frames(struct ieee80211_local *local,
+ struct sta_info *sta, u16 tids, int num_frames,
+ enum ieee80211_frame_release_type reason,
+ bool more_data)
+{
+ trace_drv_release_buffered_frames(local, &sta->sta, tids, num_frames,
+ reason, more_data);
+ if (local->ops->release_buffered_frames)
+ local->ops->release_buffered_frames(&local->hw, &sta->sta, tids,
+ num_frames, reason,
+ more_data);
+ trace_drv_return_void(local);
+}
+
+static inline void
+drv_allow_buffered_frames(struct ieee80211_local *local,
+ struct sta_info *sta, u16 tids, int num_frames,
+ enum ieee80211_frame_release_type reason,
+ bool more_data)
+{
+ trace_drv_allow_buffered_frames(local, &sta->sta, tids, num_frames,
+ reason, more_data);
+ if (local->ops->allow_buffered_frames)
+ local->ops->allow_buffered_frames(&local->hw, &sta->sta,
+ tids, num_frames, reason,
+ more_data);
+ trace_drv_return_void(local);
+}
#endif /* __MAC80211_DRIVER_OPS */
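Since drv_conf_tx() and the TSF helpers above now pass the interface down to the driver, the corresponding driver callbacks gain a vif argument. A hedged sketch of a conf_tx implementation matching the reworked prototype — struct example_priv and example_hw_program_queue() are hypothetical driver internals:

#include <net/mac80211.h>

struct example_priv;	/* hypothetical driver private data */
int example_hw_program_queue(struct example_priv *priv,
			     struct ieee80211_vif *vif, u16 queue,
			     const struct ieee80211_tx_queue_params *params);

/* Matches the reworked ops->conf_tx prototype used by drv_conf_tx() above. */
static int example_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			   u16 queue,
			   const struct ieee80211_tx_queue_params *params)
{
	struct example_priv *priv = hw->priv;

	return example_hw_program_queue(priv, vif, queue, params);
}
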
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index ed9edcb..2af4fca 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -319,6 +319,49 @@ TRACE_EVENT(drv_bss_info_changed,
)
);
+DECLARE_EVENT_CLASS(tx_sync_evt,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ const u8 *bssid,
+ enum ieee80211_tx_sync_type type),
+ TP_ARGS(local, sdata, bssid, type),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ __array(char, bssid, ETH_ALEN)
+ __field(u32, sync_type)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ memcpy(__entry->bssid, bssid, ETH_ALEN);
+ __entry->sync_type = type;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT " bssid:%pM type:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->bssid, __entry->sync_type
+ )
+);
+
+DEFINE_EVENT(tx_sync_evt, drv_tx_sync,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ const u8 *bssid,
+ enum ieee80211_tx_sync_type type),
+ TP_ARGS(local, sdata, bssid, type)
+);
+
+DEFINE_EVENT(tx_sync_evt, drv_finish_tx_sync,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ const u8 *bssid,
+ enum ieee80211_tx_sync_type type),
+ TP_ARGS(local, sdata, bssid, type)
+);
+
TRACE_EVENT(drv_prepare_multicast,
TP_PROTO(struct ieee80211_local *local, int mc_count),
@@ -460,6 +503,12 @@ DEFINE_EVENT(local_sdata_evt, drv_hw_scan,
TP_ARGS(local, sdata)
);
+DEFINE_EVENT(local_sdata_evt, drv_cancel_hw_scan,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata),
+ TP_ARGS(local, sdata)
+);
+
DEFINE_EVENT(local_sdata_evt, drv_sched_scan_start,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata),
@@ -648,64 +697,76 @@ TRACE_EVENT(drv_sta_remove,
);
TRACE_EVENT(drv_conf_tx,
- TP_PROTO(struct ieee80211_local *local, u16 queue,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ u16 queue,
const struct ieee80211_tx_queue_params *params),
- TP_ARGS(local, queue, params),
+ TP_ARGS(local, sdata, queue, params),
TP_STRUCT__entry(
LOCAL_ENTRY
+ VIF_ENTRY
__field(u16, queue)
__field(u16, txop)
__field(u16, cw_min)
__field(u16, cw_max)
__field(u8, aifs)
+ __field(bool, uapsd)
),
TP_fast_assign(
LOCAL_ASSIGN;
+ VIF_ASSIGN;
__entry->queue = queue;
__entry->txop = params->txop;
__entry->cw_max = params->cw_max;
__entry->cw_min = params->cw_min;
__entry->aifs = params->aifs;
+ __entry->uapsd = params->uapsd;
),
TP_printk(
- LOCAL_PR_FMT " queue:%d",
- LOCAL_PR_ARG, __entry->queue
+ LOCAL_PR_FMT VIF_PR_FMT " queue:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->queue
)
);
-DEFINE_EVENT(local_only_evt, drv_get_tsf,
- TP_PROTO(struct ieee80211_local *local),
- TP_ARGS(local)
+DEFINE_EVENT(local_sdata_evt, drv_get_tsf,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata),
+ TP_ARGS(local, sdata)
);
TRACE_EVENT(drv_set_tsf,
- TP_PROTO(struct ieee80211_local *local, u64 tsf),
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ u64 tsf),
- TP_ARGS(local, tsf),
+ TP_ARGS(local, sdata, tsf),
TP_STRUCT__entry(
LOCAL_ENTRY
+ VIF_ENTRY
__field(u64, tsf)
),
TP_fast_assign(
LOCAL_ASSIGN;
+ VIF_ASSIGN;
__entry->tsf = tsf;
),
TP_printk(
- LOCAL_PR_FMT " tsf:%llu",
- LOCAL_PR_ARG, (unsigned long long)__entry->tsf
+ LOCAL_PR_FMT VIF_PR_FMT " tsf:%llu",
+ LOCAL_PR_ARG, VIF_PR_ARG, (unsigned long long)__entry->tsf
)
);
-DEFINE_EVENT(local_only_evt, drv_reset_tsf,
- TP_PROTO(struct ieee80211_local *local),
- TP_ARGS(local)
+DEFINE_EVENT(local_sdata_evt, drv_reset_tsf,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata),
+ TP_ARGS(local, sdata)
);
DEFINE_EVENT(local_only_evt, drv_tx_last_beacon,
@@ -1018,6 +1079,111 @@ TRACE_EVENT(drv_set_bitrate_mask,
)
);
+TRACE_EVENT(drv_set_rekey_data,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_gtk_rekey_data *data),
+
+ TP_ARGS(local, sdata, data),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ __array(u8, kek, NL80211_KEK_LEN)
+ __array(u8, kck, NL80211_KCK_LEN)
+ __array(u8, replay_ctr, NL80211_REPLAY_CTR_LEN)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ memcpy(__entry->kek, data->kek, NL80211_KEK_LEN);
+ memcpy(__entry->kck, data->kck, NL80211_KCK_LEN);
+ memcpy(__entry->replay_ctr, data->replay_ctr,
+ NL80211_REPLAY_CTR_LEN);
+ ),
+
+ TP_printk(LOCAL_PR_FMT VIF_PR_FMT,
+ LOCAL_PR_ARG, VIF_PR_ARG)
+);
+
+TRACE_EVENT(drv_rssi_callback,
+ TP_PROTO(struct ieee80211_local *local,
+ enum ieee80211_rssi_event rssi_event),
+
+ TP_ARGS(local, rssi_event),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(u32, rssi_event)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->rssi_event = rssi_event;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " rssi_event:%d",
+ LOCAL_PR_ARG, __entry->rssi_event
+ )
+);
+
+DECLARE_EVENT_CLASS(release_evt,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sta *sta,
+ u16 tids, int num_frames,
+ enum ieee80211_frame_release_type reason,
+ bool more_data),
+
+ TP_ARGS(local, sta, tids, num_frames, reason, more_data),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ STA_ENTRY
+ __field(u16, tids)
+ __field(int, num_frames)
+ __field(int, reason)
+ __field(bool, more_data)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ STA_ASSIGN;
+ __entry->tids = tids;
+ __entry->num_frames = num_frames;
+ __entry->reason = reason;
+ __entry->more_data = more_data;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT STA_PR_FMT
+ " TIDs:0x%.4x frames:%d reason:%d more:%d",
+ LOCAL_PR_ARG, STA_PR_ARG, __entry->tids, __entry->num_frames,
+ __entry->reason, __entry->more_data
+ )
+);
+
+DEFINE_EVENT(release_evt, drv_release_buffered_frames,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sta *sta,
+ u16 tids, int num_frames,
+ enum ieee80211_frame_release_type reason,
+ bool more_data),
+
+ TP_ARGS(local, sta, tids, num_frames, reason, more_data)
+);
+
+DEFINE_EVENT(release_evt, drv_allow_buffered_frames,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sta *sta,
+ u16 tids, int num_frames,
+ enum ieee80211_frame_release_type reason,
+ bool more_data),
+
+ TP_ARGS(local, sta, tids, num_frames, reason, more_data)
+);
+
/*
* Tracing for API calls that drivers call.
*/
@@ -1287,6 +1453,73 @@ DEFINE_EVENT(local_only_evt, api_remain_on_channel_expired,
TP_ARGS(local)
);
+TRACE_EVENT(api_gtk_rekey_notify,
+ TP_PROTO(struct ieee80211_sub_if_data *sdata,
+ const u8 *bssid, const u8 *replay_ctr),
+
+ TP_ARGS(sdata, bssid, replay_ctr),
+
+ TP_STRUCT__entry(
+ VIF_ENTRY
+ __array(u8, bssid, ETH_ALEN)
+ __array(u8, replay_ctr, NL80211_REPLAY_CTR_LEN)
+ ),
+
+ TP_fast_assign(
+ VIF_ASSIGN;
+ memcpy(__entry->bssid, bssid, ETH_ALEN);
+ memcpy(__entry->replay_ctr, replay_ctr, NL80211_REPLAY_CTR_LEN);
+ ),
+
+ TP_printk(VIF_PR_FMT, VIF_PR_ARG)
+);
+
+TRACE_EVENT(api_enable_rssi_reports,
+ TP_PROTO(struct ieee80211_sub_if_data *sdata,
+ int rssi_min_thold, int rssi_max_thold),
+
+ TP_ARGS(sdata, rssi_min_thold, rssi_max_thold),
+
+ TP_STRUCT__entry(
+ VIF_ENTRY
+ __field(int, rssi_min_thold)
+ __field(int, rssi_max_thold)
+ ),
+
+ TP_fast_assign(
+ VIF_ASSIGN;
+ __entry->rssi_min_thold = rssi_min_thold;
+ __entry->rssi_max_thold = rssi_max_thold;
+ ),
+
+ TP_printk(
+ VIF_PR_FMT " rssi_min_thold =%d, rssi_max_thold = %d",
+ VIF_PR_ARG, __entry->rssi_min_thold, __entry->rssi_max_thold
+ )
+);
+
+TRACE_EVENT(api_eosp,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sta *sta),
+
+ TP_ARGS(local, sta),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ STA_ENTRY
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ STA_ASSIGN;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT STA_PR_FMT,
+ LOCAL_PR_ARG, STA_PR_ARG
+ )
+);
+
/*
* Tracing for internal functions
* (which may also be called in response to driver calls)
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 591add2..f0fb737 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -14,6 +14,7 @@
*/
#include <linux/ieee80211.h>
+#include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "rate.h"
@@ -130,7 +131,7 @@ void ieee80211_ba_session_work(struct work_struct *work)
* down by the code that set the flag, so this
* need not run.
*/
- if (test_sta_flags(sta, WLAN_STA_BLOCK_BA))
+ if (test_sta_flag(sta, WLAN_STA_BLOCK_BA))
return;
mutex_lock(&sta->ampdu_mlme.mtx);
@@ -140,6 +141,12 @@ void ieee80211_ba_session_work(struct work_struct *work)
sta, tid, WLAN_BACK_RECIPIENT,
WLAN_REASON_QSTA_TIMEOUT, true);
+ if (test_and_clear_bit(tid,
+ sta->ampdu_mlme.tid_rx_stop_requested))
+ ___ieee80211_stop_rx_ba_session(
+ sta, tid, WLAN_BACK_RECIPIENT,
+ WLAN_REASON_UNSPECIFIED, true);
+
tid_tx = sta->ampdu_mlme.tid_start_tx[tid];
if (tid_tx) {
/*
@@ -180,12 +187,8 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
u16 params;
skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
-
- if (!skb) {
- printk(KERN_ERR "%s: failed to allocate buffer "
- "for delba frame\n", sdata->name);
+ if (!skb)
return;
- }
skb_reserve(skb, local->hw.extra_tx_headroom);
mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index c6399f6..1c018d1 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -81,10 +81,10 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
lockdep_assert_held(&ifibss->mtx);
/* Reset own TSF to allow time synchronization work. */
- drv_reset_tsf(local);
+ drv_reset_tsf(local, sdata);
skb = ifibss->skb;
- rcu_assign_pointer(ifibss->presp, NULL);
+ RCU_INIT_POINTER(ifibss->presp, NULL);
synchronize_rcu();
skb->data = skb->head;
skb->len = 0;
@@ -314,7 +314,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
}
if (sta && elems->wmm_info)
- set_sta_flags(sta, WLAN_STA_WME);
+ set_sta_flag(sta, WLAN_STA_WME);
rcu_read_unlock();
}
@@ -382,7 +382,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
* second best option: get current TSF
* (will return -1 if not supported)
*/
- rx_timestamp = drv_get_tsf(local);
+ rx_timestamp = drv_get_tsf(local, sdata);
}
#ifdef CONFIG_MAC80211_IBSS_DEBUG
@@ -417,7 +417,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
* must be callable in atomic context.
*/
struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
- u8 *bssid,u8 *addr, u32 supp_rates,
+ u8 *bssid, u8 *addr, u32 supp_rates,
gfp_t gfp)
{
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
@@ -452,7 +452,7 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
return NULL;
sta->last_rx = jiffies;
- set_sta_flags(sta, WLAN_STA_AUTHORIZED);
+ set_sta_flag(sta, WLAN_STA_AUTHORIZED);
/* make sure mandatory rates are always added */
sta->sta.supp_rates[band] = supp_rates |
@@ -914,6 +914,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
sdata->u.ibss.privacy = params->privacy;
sdata->u.ibss.basic_rates = params->basic_rates;
+ sdata->u.ibss.last_scan_completed = jiffies;
memcpy(sdata->vif.bss_conf.mcast_rate, params->mcast_rate,
sizeof(params->mcast_rate));
@@ -995,7 +996,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
kfree(sdata->u.ibss.ie);
skb = rcu_dereference_protected(sdata->u.ibss.presp,
lockdep_is_held(&sdata->u.ibss.mtx));
- rcu_assign_pointer(sdata->u.ibss.presp, NULL);
+ RCU_INIT_POINTER(sdata->u.ibss.presp, NULL);
sdata->vif.bss_conf.ibss_joined = false;
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
BSS_CHANGED_IBSS);
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 62b86f0..8da371c5 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -53,11 +53,25 @@ struct ieee80211_local;
#define TU_TO_EXP_TIME(x) (jiffies + usecs_to_jiffies((x) * 1024))
-#define IEEE80211_DEFAULT_UAPSD_QUEUES \
- (IEEE80211_WMM_IE_STA_QOSINFO_AC_BK | \
- IEEE80211_WMM_IE_STA_QOSINFO_AC_BE | \
- IEEE80211_WMM_IE_STA_QOSINFO_AC_VI | \
- IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+/*
+ * Some APs experience problems when working with U-APSD. Decreasing the
+ * probability of that happening by using legacy mode for all ACs but VO isn't
+ * enough.
+ *
+ * Cisco 4410N originally forced us to enable VO by default only because it
+ * treated non-VO ACs as legacy.
+ *
+ * However, some APs (notably Netgear R7000) silently reclassify packets to
+ * different ACs. Since U-APSD ACs require trigger frames for frame retrieval,
+ * clients would never see some frames (e.g. ARP responses) or would fetch them
+ * accidentally after a long time.
+ *
+ * It makes little sense to enable U-APSD queues by default because userspace
+ * applications need to be aware of them to actually take advantage of the
+ * possible additional power savings. Implicitly depending on driver autotrigger
+ * frame support doesn't make much sense.
+ */
+#define IEEE80211_DEFAULT_UAPSD_QUEUES 0
#define IEEE80211_DEFAULT_MAX_SP_LEN \
IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
@@ -136,7 +150,6 @@ typedef unsigned __bitwise__ ieee80211_tx_result;
#define TX_DROP ((__force ieee80211_tx_result) 1u)
#define TX_QUEUED ((__force ieee80211_tx_result) 2u)
-#define IEEE80211_TX_FRAGMENTED BIT(0)
#define IEEE80211_TX_UNICAST BIT(1)
#define IEEE80211_TX_PS_BUFFERED BIT(2)
@@ -149,7 +162,6 @@ struct ieee80211_tx_data {
struct ieee80211_channel *channel;
- u16 ethertype;
unsigned int flags;
};
@@ -202,7 +214,22 @@ struct ieee80211_rx_data {
struct ieee80211_key *key;
unsigned int flags;
- int queue;
+
+ /*
+ * Index into sequence numbers array, 0..16
+ * since the last (16) is used for non-QoS,
+ * will be 16 on non-QoS frames.
+ */
+ int seqno_idx;
+
+ /*
+ * Index into the security IV/PN arrays, 0..16
+ * since the last (16) is used for CCMP-encrypted
+ * management frames, will be set to 16 on mgmt
+ * frames and 0 on non-QoS frames.
+ */
+ int security_idx;
+
u32 tkip_iv32;
u16 tkip_iv16;
};
@@ -246,6 +273,7 @@ struct mesh_stats {
__u32 fwded_frames; /* Mesh total forwarded frames */
__u32 dropped_frames_ttl; /* Not transmitted since mesh_ttl == 0*/
__u32 dropped_frames_no_route; /* Not transmitted, no route found */
+ __u32 dropped_frames_congestion;/* Not forwarded due to congestion */
atomic_t estab_plinks;
};
@@ -308,6 +336,7 @@ struct ieee80211_work {
u8 key[WLAN_KEY_LEN_WEP104];
u8 key_len, key_idx;
bool privacy;
+ bool synced;
} probe_auth;
struct {
struct cfg80211_bss *bss;
@@ -321,6 +350,7 @@ struct ieee80211_work {
u8 ssid_len;
u8 supp_rates_len;
bool wmm_used, use_11n, uapsd_used;
+ bool synced;
} assoc;
struct {
u32 duration;
@@ -419,6 +449,14 @@ struct ieee80211_if_managed {
* generated for the current association.
*/
int last_cqm_event_signal;
+
+ /*
+ * State variables for keeping track of RSSI of the AP currently
+ * connected to and informing driver when RSSI has gone
+ * below/above a certain threshold.
+ */
+ int rssi_min_thold, rssi_max_thold;
+ int last_ave_beacon_signal;
};
struct ieee80211_if_ibss {
@@ -491,6 +529,7 @@ struct ieee80211_if_mesh {
struct mesh_config mshcfg;
u32 mesh_seqnum;
bool accepting_plinks;
+ int num_gates;
const u8 *ie;
u8 ie_len;
enum {
@@ -517,12 +556,14 @@ struct ieee80211_if_mesh {
* @IEEE80211_SDATA_DONT_BRIDGE_PACKETS: bridge packets between
* associated stations and deliver multicast frames both
* back to wireless media and to the local net stack.
+ * @IEEE80211_SDATA_DISCONNECT_RESUME: Disconnect after resume.
*/
enum ieee80211_sub_if_data_flags {
IEEE80211_SDATA_ALLMULTI = BIT(0),
IEEE80211_SDATA_PROMISC = BIT(1),
IEEE80211_SDATA_OPERATING_GMODE = BIT(2),
IEEE80211_SDATA_DONT_BRIDGE_PACKETS = BIT(3),
+ IEEE80211_SDATA_DISCONNECT_RESUME = BIT(4),
};
/**
@@ -546,6 +587,9 @@ struct ieee80211_sub_if_data {
/* keys */
struct list_head key_list;
+ /* count for keys needing tailroom space allocation */
+ int crypto_tx_tailroom_needed_cnt;
+
struct net_device *dev;
struct ieee80211_local *local;
@@ -579,6 +623,8 @@ struct ieee80211_sub_if_data {
__be16 control_port_protocol;
bool control_port_no_encrypt;
+ struct ieee80211_tx_queue_params tx_conf[IEEE80211_MAX_QUEUES];
+
struct work_struct work;
struct sk_buff_head skb_queue;
@@ -632,6 +678,11 @@ enum sdata_queue_type {
enum {
IEEE80211_RX_MSG = 1,
IEEE80211_TX_STATUS_MSG = 2,
+ IEEE80211_EOSP_MSG = 3,
+};
+
+struct skb_eosp_msg_data {
+ u8 sta[ETH_ALEN], iface[ETH_ALEN];
};
enum queue_stop_reason {
@@ -641,6 +692,7 @@ enum queue_stop_reason {
IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
IEEE80211_QUEUE_STOP_REASON_SUSPEND,
IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
+ IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE,
};
#ifdef CONFIG_MAC80211_LEDS
@@ -664,16 +716,22 @@ struct tpt_led_trigger {
* well be on the operating channel
* @SCAN_HW_SCANNING: The hardware is scanning for us, we have no way to
* determine if we are on the operating channel or not
+ * @SCAN_OFF_CHANNEL: We're off our operating channel for scanning,
+ * gets only set in conjunction with SCAN_SW_SCANNING
* @SCAN_COMPLETED: Set for our scan work function when the driver reported
* that the scan completed.
* @SCAN_ABORTED: Set for our scan work function when the driver reported
* a scan complete for an aborted scan.
+ * @SCAN_HW_CANCELLED: Set for our scan work function when the scan is being
+ * cancelled.
*/
enum {
SCAN_SW_SCANNING,
SCAN_HW_SCANNING,
+ SCAN_OFF_CHANNEL,
SCAN_COMPLETED,
SCAN_ABORTED,
+ SCAN_HW_CANCELLED,
};
/**
@@ -973,7 +1031,6 @@ struct ieee80211_local {
unsigned int hw_roc_duration;
u32 hw_roc_cookie;
bool hw_roc_for_tx;
- unsigned long hw_offchan_tx_cookie;
/* dummy netdev for use w/ NAPI */
struct net_device napi_dev;
@@ -993,69 +1050,6 @@ struct ieee80211_ra_tid {
u16 tid;
};
-/* Parsed Information Elements */
-struct ieee802_11_elems {
- u8 *ie_start;
- size_t total_len;
-
- /* pointers to IEs */
- u8 *ssid;
- u8 *supp_rates;
- u8 *fh_params;
- u8 *ds_params;
- u8 *cf_params;
- struct ieee80211_tim_ie *tim;
- u8 *ibss_params;
- u8 *challenge;
- u8 *wpa;
- u8 *rsn;
- u8 *erp_info;
- u8 *ext_supp_rates;
- u8 *wmm_info;
- u8 *wmm_param;
- struct ieee80211_ht_cap *ht_cap_elem;
- struct ieee80211_ht_info *ht_info_elem;
- struct ieee80211_meshconf_ie *mesh_config;
- u8 *mesh_id;
- u8 *peer_link;
- u8 *preq;
- u8 *prep;
- u8 *perr;
- struct ieee80211_rann_ie *rann;
- u8 *ch_switch_elem;
- u8 *country_elem;
- u8 *pwr_constr_elem;
- u8 *quiet_elem; /* first quite element */
- u8 *timeout_int;
-
- /* length of them, respectively */
- u8 ssid_len;
- u8 supp_rates_len;
- u8 fh_params_len;
- u8 ds_params_len;
- u8 cf_params_len;
- u8 tim_len;
- u8 ibss_params_len;
- u8 challenge_len;
- u8 wpa_len;
- u8 rsn_len;
- u8 erp_info_len;
- u8 ext_supp_rates_len;
- u8 wmm_info_len;
- u8 wmm_param_len;
- u8 mesh_id_len;
- u8 peer_link_len;
- u8 preq_len;
- u8 prep_len;
- u8 perr_len;
- u8 ch_switch_elem_len;
- u8 country_elem_len;
- u8 pwr_constr_elem_len;
- u8 quiet_elem_len;
- u8 num_of_quiet_elem; /* can be more the one */
- u8 timeout_int_len;
-};
-
static inline struct ieee80211_local *hw_to_local(
struct ieee80211_hw *hw)
{
@@ -1166,10 +1160,8 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata);
void ieee80211_sched_scan_stopped_work(struct work_struct *work);
/* off-channel helpers */
-bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local);
-void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
- bool tell_ap);
-void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local);
+void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local);
+void ieee80211_offchannel_stop_station(struct ieee80211_local *local);
void ieee80211_offchannel_return(struct ieee80211_local *local,
bool enable_beaconing);
void ieee80211_hw_roc_setup(struct ieee80211_local *local);
@@ -1202,23 +1194,10 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
struct net_device *dev);
-/*
- * radiotap header for status frames
- */
-struct ieee80211_tx_status_rtap_hdr {
- struct ieee80211_radiotap_header hdr;
- u8 rate;
- u8 padding_for_rate;
- __le16 tx_flags;
- u8 data_retries;
-} __packed;
-
-
/* HT */
void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
struct ieee80211_ht_cap *ht_cap_ie,
struct ieee80211_sta_ht_cap *ht_cap);
-void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn);
void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
const u8 *da, u16 tid,
u16 initiator, u16 reason_code);
@@ -1302,6 +1281,7 @@ void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int ke
struct ieee80211_hdr *hdr, const u8 *tsc,
gfp_t gfp);
void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata);
+void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
void ieee802_11_parse_elems(u8 *start, size_t len,
struct ieee802_11_elems *elems);
@@ -1333,11 +1313,11 @@ void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
enum queue_stop_reason reason);
void ieee80211_add_pending_skb(struct ieee80211_local *local,
struct sk_buff *skb);
-int ieee80211_add_pending_skbs(struct ieee80211_local *local,
- struct sk_buff_head *skbs);
-int ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
- struct sk_buff_head *skbs,
- void (*fn)(void *data), void *data);
+void ieee80211_add_pending_skbs(struct ieee80211_local *local,
+ struct sk_buff_head *skbs);
+void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
+ struct sk_buff_head *skbs,
+ void (*fn)(void *data), void *data);
void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
u16 transaction, u16 auth_alg,
@@ -1348,12 +1328,14 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
enum ieee80211_band band, u32 rate_mask,
u8 channel);
struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
- u8 *dst,
+ u8 *dst, u32 ratemask,
const u8 *ssid, size_t ssid_len,
- const u8 *ie, size_t ie_len);
+ const u8 *ie, size_t ie_len,
+ bool directed);
void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
const u8 *ssid, size_t ssid_len,
- const u8 *ie, size_t ie_len);
+ const u8 *ie, size_t ie_len,
+ u32 ratemask, bool directed, bool no_cck);
void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
const size_t supp_rates_len,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index bd1ef84..24ec86f 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -299,8 +299,8 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
goto err_del_interface;
}
- /* no locking required since STA is not live yet */
- sta->flags |= WLAN_STA_AUTHORIZED;
+ /* no atomic bitop required since STA is not live yet */
+ set_sta_flag(sta, WLAN_STA_AUTHORIZED);
res = sta_info_insert(sta);
if (res) {
@@ -363,8 +363,7 @@ static int ieee80211_open(struct net_device *dev)
int err;
/* fail early if user set an invalid address */
- if (!is_zero_ether_addr(dev->dev_addr) &&
- !is_valid_ether_addr(dev->dev_addr))
+ if (!is_valid_ether_addr(dev->dev_addr))
return -EADDRNOTAVAIL;
err = ieee80211_check_concurrent_iface(sdata, sdata->vif.type);
@@ -383,10 +382,12 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
u32 hw_reconf_flags = 0;
int i;
enum nl80211_channel_type orig_ct;
+ bool cancel_scan;
clear_bit(SDATA_STATE_RUNNING, &sdata->state);
- if (local->scan_sdata == sdata)
+ cancel_scan = local->scan_sdata == sdata;
+ if (cancel_scan)
ieee80211_scan_cancel(local);
/*
@@ -457,21 +458,19 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
BSS_CHANGED_BEACON_ENABLED);
/* remove beacon */
- rcu_assign_pointer(sdata->u.ap.beacon, NULL);
+ RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
synchronize_rcu();
kfree(old_beacon);
- /* free all potentially still buffered bcast frames */
- while ((skb = skb_dequeue(&sdata->u.ap.ps_bc_buf))) {
- local->total_ps_buffered--;
- dev_kfree_skb(skb);
- }
-
/* down all dependent devices, that is VLANs */
list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
u.vlan.list)
dev_close(vlan->dev);
WARN_ON(!list_empty(&sdata->u.ap.vlans));
+
+ /* free all potentially still buffered bcast frames */
+ local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps_bc_buf);
+ skb_queue_purge(&sdata->u.ap.ps_bc_buf);
}
if (going_down)
@@ -546,6 +545,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
ieee80211_recalc_ps(local, -1);
+ if (cancel_scan)
+ flush_delayed_work(&local->scan_work);
+
if (local->open_count == 0) {
if (local->ops->napi_poll)
napi_disable(&local->napi);
@@ -658,7 +660,7 @@ static const struct net_device_ops ieee80211_dataif_ops = {
.ndo_stop = ieee80211_stop,
.ndo_uninit = ieee80211_teardown_sdata,
.ndo_start_xmit = ieee80211_subif_start_xmit,
- .ndo_set_multicast_list = ieee80211_set_multicast_list,
+ .ndo_set_rx_mode = ieee80211_set_multicast_list,
.ndo_change_mtu = ieee80211_change_mtu,
.ndo_set_mac_address = ieee80211_change_mac,
.ndo_select_queue = ieee80211_netdev_select_queue,
@@ -702,7 +704,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = {
.ndo_stop = ieee80211_stop,
.ndo_uninit = ieee80211_teardown_sdata,
.ndo_start_xmit = ieee80211_monitor_start_xmit,
- .ndo_set_multicast_list = ieee80211_set_multicast_list,
+ .ndo_set_rx_mode = ieee80211_set_multicast_list,
.ndo_change_mtu = ieee80211_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_select_queue = ieee80211_monitor_select_queue,
@@ -1143,8 +1145,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
ASSERT_RTNL();
- ndev = alloc_netdev_mq(sizeof(*sdata) + local->hw.vif_data_size,
- name, ieee80211_if_setup, local->hw.queues);
+ ndev = alloc_netdev_mqs(sizeof(*sdata) + local->hw.vif_data_size,
+ name, ieee80211_if_setup, local->hw.queues, 1);
if (!ndev)
return -ENOMEM;
dev_net_set(ndev, wiphy_net(local->hw.wiphy));
@@ -1227,6 +1229,9 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
list_del_rcu(&sdata->list);
mutex_unlock(&sdata->local->iflist_mtx);
+ if (ieee80211_vif_is_mesh(&sdata->vif))
+ mesh_path_flush_by_iface(sdata);
+
synchronize_rcu();
unregister_netdevice(sdata->dev);
}
@@ -1255,6 +1260,9 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
list_del(&sdata->list);
+ if (ieee80211_vif_is_mesh(&sdata->vif))
+ mesh_path_flush_by_iface(sdata);
+
unregister_netdevice_queue(sdata->dev, &unreg_list);
}
mutex_unlock(&local->iflist_mtx);
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index f825e2f..fb02ea5 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -15,6 +15,7 @@
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
@@ -61,6 +62,36 @@ static struct ieee80211_sta *get_sta_for_key(struct ieee80211_key *key)
return NULL;
}
+static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
+{
+ /*
+ * When this count is zero, SKB resizing for allocating tailroom
+	 * for IV or MMIC is skipped. However, this check creates two races
+	 * in the xmit path while the count transitions from zero to one:
+	 *
+	 * 1. SKB resize was skipped because no key was added, but just before
+	 * xmit a key is added and SW encryption kicks in.
+	 *
+	 * 2. SKB resize was skipped because all the keys were hw planted, but
+	 * just before xmit one of the keys is deleted and SW encryption kicks
+	 * in.
+	 *
+	 * In both cases above SW encryption does not find enough tailroom and
+	 * exits with a WARN_ON (see the WARN_ONs in wpa.c).
+	 *
+	 * The solution is explained at
+ * http://mid.gmane.org/1308590980.4322.19.camel@jlt3.sipsolutions.net
+ */
+
+ if (!sdata->crypto_tx_tailroom_needed_cnt++) {
+ /*
+ * Flush all XMIT packets currently using HW encryption or no
+ * encryption at all if the count transition is from 0 -> 1.
+ */
+ synchronize_net();
+ }
+}
+
static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
{
struct ieee80211_sub_if_data *sdata;
@@ -101,6 +132,11 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
if (!ret) {
key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
+
+ if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+ (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)))
+ sdata->crypto_tx_tailroom_needed_cnt--;
+
return 0;
}
@@ -142,6 +178,10 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
sta = get_sta_for_key(key);
sdata = key->sdata;
+ if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+ (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)))
+ increment_tailroom_need_count(sdata);
+
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
sdata = container_of(sdata->bss,
struct ieee80211_sub_if_data,
@@ -239,7 +279,7 @@ static void __ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
bool defunikey, defmultikey, defmgmtkey;
if (new)
- list_add(&new->list, &sdata->key_list);
+ list_add_tail(&new->list, &sdata->key_list);
if (sta && pairwise) {
rcu_assign_pointer(sta->ptk, new);
@@ -330,6 +370,7 @@ struct ieee80211_key *ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
get_unaligned_le16(seq);
}
}
+ spin_lock_init(&key->u.tkip.txlock);
break;
case WLAN_CIPHER_SUITE_CCMP:
key->conf.iv_len = CCMP_HDR_LEN;
@@ -394,8 +435,10 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key)
ieee80211_aes_key_free(key->u.ccmp.tfm);
if (key->conf.cipher == WLAN_CIPHER_SUITE_AES_CMAC)
ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm);
- if (key->local)
+ if (key->local) {
ieee80211_debugfs_key_remove(key);
+ key->sdata->crypto_tx_tailroom_needed_cnt--;
+ }
kfree(key);
}
@@ -422,7 +465,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
* some hardware cannot handle TKIP with QoS, so
* we indicate whether QoS could be in use.
*/
- if (test_sta_flags(sta, WLAN_STA_WME))
+ if (test_sta_flag(sta, WLAN_STA_WME))
key->conf.flags |= IEEE80211_KEY_FLAG_WMM_STA;
} else {
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
@@ -436,7 +479,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
/* same here, the AP could be using QoS */
ap = sta_info_get(key->sdata, key->sdata->u.mgd.bssid);
if (ap) {
- if (test_sta_flags(ap, WLAN_STA_WME))
+ if (test_sta_flag(ap, WLAN_STA_WME))
key->conf.flags |=
IEEE80211_KEY_FLAG_WMM_STA;
}
@@ -452,6 +495,8 @@ int ieee80211_key_link(struct ieee80211_key *key,
else
old_key = key_mtx_dereference(sdata->local, sdata->keys[idx]);
+ increment_tailroom_need_count(sdata);
+
__ieee80211_key_replace(sdata, sta, pairwise, old_key, key);
__ieee80211_key_destroy(old_key);
@@ -498,12 +543,49 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
mutex_lock(&sdata->local->key_mtx);
- list_for_each_entry(key, &sdata->key_list, list)
+ sdata->crypto_tx_tailroom_needed_cnt = 0;
+
+ list_for_each_entry(key, &sdata->key_list, list) {
+ increment_tailroom_need_count(sdata);
ieee80211_key_enable_hw_accel(key);
+ }
mutex_unlock(&sdata->local->key_mtx);
}
+void ieee80211_iter_keys(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ void (*iter)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *data),
+ void *iter_data)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+ struct ieee80211_key *key;
+ struct ieee80211_sub_if_data *sdata;
+
+ ASSERT_RTNL();
+
+ mutex_lock(&local->key_mtx);
+ if (vif) {
+ sdata = vif_to_sdata(vif);
+ list_for_each_entry(key, &sdata->key_list, list)
+ iter(hw, &sdata->vif,
+ key->sta ? &key->sta->sta : NULL,
+ &key->conf, iter_data);
+ } else {
+ list_for_each_entry(sdata, &local->interfaces, list)
+ list_for_each_entry(key, &sdata->key_list, list)
+ iter(hw, &sdata->vif,
+ key->sta ? &key->sta->sta : NULL,
+ &key->conf, iter_data);
+ }
+ mutex_unlock(&local->key_mtx);
+}
+EXPORT_SYMBOL(ieee80211_iter_keys);
+
void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_key *key;
@@ -533,3 +615,89 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata)
mutex_unlock(&sdata->local->key_mtx);
}
+
+
+void ieee80211_gtk_rekey_notify(struct ieee80211_vif *vif, const u8 *bssid,
+ const u8 *replay_ctr, gfp_t gfp)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+ trace_api_gtk_rekey_notify(sdata, bssid, replay_ctr);
+
+ cfg80211_gtk_rekey_notify(sdata->dev, bssid, replay_ctr, gfp);
+}
+EXPORT_SYMBOL_GPL(ieee80211_gtk_rekey_notify);
+
+void ieee80211_get_key_tx_seq(struct ieee80211_key_conf *keyconf,
+ struct ieee80211_key_seq *seq)
+{
+ struct ieee80211_key *key;
+ u64 pn64;
+
+ if (WARN_ON(!(keyconf->flags & IEEE80211_KEY_FLAG_GENERATE_IV)))
+ return;
+
+ key = container_of(keyconf, struct ieee80211_key, conf);
+
+ switch (key->conf.cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ seq->tkip.iv32 = key->u.tkip.tx.iv32;
+ seq->tkip.iv16 = key->u.tkip.tx.iv16;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ pn64 = atomic64_read(&key->u.ccmp.tx_pn);
+ seq->ccmp.pn[5] = pn64;
+ seq->ccmp.pn[4] = pn64 >> 8;
+ seq->ccmp.pn[3] = pn64 >> 16;
+ seq->ccmp.pn[2] = pn64 >> 24;
+ seq->ccmp.pn[1] = pn64 >> 32;
+ seq->ccmp.pn[0] = pn64 >> 40;
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ pn64 = atomic64_read(&key->u.aes_cmac.tx_pn);
+ seq->ccmp.pn[5] = pn64;
+ seq->ccmp.pn[4] = pn64 >> 8;
+ seq->ccmp.pn[3] = pn64 >> 16;
+ seq->ccmp.pn[2] = pn64 >> 24;
+ seq->ccmp.pn[1] = pn64 >> 32;
+ seq->ccmp.pn[0] = pn64 >> 40;
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+EXPORT_SYMBOL(ieee80211_get_key_tx_seq);
+
+void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf,
+ int tid, struct ieee80211_key_seq *seq)
+{
+ struct ieee80211_key *key;
+ const u8 *pn;
+
+ key = container_of(keyconf, struct ieee80211_key, conf);
+
+ switch (key->conf.cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ if (WARN_ON(tid < 0 || tid >= NUM_RX_DATA_QUEUES))
+ return;
+ seq->tkip.iv32 = key->u.tkip.rx[tid].iv32;
+ seq->tkip.iv16 = key->u.tkip.rx[tid].iv16;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ if (WARN_ON(tid < -1 || tid >= NUM_RX_DATA_QUEUES))
+ return;
+ if (tid < 0)
+ pn = key->u.ccmp.rx_pn[NUM_RX_DATA_QUEUES];
+ else
+ pn = key->u.ccmp.rx_pn[tid];
+ memcpy(seq->ccmp.pn, pn, CCMP_PN_LEN);
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ if (WARN_ON(tid != 0))
+ return;
+ pn = key->u.aes_cmac.rx_pn;
+ memcpy(seq->aes_cmac.pn, pn, CMAC_PN_LEN);
+ break;
+ }
+}
+EXPORT_SYMBOL(ieee80211_get_key_rx_seq);
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index d801d53..7d4e31f 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -28,8 +28,9 @@
#define CCMP_PN_LEN 6
#define TKIP_IV_LEN 8
#define TKIP_ICV_LEN 4
+#define CMAC_PN_LEN 6
-#define NUM_RX_DATA_QUEUES 17
+#define NUM_RX_DATA_QUEUES 16
struct ieee80211_local;
struct ieee80211_sub_if_data;
@@ -40,9 +41,11 @@ struct sta_info;
*
* @KEY_FLAG_UPLOADED_TO_HARDWARE: Indicates that this key is present
* in the hardware for TX crypto hardware acceleration.
+ * @KEY_FLAG_TAINTED: Key is tainted and packets should be dropped.
*/
enum ieee80211_internal_key_flags {
KEY_FLAG_UPLOADED_TO_HARDWARE = BIT(0),
+ KEY_FLAG_TAINTED = BIT(1),
};
enum ieee80211_internal_tkip_state {
@@ -52,9 +55,10 @@ enum ieee80211_internal_tkip_state {
};
struct tkip_ctx {
- u32 iv32;
- u16 iv16;
- u16 p1k[5];
+ u32 iv32; /* current iv32 */
+ u16 iv16; /* current iv16 */
+ u16 p1k[5]; /* p1k cache */
+ u32 p1k_iv32; /* iv32 for which p1k computed */
enum ieee80211_internal_tkip_state state;
};
@@ -71,6 +75,9 @@ struct ieee80211_key {
union {
struct {
+ /* protects tx context */
+ spinlock_t txlock;
+
/* last used TSC */
struct tkip_ctx tx;
@@ -78,32 +85,23 @@ struct ieee80211_key {
struct tkip_ctx rx[NUM_RX_DATA_QUEUES];
} tkip;
struct {
- u8 tx_pn[6];
+ atomic64_t tx_pn;
/*
* Last received packet number. The first
* NUM_RX_DATA_QUEUES counters are used with Data
* frames and the last counter is used with Robust
* Management frames.
*/
- u8 rx_pn[NUM_RX_DATA_QUEUES + 1][6];
+ u8 rx_pn[NUM_RX_DATA_QUEUES + 1][CCMP_PN_LEN];
struct crypto_cipher *tfm;
u32 replays; /* dot11RSNAStatsCCMPReplays */
- /* scratch buffers for virt_to_page() (crypto API) */
-#ifndef AES_BLOCK_LEN
-#define AES_BLOCK_LEN 16
-#endif
- u8 tx_crypto_buf[6 * AES_BLOCK_LEN];
- u8 rx_crypto_buf[6 * AES_BLOCK_LEN];
} ccmp;
struct {
- u8 tx_pn[6];
- u8 rx_pn[6];
+ atomic64_t tx_pn;
+ u8 rx_pn[CMAC_PN_LEN];
struct crypto_cipher *tfm;
u32 replays; /* dot11RSNAStatsCMACReplays */
u32 icverrors; /* dot11RSNAStatsCMACICVErrors */
- /* scratch buffers for virt_to_page() (crypto API) */
- u8 tx_crypto_buf[2 * AES_BLOCK_LEN];
- u8 rx_crypto_buf[2 * AES_BLOCK_LEN];
} aes_cmac;
} u;
diff --git a/net/mac80211/led.c b/net/mac80211/led.c
index 1459033..1bf7903 100644
--- a/net/mac80211/led.c
+++ b/net/mac80211/led.c
@@ -9,6 +9,7 @@
/* just for IFNAMSIZ */
#include <linux/if.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include "led.h"
void ieee80211_led_rx(struct ieee80211_local *local)
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 1e36fb3..7d9b21d 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -19,7 +19,7 @@
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
#include <linux/bitmap.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
#include <linux/inetdevice.h>
#include <net/net_namespace.h>
#include <net/cfg80211.h>
@@ -92,47 +92,6 @@ static void ieee80211_reconfig_filter(struct work_struct *work)
ieee80211_configure_filter(local);
}
-/*
- * Returns true if we are logically configured to be on
- * the operating channel AND the hardware-conf is currently
- * configured on the operating channel. Compares channel-type
- * as well.
- */
-bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local)
-{
- struct ieee80211_channel *chan, *scan_chan;
- enum nl80211_channel_type channel_type;
-
- /* This logic needs to match logic in ieee80211_hw_config */
- if (local->scan_channel) {
- chan = local->scan_channel;
- /* If scanning on oper channel, use whatever channel-type
- * is currently in use.
- */
- if (chan == local->oper_channel)
- channel_type = local->_oper_channel_type;
- else
- channel_type = NL80211_CHAN_NO_HT;
- } else if (local->tmp_channel) {
- chan = scan_chan = local->tmp_channel;
- channel_type = local->tmp_channel_type;
- } else {
- chan = local->oper_channel;
- channel_type = local->_oper_channel_type;
- }
-
- if (chan != local->oper_channel ||
- channel_type != local->_oper_channel_type)
- return false;
-
- /* Check current hardware-config against oper_channel. */
- if ((local->oper_channel != local->hw.conf.channel) ||
- (local->_oper_channel_type != local->hw.conf.channel_type))
- return false;
-
- return true;
-}
-
int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
{
struct ieee80211_channel *chan, *scan_chan;
@@ -145,9 +104,6 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
scan_chan = local->scan_channel;
- /* If this off-channel logic ever changes, ieee80211_on_oper_channel
- * may need to change as well.
- */
offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
if (scan_chan) {
chan = scan_chan;
@@ -158,19 +114,17 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
channel_type = local->_oper_channel_type;
else
channel_type = NL80211_CHAN_NO_HT;
- } else if (local->tmp_channel) {
+ local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
+ } else if (local->tmp_channel &&
+ local->oper_channel != local->tmp_channel) {
chan = scan_chan = local->tmp_channel;
channel_type = local->tmp_channel_type;
+ local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
} else {
chan = local->oper_channel;
channel_type = local->_oper_channel_type;
- }
-
- if (chan != local->oper_channel ||
- channel_type != local->_oper_channel_type)
- local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
- else
local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL;
+ }
offchannel_flag ^= local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
@@ -279,7 +233,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
if (changed & BSS_CHANGED_BEACON_ENABLED) {
if (local->quiescing || !ieee80211_sdata_running(sdata) ||
- test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)) {
+ test_bit(SCAN_SW_SCANNING, &local->scanning)) {
sdata->vif.bss_conf.enable_beacon = false;
} else {
/*
@@ -325,6 +279,8 @@ u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
static void ieee80211_tasklet_handler(unsigned long data)
{
struct ieee80211_local *local = (struct ieee80211_local *) data;
+ struct sta_info *sta, *tmp;
+ struct skb_eosp_msg_data *eosp_data;
struct sk_buff *skb;
while ((skb = skb_dequeue(&local->skb_queue)) ||
@@ -340,6 +296,18 @@ static void ieee80211_tasklet_handler(unsigned long data)
skb->pkt_type = 0;
ieee80211_tx_status(local_to_hw(local), skb);
break;
+ case IEEE80211_EOSP_MSG:
+ eosp_data = (void *)skb->cb;
+ for_each_sta_info(local, eosp_data->sta, sta, tmp) {
+ /* skip wrong virtual interface */
+ if (memcmp(eosp_data->iface,
+ sta->sdata->vif.addr, ETH_ALEN))
+ continue;
+ clear_sta_flag(sta, WLAN_STA_SP);
+ break;
+ }
+ dev_kfree_skb(skb);
+ break;
default:
WARN(1, "mac80211: Packet is of unknown type %d\n",
skb->pkt_type);
@@ -608,6 +576,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
local->hw.max_rates = 1;
local->hw.max_report_rates = 0;
local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+ local->hw.max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
local->user_power_level = -1;
@@ -742,6 +711,12 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
if (!local->int_scan_req)
return -ENOMEM;
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ if (!local->hw.wiphy->bands[band])
+ continue;
+ local->int_scan_req->rates[band] = (u32) -1;
+ }
+
/* if low-level driver supports AP, we also support VLAN */
if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);
@@ -862,6 +837,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
if (local->ops->sched_scan_start)
local->hw.wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+ /* mac80211 based drivers don't support internal TDLS setup */
+ if (local->hw.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS)
+ local->hw.wiphy->flags |= WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
+
result = wiphy_register(local->hw.wiphy);
if (result < 0)
goto fail_wiphy_register;
@@ -885,12 +864,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
* and we need some headroom for passing the frame to monitor
* interfaces, but never both at the same time.
*/
-#ifndef __CHECKER__
- BUILD_BUG_ON(IEEE80211_TX_STATUS_HEADROOM !=
- sizeof(struct ieee80211_tx_status_rtap_hdr));
-#endif
local->tx_headroom = max_t(unsigned int , local->hw.extra_tx_headroom,
- sizeof(struct ieee80211_tx_status_rtap_hdr));
+ IEEE80211_TX_STATUS_HEADROOM);
debugfs_hw_add(local);
@@ -1012,7 +987,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
cancel_work_sync(&local->reconfig_filter);
ieee80211_clear_tx_pending(local);
- sta_info_stop(local);
rate_control_deinitialize(local);
if (skb_queue_len(&local->skb_queue) ||
@@ -1024,6 +998,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
destroy_workqueue(local->workqueue);
wiphy_unregister(local->hw.wiphy);
+ sta_info_stop(local);
ieee80211_wep_free(local);
ieee80211_led_exit(local);
kfree(local->int_scan_req);
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 370aa94..f85de8e 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -13,10 +13,6 @@
#include "ieee80211_i.h"
#include "mesh.h"
-#define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ)
-#define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ)
-#define IEEE80211_MESH_RANN_INTERVAL (1 * HZ)
-
#define MESHCONF_CAPAB_ACCEPT_PLINKS 0x01
#define MESHCONF_CAPAB_FORWARDING 0x08
@@ -27,6 +23,17 @@
int mesh_allocated;
static struct kmem_cache *rm_cache;
+#ifdef CONFIG_MAC80211_MESH
+bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt)
+{
+ return (mgmt->u.action.u.mesh_action.action_code ==
+ WLAN_MESH_ACTION_HWMP_PATH_SELECTION);
+}
+#else
+bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt)
+{ return false; }
+#endif
+
void ieee80211s_init(void)
{
mesh_pathtbl_init();
@@ -193,10 +200,9 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
}
p = kmem_cache_alloc(rm_cache, GFP_ATOMIC);
- if (!p) {
- printk(KERN_DEBUG "o11s: could not allocate RMC entry\n");
+ if (!p)
return 0;
- }
+
p->seqnum = seqnum;
p->exp_time = jiffies + RMC_TIMEOUT;
memcpy(p->sa, sa, ETH_ALEN);
@@ -204,89 +210,136 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
return 0;
}
-void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
+int
+mesh_add_meshconf_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_supported_band *sband;
- u8 *pos;
- int len, i, rate;
- u8 neighbors;
-
- sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
- len = sband->n_bitrates;
- if (len > 8)
- len = 8;
- pos = skb_put(skb, len + 2);
- *pos++ = WLAN_EID_SUPP_RATES;
- *pos++ = len;
- for (i = 0; i < len; i++) {
- rate = sband->bitrates[i].bitrate;
- *pos++ = (u8) (rate / 5);
- }
-
- if (sband->n_bitrates > len) {
- pos = skb_put(skb, sband->n_bitrates - len + 2);
- *pos++ = WLAN_EID_EXT_SUPP_RATES;
- *pos++ = sband->n_bitrates - len;
- for (i = len; i < sband->n_bitrates; i++) {
- rate = sband->bitrates[i].bitrate;
- *pos++ = (u8) (rate / 5);
- }
- }
-
- if (sband->band == IEEE80211_BAND_2GHZ) {
- pos = skb_put(skb, 2 + 1);
- *pos++ = WLAN_EID_DS_PARAMS;
- *pos++ = 1;
- *pos++ = ieee80211_frequency_to_channel(local->hw.conf.channel->center_freq);
- }
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ u8 *pos, neighbors;
+ u8 meshconf_len = sizeof(struct ieee80211_meshconf_ie);
- pos = skb_put(skb, 2 + sdata->u.mesh.mesh_id_len);
- *pos++ = WLAN_EID_MESH_ID;
- *pos++ = sdata->u.mesh.mesh_id_len;
- if (sdata->u.mesh.mesh_id_len)
- memcpy(pos, sdata->u.mesh.mesh_id, sdata->u.mesh.mesh_id_len);
+ if (skb_tailroom(skb) < 2 + meshconf_len)
+ return -ENOMEM;
- pos = skb_put(skb, 2 + sizeof(struct ieee80211_meshconf_ie));
+ pos = skb_put(skb, 2 + meshconf_len);
*pos++ = WLAN_EID_MESH_CONFIG;
- *pos++ = sizeof(struct ieee80211_meshconf_ie);
+ *pos++ = meshconf_len;
/* Active path selection protocol ID */
- *pos++ = sdata->u.mesh.mesh_pp_id;
-
+ *pos++ = ifmsh->mesh_pp_id;
/* Active path selection metric ID */
- *pos++ = sdata->u.mesh.mesh_pm_id;
-
+ *pos++ = ifmsh->mesh_pm_id;
/* Congestion control mode identifier */
- *pos++ = sdata->u.mesh.mesh_cc_id;
-
+ *pos++ = ifmsh->mesh_cc_id;
/* Synchronization protocol identifier */
- *pos++ = sdata->u.mesh.mesh_sp_id;
-
+ *pos++ = ifmsh->mesh_sp_id;
/* Authentication Protocol identifier */
- *pos++ = sdata->u.mesh.mesh_auth_id;
-
+ *pos++ = ifmsh->mesh_auth_id;
/* Mesh Formation Info - number of neighbors */
- neighbors = atomic_read(&sdata->u.mesh.mshstats.estab_plinks);
+ neighbors = atomic_read(&ifmsh->mshstats.estab_plinks);
/* Number of neighbor mesh STAs or 15 whichever is smaller */
neighbors = (neighbors > 15) ? 15 : neighbors;
*pos++ = neighbors << 1;
-
/* Mesh capability */
- sdata->u.mesh.accepting_plinks = mesh_plink_availables(sdata);
+ ifmsh->accepting_plinks = mesh_plink_availables(sdata);
*pos = MESHCONF_CAPAB_FORWARDING;
- *pos++ |= sdata->u.mesh.accepting_plinks ?
+ *pos++ |= ifmsh->accepting_plinks ?
MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
*pos++ = 0x00;
- if (sdata->u.mesh.ie) {
- int len = sdata->u.mesh.ie_len;
- const u8 *data = sdata->u.mesh.ie;
- if (skb_tailroom(skb) > len)
- memcpy(skb_put(skb, len), data, len);
+ return 0;
+}
+
+int
+mesh_add_meshid_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ u8 *pos;
+
+ if (skb_tailroom(skb) < 2 + ifmsh->mesh_id_len)
+ return -ENOMEM;
+
+ pos = skb_put(skb, 2 + ifmsh->mesh_id_len);
+ *pos++ = WLAN_EID_MESH_ID;
+ *pos++ = ifmsh->mesh_id_len;
+ if (ifmsh->mesh_id_len)
+ memcpy(pos, ifmsh->mesh_id, ifmsh->mesh_id_len);
+
+ return 0;
+}
+
+int
+mesh_add_vendor_ies(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ u8 offset, len;
+ const u8 *data;
+
+ if (!ifmsh->ie || !ifmsh->ie_len)
+ return 0;
+
+ /* fast-forward to vendor IEs */
+ offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0);
+
+ if (offset) {
+ len = ifmsh->ie_len - offset;
+ data = ifmsh->ie + offset;
+ if (skb_tailroom(skb) < len)
+ return -ENOMEM;
+ memcpy(skb_put(skb, len), data, len);
}
+
+ return 0;
}
+int
+mesh_add_rsn_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ u8 len = 0;
+ const u8 *data;
+
+ if (!ifmsh->ie || !ifmsh->ie_len)
+ return 0;
+
+ /* find RSN IE */
+ data = ifmsh->ie;
+ while (data < ifmsh->ie + ifmsh->ie_len) {
+ if (*data == WLAN_EID_RSN) {
+ len = data[1] + 2;
+ break;
+ }
+ data++;
+ }
+
+ if (len) {
+ if (skb_tailroom(skb) < len)
+ return -ENOMEM;
+ memcpy(skb_put(skb, len), data, len);
+ }
+
+ return 0;
+}
+
+int mesh_add_ds_params_ie(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_supported_band *sband;
+ u8 *pos;
+
+ if (skb_tailroom(skb) < 3)
+ return -ENOMEM;
+
+ sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+ if (sband->band == IEEE80211_BAND_2GHZ) {
+ pos = skb_put(skb, 2 + 1);
+ *pos++ = WLAN_EID_DS_PARAMS;
+ *pos++ = 1;
+ *pos++ = ieee80211_frequency_to_channel(local->hw.conf.channel->center_freq);
+ }
+
+ return 0;
+}
static void ieee80211_mesh_path_timer(unsigned long data)
{
@@ -352,8 +405,7 @@ int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
memcpy(hdr->addr3, meshsa, ETH_ALEN);
return 24;
} else {
- *fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
- IEEE80211_FCTL_TODS);
+ *fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
/* RA TA DA SA */
memset(hdr->addr1, 0, ETH_ALEN); /* RA is resolved later */
memcpy(hdr->addr2, meshsa, ETH_ALEN);
@@ -425,7 +477,8 @@ static void ieee80211_mesh_rootpath(struct ieee80211_sub_if_data *sdata)
mesh_path_tx_root_frame(sdata);
mod_timer(&ifmsh->mesh_path_root_timer,
- round_jiffies(jiffies + IEEE80211_MESH_RANN_INTERVAL));
+ round_jiffies(TU_TO_EXP_TIME(
+ ifmsh->mshcfg.dot11MeshHWMPRannInterval)));
}
#ifdef CONFIG_PM
@@ -433,7 +486,7 @@ void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- /* use atomic bitops in case both timers fire at the same time */
+ /* use atomic bitops in case all timers fire at the same time */
if (del_timer_sync(&ifmsh->housekeeping_timer))
set_bit(TMR_RUNNING_HK, &ifmsh->timers_running);
@@ -558,11 +611,18 @@ static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata,
struct ieee80211_rx_status *rx_status)
{
switch (mgmt->u.action.category) {
- case WLAN_CATEGORY_MESH_ACTION:
- mesh_rx_plink_frame(sdata, mgmt, len, rx_status);
+ case WLAN_CATEGORY_SELF_PROTECTED:
+ switch (mgmt->u.action.u.self_prot.action_code) {
+ case WLAN_SP_MESH_PEERING_OPEN:
+ case WLAN_SP_MESH_PEERING_CLOSE:
+ case WLAN_SP_MESH_PEERING_CONFIRM:
+ mesh_rx_plink_frame(sdata, mgmt, len, rx_status);
+ break;
+ }
break;
- case WLAN_CATEGORY_MESH_PATH_SEL:
- mesh_rx_path_sel_frame(sdata, mgmt, len);
+ case WLAN_CATEGORY_MESH_ACTION:
+ if (mesh_action_is_path_sel(mgmt))
+ mesh_rx_path_sel_frame(sdata, mgmt, len);
break;
}
}
@@ -634,6 +694,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
ifmsh->accepting_plinks = true;
ifmsh->preq_id = 0;
ifmsh->sn = 0;
+ ifmsh->num_gates = 0;
atomic_set(&ifmsh->mpaths, 0);
mesh_rmc_init(sdata);
ifmsh->last_preq = jiffies;
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 249e733..8c00e2d 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -80,7 +80,10 @@ enum mesh_deferred_task_flags {
* retry
* @discovery_retries: number of discovery retries
* @flags: mesh path flags, as specified on &enum mesh_path_flags
- * @state_lock: mesh path state lock
+ * @state_lock: mesh path state lock used to protect changes to the
+ *	mpath itself. There is no need to take this lock when adding an
+ *	mpath to, or removing it from, a hash bucket of a path table.
+ * @is_gate: the destination station of this path is a mesh gate
*
*
* The combination of dst and sdata is unique in the mesh path table. Since the
@@ -104,6 +107,7 @@ struct mesh_path {
u8 discovery_retries;
enum mesh_path_flags flags;
spinlock_t state_lock;
+ bool is_gate;
};
/**
@@ -120,6 +124,9 @@ struct mesh_path {
* buckets
* @mean_chain_len: maximum average length for the hash buckets' list, if it is
* reached, the table will grow
+ * @known_gates: list of mesh gates known to this station, together with
+ *	their mpaths. A gate's mpath may or may not be resolved and active.
+ *
* rcu_head: RCU head to free the table
*/
struct mesh_table {
@@ -133,6 +140,8 @@ struct mesh_table {
int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl);
int size_order;
int mean_chain_len;
+ struct hlist_head *known_gates;
+ spinlock_t gates_lock;
struct rcu_head rcu_head;
};
@@ -166,6 +175,8 @@ struct mesh_rmc {
u32 idx_mask;
};
+#define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ)
+#define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ)
#define MESH_DEFAULT_BEACON_INTERVAL 1000 /* in 1024 us units */
@@ -177,14 +188,6 @@ struct mesh_rmc {
/* Maximum number of paths per interface */
#define MESH_MAX_MPATHS 1024
-/* Pending ANA approval */
-#define MESH_PATH_SEL_ACTION 0
-
-/* PERR reason codes */
-#define PEER_RCODE_UNSPECIFIED 11
-#define PERR_RCODE_NO_ROUTE 12
-#define PERR_RCODE_DEST_UNREACH 13
-
/* Public interfaces */
/* Various */
int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
@@ -199,6 +202,16 @@ bool mesh_matches_local(struct ieee802_11_elems *ie,
void mesh_ids_set_default(struct ieee80211_if_mesh *mesh);
void mesh_mgmt_ies_add(struct sk_buff *skb,
struct ieee80211_sub_if_data *sdata);
+int mesh_add_meshconf_ie(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata);
+int mesh_add_meshid_ie(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata);
+int mesh_add_rsn_ie(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata);
+int mesh_add_vendor_ies(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata);
+int mesh_add_ds_params_ie(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata);
void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
void ieee80211s_init(void);
@@ -223,10 +236,13 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx,
struct ieee80211_sub_if_data *sdata);
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop);
void mesh_path_expire(struct ieee80211_sub_if_data *sdata);
-void mesh_path_flush(struct ieee80211_sub_if_data *sdata);
void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt, size_t len);
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata);
+
+int mesh_path_add_gate(struct mesh_path *mpath);
+int mesh_path_send_to_gates(struct mesh_path *mpath);
+int mesh_gate_num(struct ieee80211_sub_if_data *sdata);
/* Mesh plinks */
void mesh_neighbour_update(u8 *hw_addr, u32 rates,
struct ieee80211_sub_if_data *sdata,
@@ -256,12 +272,14 @@ void mesh_pathtbl_unregister(void);
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata);
void mesh_path_timer(unsigned long data);
void mesh_path_flush_by_nexthop(struct sta_info *sta);
+void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata);
void mesh_path_discard_frame(struct sk_buff *skb,
struct ieee80211_sub_if_data *sdata);
void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata);
void mesh_path_restart(struct ieee80211_sub_if_data *sdata);
void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata);
+bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt);
extern int mesh_paths_generation;
#ifdef CONFIG_MAC80211_MESH
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 2b18053..174040a 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -8,10 +8,12 @@
*/
#include <linux/slab.h>
+#include "wme.h"
#include "mesh.h"
#ifdef CONFIG_MAC80211_VERBOSE_MHWMP_DEBUG
-#define mhwmp_dbg(fmt, args...) printk(KERN_DEBUG "Mesh HWMP: " fmt, ##args)
+#define mhwmp_dbg(fmt, args...) \
+ printk(KERN_DEBUG "Mesh HWMP (%s): " fmt "\n", sdata->name, ##args)
#else
#define mhwmp_dbg(fmt, args...) do { (void)(0); } while (0)
#endif
@@ -57,29 +59,29 @@ static inline u32 u16_field_get(u8 *preq_elem, int offset, bool ae)
#define PREQ_IE_TTL(x) (*(x + 2))
#define PREQ_IE_PREQ_ID(x) u32_field_get(x, 3, 0)
#define PREQ_IE_ORIG_ADDR(x) (x + 7)
-#define PREQ_IE_ORIG_SN(x) u32_field_get(x, 13, 0);
-#define PREQ_IE_LIFETIME(x) u32_field_get(x, 17, AE_F_SET(x));
-#define PREQ_IE_METRIC(x) u32_field_get(x, 21, AE_F_SET(x));
+#define PREQ_IE_ORIG_SN(x) u32_field_get(x, 13, 0)
+#define PREQ_IE_LIFETIME(x) u32_field_get(x, 17, AE_F_SET(x))
+#define PREQ_IE_METRIC(x) u32_field_get(x, 21, AE_F_SET(x))
#define PREQ_IE_TARGET_F(x) (*(AE_F_SET(x) ? x + 32 : x + 26))
#define PREQ_IE_TARGET_ADDR(x) (AE_F_SET(x) ? x + 33 : x + 27)
-#define PREQ_IE_TARGET_SN(x) u32_field_get(x, 33, AE_F_SET(x));
+#define PREQ_IE_TARGET_SN(x) u32_field_get(x, 33, AE_F_SET(x))
#define PREP_IE_FLAGS(x) PREQ_IE_FLAGS(x)
#define PREP_IE_HOPCOUNT(x) PREQ_IE_HOPCOUNT(x)
#define PREP_IE_TTL(x) PREQ_IE_TTL(x)
-#define PREP_IE_ORIG_ADDR(x) (x + 3)
-#define PREP_IE_ORIG_SN(x) u32_field_get(x, 9, 0);
-#define PREP_IE_LIFETIME(x) u32_field_get(x, 13, AE_F_SET(x));
-#define PREP_IE_METRIC(x) u32_field_get(x, 17, AE_F_SET(x));
-#define PREP_IE_TARGET_ADDR(x) (AE_F_SET(x) ? x + 27 : x + 21)
-#define PREP_IE_TARGET_SN(x) u32_field_get(x, 27, AE_F_SET(x));
+#define PREP_IE_ORIG_ADDR(x) (AE_F_SET(x) ? x + 27 : x + 21)
+#define PREP_IE_ORIG_SN(x) u32_field_get(x, 27, AE_F_SET(x))
+#define PREP_IE_LIFETIME(x) u32_field_get(x, 13, AE_F_SET(x))
+#define PREP_IE_METRIC(x) u32_field_get(x, 17, AE_F_SET(x))
+#define PREP_IE_TARGET_ADDR(x) (x + 3)
+#define PREP_IE_TARGET_SN(x) u32_field_get(x, 9, 0)
#define PERR_IE_TTL(x) (*(x))
#define PERR_IE_TARGET_FLAGS(x) (*(x + 2))
#define PERR_IE_TARGET_ADDR(x) (x + 3)
-#define PERR_IE_TARGET_SN(x) u32_field_get(x, 9, 0);
-#define PERR_IE_TARGET_RCODE(x) u16_field_get(x, 13, 0);
+#define PERR_IE_TARGET_SN(x) u32_field_get(x, 9, 0)
+#define PERR_IE_TARGET_RCODE(x) u16_field_get(x, 13, 0)
#define MSEC_TO_TU(x) (x*1000/1024)
#define SN_GT(x, y) ((long) (y) - (long) (x) < 0)
@@ -132,24 +134,25 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
/* BSSID == SA */
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
- mgmt->u.action.category = WLAN_CATEGORY_MESH_PATH_SEL;
- mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION;
+ mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
+ mgmt->u.action.u.mesh_action.action_code =
+ WLAN_MESH_ACTION_HWMP_PATH_SELECTION;
switch (action) {
case MPATH_PREQ:
- mhwmp_dbg("sending PREQ to %pM\n", target);
+ mhwmp_dbg("sending PREQ to %pM", target);
ie_len = 37;
pos = skb_put(skb, 2 + ie_len);
*pos++ = WLAN_EID_PREQ;
break;
case MPATH_PREP:
- mhwmp_dbg("sending PREP to %pM\n", target);
+ mhwmp_dbg("sending PREP to %pM", target);
ie_len = 31;
pos = skb_put(skb, 2 + ie_len);
*pos++ = WLAN_EID_PREP;
break;
case MPATH_RANN:
- mhwmp_dbg("sending RANN from %pM\n", orig_addr);
+ mhwmp_dbg("sending RANN from %pM", orig_addr);
ie_len = sizeof(struct ieee80211_rann_ie);
pos = skb_put(skb, 2 + ie_len);
*pos++ = WLAN_EID_RANN;
@@ -163,35 +166,63 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
*pos++ = flags;
*pos++ = hop_count;
*pos++ = ttl;
- if (action == MPATH_PREQ) {
- memcpy(pos, &preq_id, 4);
+ if (action == MPATH_PREP) {
+ memcpy(pos, target, ETH_ALEN);
+ pos += ETH_ALEN;
+ memcpy(pos, &target_sn, 4);
pos += 4;
- }
- memcpy(pos, orig_addr, ETH_ALEN);
- pos += ETH_ALEN;
- memcpy(pos, &orig_sn, 4);
- pos += 4;
- if (action != MPATH_RANN) {
- memcpy(pos, &lifetime, 4);
+ } else {
+ if (action == MPATH_PREQ) {
+ memcpy(pos, &preq_id, 4);
+ pos += 4;
+ }
+ memcpy(pos, orig_addr, ETH_ALEN);
+ pos += ETH_ALEN;
+ memcpy(pos, &orig_sn, 4);
pos += 4;
}
+ memcpy(pos, &lifetime, 4); /* interval for RANN */
+ pos += 4;
memcpy(pos, &metric, 4);
pos += 4;
if (action == MPATH_PREQ) {
- /* destination count */
- *pos++ = 1;
+ *pos++ = 1; /* destination count */
*pos++ = target_flags;
- }
- if (action != MPATH_RANN) {
memcpy(pos, target, ETH_ALEN);
pos += ETH_ALEN;
memcpy(pos, &target_sn, 4);
+ pos += 4;
+ } else if (action == MPATH_PREP) {
+ memcpy(pos, orig_addr, ETH_ALEN);
+ pos += ETH_ALEN;
+ memcpy(pos, &orig_sn, 4);
+ pos += 4;
}
ieee80211_tx_skb(sdata, skb);
return 0;
}
+
+/* Headroom is not adjusted. Caller should ensure that skb has sufficient
+ * headroom in case the frame is encrypted. */
+static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ skb_set_mac_header(skb, 0);
+ skb_set_network_header(skb, 0);
+ skb_set_transport_header(skb, 0);
+
+ /* Send all internal mgmt frames on VO. Accordingly set TID to 7. */
+ skb_set_queue_mapping(skb, IEEE80211_AC_VO);
+ skb->priority = 7;
+
+ info->control.vif = &sdata->vif;
+ ieee80211_set_qos_hdr(sdata, skb);
+}
+
/**
* mesh_send_path error - Sends a PERR mesh management frame
*
@@ -199,6 +230,10 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
* @target_sn: SN of the broken destination
* @target_rcode: reason code for this PERR
* @ra: node this frame is addressed to
+ *
+ * Note: This function may be called with driver locks taken that the driver
+ * also acquires in the TX path. To avoid a deadlock we don't transmit the
+ * frame directly but add it to the pending queue instead.
*/
int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
__le16 target_rcode, const u8 *ra,
@@ -212,7 +247,7 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
if (!skb)
return -1;
- skb_reserve(skb, local->hw.extra_tx_headroom);
+ skb_reserve(skb, local->tx_headroom + local->hw.extra_tx_headroom);
/* 25 is the size of the common mgmt part (24) plus the size of the
* common action part (1)
*/
@@ -224,9 +259,11 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
memcpy(mgmt->da, ra, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
- /* BSSID is left zeroed, wildcard value */
- mgmt->u.action.category = WLAN_CATEGORY_MESH_PATH_SEL;
- mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION;
+ /* BSSID == SA */
+ memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
+ mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
+ mgmt->u.action.u.mesh_action.action_code =
+ WLAN_MESH_ACTION_HWMP_PATH_SELECTION;
ie_len = 15;
pos = skb_put(skb, 2 + ie_len);
*pos++ = WLAN_EID_PERR;
@@ -251,7 +288,9 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
pos += 4;
memcpy(pos, &target_rcode, 2);
- ieee80211_tx_skb(sdata, skb);
+ /* see note in function header */
+ prepare_frame_for_deferred_tx(sdata, skb);
+ ieee80211_add_pending_skb(local, skb);
return 0;
}
@@ -449,7 +488,6 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
if (fresh_info) {
mesh_path_assign_nexthop(mpath, sta);
- mpath->flags &= ~MESH_PATH_SN_VALID;
mpath->metric = last_hop_metric;
mpath->exp_time = time_after(mpath->exp_time, exp_time)
? mpath->exp_time : exp_time;
@@ -484,10 +522,10 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
orig_sn = PREQ_IE_ORIG_SN(preq_elem);
target_flags = PREQ_IE_TARGET_F(preq_elem);
- mhwmp_dbg("received PREQ from %pM\n", orig_addr);
+ mhwmp_dbg("received PREQ from %pM", orig_addr);
if (memcmp(target_addr, sdata->vif.addr, ETH_ALEN) == 0) {
- mhwmp_dbg("PREQ is for us\n");
+ mhwmp_dbg("PREQ is for us");
forward = false;
reply = true;
metric = 0;
@@ -523,7 +561,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
lifetime = PREQ_IE_LIFETIME(preq_elem);
ttl = ifmsh->mshcfg.element_ttl;
if (ttl != 0) {
- mhwmp_dbg("replying to the PREQ\n");
+ mhwmp_dbg("replying to the PREQ");
mesh_path_sel_frame_tx(MPATH_PREP, 0, target_addr,
cpu_to_le32(target_sn), 0, orig_addr,
cpu_to_le32(orig_sn), mgmt->sa, 0, ttl,
@@ -543,7 +581,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
ifmsh->mshstats.dropped_frames_ttl++;
return;
}
- mhwmp_dbg("forwarding the PREQ from %pM\n", orig_addr);
+ mhwmp_dbg("forwarding the PREQ from %pM", orig_addr);
--ttl;
flags = PREQ_IE_FLAGS(preq_elem);
preq_id = PREQ_IE_PREQ_ID(preq_elem);
@@ -578,7 +616,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
u8 next_hop[ETH_ALEN];
u32 target_sn, orig_sn, lifetime;
- mhwmp_dbg("received PREP from %pM\n", PREP_IE_ORIG_ADDR(prep_elem));
+ mhwmp_dbg("received PREP from %pM", PREP_IE_ORIG_ADDR(prep_elem));
/* Note that we divert from the draft nomenclature and denominate
 * destination to what the draft refers to as originator. So in this
@@ -684,6 +722,8 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
u8 ttl, flags, hopcount;
u8 *orig_addr;
u32 orig_sn, metric;
+ u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
+ bool root_is_gate;
ttl = rann->rann_ttl;
if (ttl <= 1) {
@@ -692,12 +732,19 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
}
ttl--;
flags = rann->rann_flags;
+ root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
orig_addr = rann->rann_addr;
orig_sn = rann->rann_seq;
hopcount = rann->rann_hopcount;
hopcount++;
metric = rann->rann_metric;
- mhwmp_dbg("received RANN from %pM\n", orig_addr);
+
+ /* Ignore our own RANNs */
+ if (memcmp(orig_addr, sdata->vif.addr, ETH_ALEN) == 0)
+ return;
+
+ mhwmp_dbg("received RANN from %pM (is_gate=%d)", orig_addr,
+ root_is_gate);
rcu_read_lock();
mpath = mesh_path_lookup(orig_addr, sdata);
@@ -709,18 +756,28 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
sdata->u.mesh.mshstats.dropped_frames_no_route++;
return;
}
- mesh_queue_preq(mpath,
- PREQ_Q_F_START | PREQ_Q_F_REFRESH);
}
+
+ if ((!(mpath->flags & (MESH_PATH_ACTIVE | MESH_PATH_RESOLVING)) ||
+ time_after(jiffies, mpath->exp_time - 1*HZ)) &&
+ !(mpath->flags & MESH_PATH_FIXED)) {
+ mhwmp_dbg("%s time to refresh root mpath %pM", sdata->name,
+ orig_addr);
+ mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
+ }
+
if (mpath->sn < orig_sn) {
mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
cpu_to_le32(orig_sn),
0, NULL, 0, broadcast_addr,
- hopcount, ttl, 0,
+ hopcount, ttl, cpu_to_le32(interval),
cpu_to_le32(metric + mpath->metric),
0, sdata);
mpath->sn = orig_sn;
}
+ if (root_is_gate)
+ mesh_path_add_gate(mpath);
+
rcu_read_unlock();
}
@@ -732,11 +789,20 @@ void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
struct ieee802_11_elems elems;
size_t baselen;
u32 last_hop_metric;
+ struct sta_info *sta;
/* need action_code */
if (len < IEEE80211_MIN_ACTION_SIZE + 1)
return;
+ rcu_read_lock();
+ sta = sta_info_get(sdata, mgmt->sa);
+ if (!sta || sta->plink_state != NL80211_PLINK_ESTAB) {
+ rcu_read_unlock();
+ return;
+ }
+ rcu_read_unlock();
+
baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt;
ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable,
len - baselen, &elems);
@@ -788,16 +854,16 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC);
if (!preq_node) {
- mhwmp_dbg("could not allocate PREQ node\n");
+ mhwmp_dbg("could not allocate PREQ node");
return;
}
- spin_lock(&ifmsh->mesh_preq_queue_lock);
+ spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
if (ifmsh->preq_queue_len == MAX_PREQ_QUEUE_LEN) {
- spin_unlock(&ifmsh->mesh_preq_queue_lock);
+ spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
kfree(preq_node);
if (printk_ratelimit())
- mhwmp_dbg("PREQ node queue full\n");
+ mhwmp_dbg("PREQ node queue full");
return;
}
@@ -806,7 +872,7 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
list_add_tail(&preq_node->list, &ifmsh->preq_queue.list);
++ifmsh->preq_queue_len;
- spin_unlock(&ifmsh->mesh_preq_queue_lock);
+ spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata)))
ieee80211_queue_work(&sdata->local->hw, &sdata->work);
@@ -982,35 +1048,46 @@ void mesh_path_timer(unsigned long data)
{
struct mesh_path *mpath = (void *) data;
struct ieee80211_sub_if_data *sdata = mpath->sdata;
+ int ret;
if (sdata->local->quiescing)
return;
spin_lock_bh(&mpath->state_lock);
if (mpath->flags & MESH_PATH_RESOLVED ||
- (!(mpath->flags & MESH_PATH_RESOLVING)))
+ (!(mpath->flags & MESH_PATH_RESOLVING))) {
mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED);
- else if (mpath->discovery_retries < max_preq_retries(sdata)) {
+ spin_unlock_bh(&mpath->state_lock);
+ } else if (mpath->discovery_retries < max_preq_retries(sdata)) {
++mpath->discovery_retries;
mpath->discovery_timeout *= 2;
+ spin_unlock_bh(&mpath->state_lock);
mesh_queue_preq(mpath, 0);
} else {
mpath->flags = 0;
mpath->exp_time = jiffies;
- mesh_path_flush_pending(mpath);
+ spin_unlock_bh(&mpath->state_lock);
+ if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
+ ret = mesh_path_send_to_gates(mpath);
+ if (ret)
+ mhwmp_dbg("no gate was reachable");
+ } else
+ mesh_path_flush_pending(mpath);
}
-
- spin_unlock_bh(&mpath->state_lock);
}
void
mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
+ u8 flags;
- mesh_path_sel_frame_tx(MPATH_RANN, 0, sdata->vif.addr,
+ flags = (ifmsh->mshcfg.dot11MeshGateAnnouncementProtocol)
+ ? RANN_FLAG_IS_GATE : 0;
+ mesh_path_sel_frame_tx(MPATH_RANN, flags, sdata->vif.addr,
cpu_to_le32(++ifmsh->sn),
0, NULL, 0, broadcast_addr,
0, sdata->u.mesh.mshcfg.element_ttl,
- 0, 0, 0, sdata);
+ cpu_to_le32(interval), 0, 0, sdata);
}
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 0d2faac..7f54c50 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -14,9 +14,16 @@
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
+#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"
+#ifdef CONFIG_MAC80211_VERBOSE_MPATH_DEBUG
+#define mpath_dbg(fmt, args...) printk(KERN_DEBUG fmt, ##args)
+#else
+#define mpath_dbg(fmt, args...) do { (void)(0); } while (0)
+#endif
+
/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER 2
@@ -42,8 +49,10 @@ static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */
int mesh_paths_generation;
/* This lock will have the grow table function as writer and add / delete nodes
- * as readers. When reading the table (i.e. doing lookups) we are well protected
- * by RCU
+ * as readers. RCU provides sufficient protection only when reading the table
+ * (i.e. doing lookups). Adding or removing nodes requires that we take
+ * the read lock or we risk operating on an old table. The write lock is only
+ * needed when modifying the number of buckets in a table.
*/
static DEFINE_RWLOCK(pathtbl_resize_lock);
@@ -60,6 +69,8 @@ static inline struct mesh_table *resize_dereference_mpp_paths(void)
lockdep_is_held(&pathtbl_resize_lock));
}
+static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath);
+
/*
* CAREFUL -- "tbl" must not be an expression,
* in particular not an rcu_dereference(), since
@@ -103,6 +114,7 @@ static struct mesh_table *mesh_table_alloc(int size_order)
sizeof(newtbl->hash_rnd));
for (i = 0; i <= newtbl->hash_mask; i++)
spin_lock_init(&newtbl->hashwlock[i]);
+ spin_lock_init(&newtbl->gates_lock);
return newtbl;
}
@@ -118,6 +130,7 @@ static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
struct hlist_head *mesh_hash;
struct hlist_node *p, *q;
+ struct mpath_node *gate;
int i;
mesh_hash = tbl->hash_buckets;
@@ -129,6 +142,17 @@ static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
}
spin_unlock_bh(&tbl->hashwlock[i]);
}
+ if (free_leafs) {
+ spin_lock_bh(&tbl->gates_lock);
+ hlist_for_each_entry_safe(gate, p, q,
+ tbl->known_gates, list) {
+ hlist_del(&gate->list);
+ kfree(gate);
+ }
+ kfree(tbl->known_gates);
+ spin_unlock_bh(&tbl->gates_lock);
+ }
+
__mesh_table_free(tbl);
}
@@ -146,6 +170,7 @@ static int mesh_table_grow(struct mesh_table *oldtbl,
newtbl->free_node = oldtbl->free_node;
newtbl->mean_chain_len = oldtbl->mean_chain_len;
newtbl->copy_node = oldtbl->copy_node;
+ newtbl->known_gates = oldtbl->known_gates;
atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));
oldhash = oldtbl->hash_buckets;
@@ -188,6 +213,7 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
struct ieee80211_hdr *hdr;
struct sk_buff_head tmpq;
unsigned long flags;
+ struct ieee80211_sub_if_data *sdata = mpath->sdata;
rcu_assign_pointer(mpath->next_hop, sta);
@@ -198,6 +224,8 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
hdr = (struct ieee80211_hdr *) skb->data;
memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
+ skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb));
+ ieee80211_set_qos_hdr(sdata, skb);
__skb_queue_tail(&tmpq, skb);
}
@@ -205,62 +233,128 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}
+static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
+ struct mesh_path *gate_mpath)
+{
+ struct ieee80211_hdr *hdr;
+ struct ieee80211s_hdr *mshdr;
+ int mesh_hdrlen, hdrlen;
+ char *next_hop;
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
+
+ if (!(mshdr->flags & MESH_FLAGS_AE)) {
+ /* size of the fixed part of the mesh header */
+ mesh_hdrlen = 6;
+
+ /* make room for the two extended addresses */
+ skb_push(skb, 2 * ETH_ALEN);
+ memmove(skb->data, hdr, hdrlen + mesh_hdrlen);
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+
+ /* we preserve the previous mesh header and only add
+	 * the new addresses */
+ mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
+ mshdr->flags = MESH_FLAGS_AE_A5_A6;
+ memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
+ memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
+ }
+
+ /* update next hop */
+ hdr = (struct ieee80211_hdr *) skb->data;
+ rcu_read_lock();
+ next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
+ memcpy(hdr->addr1, next_hop, ETH_ALEN);
+ rcu_read_unlock();
+ memcpy(hdr->addr3, dst_addr, ETH_ALEN);
+}
/**
- * mesh_path_lookup - look up a path in the mesh path table
- * @dst: hardware address (ETH_ALEN length) of destination
- * @sdata: local subif
*
- * Returns: pointer to the mesh path structure, or NULL if not found
+ * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
*
- * Locking: must be called within a read rcu section.
+ * This function is used to transfer or copy frames from an unresolved mpath to
+ * a gate mpath. The function also adds the Address Extension field and
+ * updates the next hop.
+ *
+ * If a frame already has an Address Extension field, only the next hop and
+ * destination addresses are updated.
+ *
+ * The gate mpath must be an active mpath with a valid mpath->next_hop.
+ *
+ * @mpath: An active mpath the frames will be sent to (i.e. the gate)
+ * @from_mpath: The failed mpath
+ * @copy: When true, copy all the frames to the new mpath queue. When false,
+ * move them.
*/
-struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
+static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
+ struct mesh_path *from_mpath,
+ bool copy)
{
- struct mesh_path *mpath;
- struct hlist_node *n;
- struct hlist_head *bucket;
- struct mesh_table *tbl;
- struct mpath_node *node;
+ struct sk_buff *skb, *cp_skb = NULL;
+ struct sk_buff_head gateq, failq;
+ unsigned long flags;
+ int num_skbs;
- tbl = rcu_dereference(mesh_paths);
+ BUG_ON(gate_mpath == from_mpath);
+ BUG_ON(!gate_mpath->next_hop);
- bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
- hlist_for_each_entry_rcu(node, n, bucket, list) {
- mpath = node->mpath;
- if (mpath->sdata == sdata &&
- memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
- if (MPATH_EXPIRED(mpath)) {
- spin_lock_bh(&mpath->state_lock);
- if (MPATH_EXPIRED(mpath))
- mpath->flags &= ~MESH_PATH_ACTIVE;
- spin_unlock_bh(&mpath->state_lock);
- }
- return mpath;
+ __skb_queue_head_init(&gateq);
+ __skb_queue_head_init(&failq);
+
+ spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
+ skb_queue_splice_init(&from_mpath->frame_queue, &failq);
+ spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
+
+ num_skbs = skb_queue_len(&failq);
+
+ while (num_skbs--) {
+ skb = __skb_dequeue(&failq);
+ if (copy) {
+ cp_skb = skb_copy(skb, GFP_ATOMIC);
+ if (cp_skb)
+ __skb_queue_tail(&failq, cp_skb);
}
+
+ prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
+ __skb_queue_tail(&gateq, skb);
}
- return NULL;
+
+ spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
+ skb_queue_splice(&gateq, &gate_mpath->frame_queue);
+ mpath_dbg("Mpath queue for gate %pM has %d frames\n",
+ gate_mpath->dst,
+ skb_queue_len(&gate_mpath->frame_queue));
+ spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);
+
+ if (!copy)
+ return;
+
+ spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
+ skb_queue_splice(&failq, &from_mpath->frame_queue);
+ spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}
-struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
+
+static struct mesh_path *path_lookup(struct mesh_table *tbl, u8 *dst,
+ struct ieee80211_sub_if_data *sdata)
{
struct mesh_path *mpath;
struct hlist_node *n;
struct hlist_head *bucket;
- struct mesh_table *tbl;
struct mpath_node *node;
- tbl = rcu_dereference(mpp_paths);
-
bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
hlist_for_each_entry_rcu(node, n, bucket, list) {
mpath = node->mpath;
if (mpath->sdata == sdata &&
- memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
+ memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
if (MPATH_EXPIRED(mpath)) {
spin_lock_bh(&mpath->state_lock);
- if (MPATH_EXPIRED(mpath))
- mpath->flags &= ~MESH_PATH_ACTIVE;
+ mpath->flags &= ~MESH_PATH_ACTIVE;
spin_unlock_bh(&mpath->state_lock);
}
return mpath;
@@ -269,6 +363,25 @@ struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
return NULL;
}
+/**
+ * mesh_path_lookup - look up a path in the mesh path table
+ * @dst: hardware address (ETH_ALEN length) of destination
+ * @sdata: local subif
+ *
+ * Returns: pointer to the mesh path structure, or NULL if not found
+ *
+ * Locking: must be called within a read rcu section.
+ */
+struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
+{
+ return path_lookup(rcu_dereference(mesh_paths), dst, sdata);
+}
+
+struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
+{
+ return path_lookup(rcu_dereference(mpp_paths), dst, sdata);
+}
+
/**
* mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
@@ -293,8 +406,7 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data
if (j++ == idx) {
if (MPATH_EXPIRED(node->mpath)) {
spin_lock_bh(&node->mpath->state_lock);
- if (MPATH_EXPIRED(node->mpath))
- node->mpath->flags &= ~MESH_PATH_ACTIVE;
+ node->mpath->flags &= ~MESH_PATH_ACTIVE;
spin_unlock_bh(&node->mpath->state_lock);
}
return node->mpath;
@@ -304,6 +416,109 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data
return NULL;
}
+static void mesh_gate_node_reclaim(struct rcu_head *rp)
+{
+ struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
+ kfree(node);
+}
+
+/**
+ * mesh_gate_add - mark mpath as path to a mesh gate and add to known_gates
+ * @mesh_tbl: table which contains known_gates list
+ * @mpath: mpath to known mesh gate
+ *
+ * Returns: 0 on success
+ *
+ */
+static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath)
+{
+ struct mpath_node *gate, *new_gate;
+ struct hlist_node *n;
+ int err;
+
+ rcu_read_lock();
+ tbl = rcu_dereference(tbl);
+
+ hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
+ if (gate->mpath == mpath) {
+ err = -EEXIST;
+ goto err_rcu;
+ }
+
+ new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
+ if (!new_gate) {
+ err = -ENOMEM;
+ goto err_rcu;
+ }
+
+ mpath->is_gate = true;
+ mpath->sdata->u.mesh.num_gates++;
+ new_gate->mpath = mpath;
+ spin_lock_bh(&tbl->gates_lock);
+ hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
+ spin_unlock_bh(&tbl->gates_lock);
+ rcu_read_unlock();
+ mpath_dbg("Mesh path (%s): Recorded new gate: %pM. %d known gates\n",
+ mpath->sdata->name, mpath->dst,
+ mpath->sdata->u.mesh.num_gates);
+ return 0;
+err_rcu:
+ rcu_read_unlock();
+ return err;
+}
+
+/**
+ * mesh_gate_del - remove a mesh gate from the list of known gates
+ * @tbl: table which holds our list of known gates
+ * @mpath: gate mpath
+ *
+ * Returns: 0 on success
+ *
+ * Locking: must be called inside rcu_read_lock() section
+ */
+static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
+{
+ struct mpath_node *gate;
+ struct hlist_node *p, *q;
+
+ tbl = rcu_dereference(tbl);
+
+ hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
+ if (gate->mpath == mpath) {
+ spin_lock_bh(&tbl->gates_lock);
+ hlist_del_rcu(&gate->list);
+ call_rcu(&gate->rcu, mesh_gate_node_reclaim);
+ spin_unlock_bh(&tbl->gates_lock);
+ mpath->sdata->u.mesh.num_gates--;
+ mpath->is_gate = false;
+ mpath_dbg("Mesh path (%s): Deleted gate: %pM. "
+ "%d known gates\n", mpath->sdata->name,
+ mpath->dst, mpath->sdata->u.mesh.num_gates);
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * mesh_path_add_gate - add the given mesh gate mpath to our path table
+ * @mpath: gate path to add to table
+ */
+int mesh_path_add_gate(struct mesh_path *mpath)
+{
+ return mesh_gate_add(mesh_paths, mpath);
+}
+
+/**
+ * mesh_gate_num - number of gates known to this interface
+ * @sdata: subif data
+ */
+int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
+{
+ return sdata->u.mesh.num_gates;
+}
+
/**
* mesh_path_add - allocate and add a new path to the mesh path table
* @addr: destination address of the path (ETH_ALEN length)
@@ -481,6 +696,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
new_mpath->flags = 0;
skb_queue_head_init(&new_mpath->frame_queue);
new_node->mpath = new_mpath;
+ init_timer(&new_mpath->timer);
new_mpath->exp_time = jiffies;
spin_lock_init(&new_mpath->state_lock);
@@ -539,28 +755,53 @@ void mesh_plink_broken(struct sta_info *sta)
struct hlist_node *p;
struct ieee80211_sub_if_data *sdata = sta->sdata;
int i;
+ __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);
rcu_read_lock();
tbl = rcu_dereference(mesh_paths);
for_each_mesh_entry(tbl, p, node, i) {
mpath = node->mpath;
- spin_lock_bh(&mpath->state_lock);
if (rcu_dereference(mpath->next_hop) == sta &&
mpath->flags & MESH_PATH_ACTIVE &&
!(mpath->flags & MESH_PATH_FIXED)) {
+ spin_lock_bh(&mpath->state_lock);
mpath->flags &= ~MESH_PATH_ACTIVE;
++mpath->sn;
spin_unlock_bh(&mpath->state_lock);
mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
mpath->dst, cpu_to_le32(mpath->sn),
- cpu_to_le16(PERR_RCODE_DEST_UNREACH),
- bcast, sdata);
- } else
- spin_unlock_bh(&mpath->state_lock);
+ reason, bcast, sdata);
+ }
}
rcu_read_unlock();
}
+static void mesh_path_node_reclaim(struct rcu_head *rp)
+{
+ struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
+ struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
+
+ del_timer_sync(&node->mpath->timer);
+ atomic_dec(&sdata->u.mesh.mpaths);
+ kfree(node->mpath);
+ kfree(node);
+}
+
+/* needs to be called with the corresponding hashwlock taken */
+static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
+{
+ struct mesh_path *mpath;
+ mpath = node->mpath;
+ spin_lock(&mpath->state_lock);
+ mpath->flags |= MESH_PATH_RESOLVING;
+ if (mpath->is_gate)
+ mesh_gate_del(tbl, mpath);
+ hlist_del_rcu(&node->list);
+ call_rcu(&node->rcu, mesh_path_node_reclaim);
+ spin_unlock(&mpath->state_lock);
+ atomic_dec(&tbl->entries);
+}
+
/**
* mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
*
@@ -581,42 +822,59 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
int i;
rcu_read_lock();
- tbl = rcu_dereference(mesh_paths);
+ read_lock_bh(&pathtbl_resize_lock);
+ tbl = resize_dereference_mesh_paths();
for_each_mesh_entry(tbl, p, node, i) {
mpath = node->mpath;
- if (rcu_dereference(mpath->next_hop) == sta)
- mesh_path_del(mpath->dst, mpath->sdata);
+ if (rcu_dereference(mpath->next_hop) == sta) {
+ spin_lock_bh(&tbl->hashwlock[i]);
+ __mesh_path_del(tbl, node);
+ spin_unlock_bh(&tbl->hashwlock[i]);
+ }
}
+ read_unlock_bh(&pathtbl_resize_lock);
rcu_read_unlock();
}
-void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
+static void table_flush_by_iface(struct mesh_table *tbl,
+ struct ieee80211_sub_if_data *sdata)
{
- struct mesh_table *tbl;
struct mesh_path *mpath;
struct mpath_node *node;
struct hlist_node *p;
int i;
- rcu_read_lock();
- tbl = rcu_dereference(mesh_paths);
+ WARN_ON(!rcu_read_lock_held());
for_each_mesh_entry(tbl, p, node, i) {
mpath = node->mpath;
- if (mpath->sdata == sdata)
- mesh_path_del(mpath->dst, mpath->sdata);
+ if (mpath->sdata != sdata)
+ continue;
+ spin_lock_bh(&tbl->hashwlock[i]);
+ __mesh_path_del(tbl, node);
+ spin_unlock_bh(&tbl->hashwlock[i]);
}
- rcu_read_unlock();
}
-static void mesh_path_node_reclaim(struct rcu_head *rp)
+/**
+ * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
+ *
+ * This function deletes both mesh paths and mesh portal paths.
+ *
+ * @sdata: interface data to match
+ *
+ */
+void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
- struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
- struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
+ struct mesh_table *tbl;
- del_timer_sync(&node->mpath->timer);
- atomic_dec(&sdata->u.mesh.mpaths);
- kfree(node->mpath);
- kfree(node);
+ rcu_read_lock();
+ read_lock_bh(&pathtbl_resize_lock);
+ tbl = resize_dereference_mesh_paths();
+ table_flush_by_iface(tbl, sdata);
+ tbl = resize_dereference_mpp_paths();
+ table_flush_by_iface(tbl, sdata);
+ read_unlock_bh(&pathtbl_resize_lock);
+ rcu_read_unlock();
}
/**
@@ -647,12 +905,7 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
mpath = node->mpath;
if (mpath->sdata == sdata &&
memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
- spin_lock_bh(&mpath->state_lock);
- mpath->flags |= MESH_PATH_RESOLVING;
- hlist_del_rcu(&node->list);
- call_rcu(&node->rcu, mesh_path_node_reclaim);
- atomic_dec(&tbl->entries);
- spin_unlock_bh(&mpath->state_lock);
+ __mesh_path_del(tbl, node);
goto enddel;
}
}
@@ -681,6 +934,58 @@ void mesh_path_tx_pending(struct mesh_path *mpath)
}
/**
+ * mesh_path_send_to_gates - sends pending frames to all known mesh gates
+ *
+ * @mpath: mesh path whose queue will be emptied
+ *
+ * If there is only one gate, the frames are transferred from the failed mpath
+ * queue to that gate's queue. If there is more than one gate, the frames
+ * are copied from each gate to the next. After the frames are copied, the
+ * mpath queues are emptied onto the transmission queue.
+ */
+int mesh_path_send_to_gates(struct mesh_path *mpath)
+{
+ struct ieee80211_sub_if_data *sdata = mpath->sdata;
+ struct hlist_node *n;
+ struct mesh_table *tbl;
+ struct mesh_path *from_mpath = mpath;
+ struct mpath_node *gate = NULL;
+ bool copy = false;
+ struct hlist_head *known_gates;
+
+ rcu_read_lock();
+ tbl = rcu_dereference(mesh_paths);
+ known_gates = tbl->known_gates;
+ rcu_read_unlock();
+
+ if (!known_gates)
+ return -EHOSTUNREACH;
+
+ hlist_for_each_entry_rcu(gate, n, known_gates, list) {
+ if (gate->mpath->sdata != sdata)
+ continue;
+
+ if (gate->mpath->flags & MESH_PATH_ACTIVE) {
+ mpath_dbg("Forwarding to %pM\n", gate->mpath->dst);
+ mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
+ from_mpath = gate->mpath;
+ copy = true;
+ } else {
+ mpath_dbg("Not forwarding %p\n", gate->mpath);
+ mpath_dbg("flags %x\n", gate->mpath->flags);
+ }
+ }
+
+ hlist_for_each_entry_rcu(gate, n, known_gates, list)
+ if (gate->mpath->sdata == sdata) {
+ mpath_dbg("Sending to %pM\n", gate->mpath->dst);
+ mesh_path_tx_pending(gate->mpath);
+ }
+
+ return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
+}
+
+/**
* mesh_path_discard_frame - discard a frame whose path could not be resolved
*
* @skb: frame to discard
@@ -699,18 +1004,23 @@ void mesh_path_discard_frame(struct sk_buff *skb,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct mesh_path *mpath;
u32 sn = 0;
+ __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD);
if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
u8 *ra, *da;
da = hdr->addr3;
ra = hdr->addr1;
+ rcu_read_lock();
mpath = mesh_path_lookup(da, sdata);
- if (mpath)
+ if (mpath) {
+ spin_lock_bh(&mpath->state_lock);
sn = ++mpath->sn;
+ spin_unlock_bh(&mpath->state_lock);
+ }
+ rcu_read_unlock();
mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data,
- cpu_to_le32(sn),
- cpu_to_le16(PERR_RCODE_NO_ROUTE), ra, sdata);
+ cpu_to_le32(sn), reason, ra, sdata);
}
kfree_skb(skb);
@@ -728,8 +1038,7 @@ void mesh_path_flush_pending(struct mesh_path *mpath)
{
struct sk_buff *skb;
- while ((skb = skb_dequeue(&mpath->frame_queue)) &&
- (mpath->flags & MESH_PATH_ACTIVE))
+ while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
mesh_path_discard_frame(skb, mpath->sdata);
}
@@ -790,6 +1099,7 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
int mesh_pathtbl_init(void)
{
struct mesh_table *tbl_path, *tbl_mpp;
+ int ret;
tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
if (!tbl_path)
@@ -797,21 +1107,40 @@ int mesh_pathtbl_init(void)
tbl_path->free_node = &mesh_path_node_free;
tbl_path->copy_node = &mesh_path_node_copy;
tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
+ tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
+ if (!tbl_path->known_gates) {
+ ret = -ENOMEM;
+ goto free_path;
+ }
+ INIT_HLIST_HEAD(tbl_path->known_gates);
+
tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
if (!tbl_mpp) {
- mesh_table_free(tbl_path, true);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto free_path;
}
tbl_mpp->free_node = &mesh_path_node_free;
tbl_mpp->copy_node = &mesh_path_node_copy;
tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
+ tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
+ if (!tbl_mpp->known_gates) {
+ ret = -ENOMEM;
+ goto free_mpp;
+ }
+ INIT_HLIST_HEAD(tbl_mpp->known_gates);
/* Need no locking since this is during init */
RCU_INIT_POINTER(mesh_paths, tbl_path);
RCU_INIT_POINTER(mpp_paths, tbl_mpp);
return 0;
+
+free_mpp:
+ mesh_table_free(tbl_mpp, true);
+free_path:
+ mesh_table_free(tbl_path, true);
+ return ret;
}
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
@@ -828,14 +1157,10 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
if (node->mpath->sdata != sdata)
continue;
mpath = node->mpath;
- spin_lock_bh(&mpath->state_lock);
if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
(!(mpath->flags & MESH_PATH_FIXED)) &&
- time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) {
- spin_unlock_bh(&mpath->state_lock);
+ time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
mesh_path_del(mpath->dst, mpath->sdata);
- } else
- spin_unlock_bh(&mpath->state_lock);
}
rcu_read_unlock();
}
@@ -843,6 +1168,6 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
void mesh_pathtbl_unregister(void)
{
/* no need for locking during exit path */
- mesh_table_free(rcu_dereference_raw(mesh_paths), true);
- mesh_table_free(rcu_dereference_raw(mpp_paths), true);
+ mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
+ mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index f4adc09..7e57f5d 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -19,35 +19,18 @@
#define mpl_dbg(fmt, args...) do { (void)(0); } while (0)
#endif
-#define PLINK_GET_LLID(p) (p + 4)
-#define PLINK_GET_PLID(p) (p + 6)
+#define PLINK_GET_LLID(p) (p + 2)
+#define PLINK_GET_PLID(p) (p + 4)
#define mod_plink_timer(s, t) (mod_timer(&s->plink_timer, \
jiffies + HZ * t / 1000))
-/* Peer link cancel reasons, all subject to ANA approval */
-#define MESH_LINK_CANCELLED 2
-#define MESH_MAX_NEIGHBORS 3
-#define MESH_CAPABILITY_POLICY_VIOLATION 4
-#define MESH_CLOSE_RCVD 5
-#define MESH_MAX_RETRIES 6
-#define MESH_CONFIRM_TIMEOUT 7
-#define MESH_SECURITY_ROLE_NEGOTIATION_DIFFERS 8
-#define MESH_SECURITY_AUTHENTICATION_IMPOSSIBLE 9
-#define MESH_SECURITY_FAILED_VERIFICATION 10
-
#define dot11MeshMaxRetries(s) (s->u.mesh.mshcfg.dot11MeshMaxRetries)
#define dot11MeshRetryTimeout(s) (s->u.mesh.mshcfg.dot11MeshRetryTimeout)
#define dot11MeshConfirmTimeout(s) (s->u.mesh.mshcfg.dot11MeshConfirmTimeout)
#define dot11MeshHoldingTimeout(s) (s->u.mesh.mshcfg.dot11MeshHoldingTimeout)
#define dot11MeshMaxPeerLinks(s) (s->u.mesh.mshcfg.dot11MeshMaxPeerLinks)
-enum plink_frame_type {
- PLINK_OPEN = 1,
- PLINK_CONFIRM,
- PLINK_CLOSE
-};
-
enum plink_event {
PLINK_UNDEFINED,
OPN_ACPT,
@@ -60,6 +43,10 @@ enum plink_event {
CLS_IGNR
};
+static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
+ enum ieee80211_self_protected_actioncode action,
+ u8 *da, __le16 llid, __le16 plid, __le16 reason);
+
static inline
void mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata)
{
@@ -105,7 +92,9 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
if (!sta)
return NULL;
- sta->flags = WLAN_STA_AUTHORIZED | WLAN_STA_AUTH;
+ set_sta_flag(sta, WLAN_STA_AUTH);
+ set_sta_flag(sta, WLAN_STA_AUTHORIZED);
+ set_sta_flag(sta, WLAN_STA_WME);
sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
rate_control_rate_init(sta);
@@ -150,6 +139,10 @@ void mesh_plink_deactivate(struct sta_info *sta)
spin_lock_bh(&sta->lock);
deactivated = __mesh_plink_deactivate(sta);
+ sta->reason = cpu_to_le16(WLAN_REASON_MESH_PEER_CANCELED);
+ mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
+ sta->sta.addr, sta->llid, sta->plid,
+ sta->reason);
spin_unlock_bh(&sta->lock);
if (deactivated)
@@ -157,16 +150,16 @@ void mesh_plink_deactivate(struct sta_info *sta)
}
static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
- enum plink_frame_type action, u8 *da, __le16 llid, __le16 plid,
- __le16 reason) {
+ enum ieee80211_self_protected_actioncode action,
+ u8 *da, __le16 llid, __le16 plid, __le16 reason) {
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400 +
sdata->u.mesh.ie_len);
struct ieee80211_mgmt *mgmt;
bool include_plid = false;
- static const u8 meshpeeringproto[] = { 0x00, 0x0F, 0xAC, 0x2A };
+ int ie_len = 4;
+ u16 peering_proto = 0;
u8 *pos;
- int ie_len;
if (!skb)
return -1;
@@ -175,63 +168,75 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
* common action part (1)
*/
mgmt = (struct ieee80211_mgmt *)
- skb_put(skb, 25 + sizeof(mgmt->u.action.u.plink_action));
- memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.plink_action));
+ skb_put(skb, 25 + sizeof(mgmt->u.action.u.self_prot));
+ memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.self_prot));
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
memcpy(mgmt->da, da, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
- mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
- mgmt->u.action.u.plink_action.action_code = action;
-
- if (action == PLINK_CLOSE)
- mgmt->u.action.u.plink_action.aux = reason;
- else {
- mgmt->u.action.u.plink_action.aux = cpu_to_le16(0x0);
- if (action == PLINK_CONFIRM) {
- pos = skb_put(skb, 4);
- /* two-byte status code followed by two-byte AID */
- memset(pos, 0, 2);
+ mgmt->u.action.category = WLAN_CATEGORY_SELF_PROTECTED;
+ mgmt->u.action.u.self_prot.action_code = action;
+
+ if (action != WLAN_SP_MESH_PEERING_CLOSE) {
+ /* capability info */
+ pos = skb_put(skb, 2);
+ memset(pos, 0, 2);
+ if (action == WLAN_SP_MESH_PEERING_CONFIRM) {
+ /* AID */
+ pos = skb_put(skb, 2);
memcpy(pos + 2, &plid, 2);
}
- mesh_mgmt_ies_add(skb, sdata);
+ if (ieee80211_add_srates_ie(&sdata->vif, skb) ||
+ ieee80211_add_ext_srates_ie(&sdata->vif, skb) ||
+ mesh_add_rsn_ie(skb, sdata) ||
+ mesh_add_meshid_ie(skb, sdata) ||
+ mesh_add_meshconf_ie(skb, sdata))
+ return -1;
+ } else { /* WLAN_SP_MESH_PEERING_CLOSE */
+ if (mesh_add_meshid_ie(skb, sdata))
+ return -1;
}
- /* Add Peer Link Management element */
+ /* Add Mesh Peering Management element */
switch (action) {
- case PLINK_OPEN:
- ie_len = 6;
+ case WLAN_SP_MESH_PEERING_OPEN:
break;
- case PLINK_CONFIRM:
- ie_len = 8;
+ case WLAN_SP_MESH_PEERING_CONFIRM:
+ ie_len += 2;
include_plid = true;
break;
- case PLINK_CLOSE:
- default:
- if (!plid)
- ie_len = 8;
- else {
- ie_len = 10;
+ case WLAN_SP_MESH_PEERING_CLOSE:
+ if (plid) {
+ ie_len += 2;
include_plid = true;
}
+ ie_len += 2; /* reason code */
break;
+ default:
+ return -EINVAL;
}
+ if (WARN_ON(skb_tailroom(skb) < 2 + ie_len))
+ return -ENOMEM;
+
pos = skb_put(skb, 2 + ie_len);
- *pos++ = WLAN_EID_PEER_LINK;
+ *pos++ = WLAN_EID_PEER_MGMT;
*pos++ = ie_len;
- memcpy(pos, meshpeeringproto, sizeof(meshpeeringproto));
- pos += 4;
+ memcpy(pos, &peering_proto, 2);
+ pos += 2;
memcpy(pos, &llid, 2);
+ pos += 2;
if (include_plid) {
- pos += 2;
memcpy(pos, &plid, 2);
- }
- if (action == PLINK_CLOSE) {
pos += 2;
+ }
+ if (action == WLAN_SP_MESH_PEERING_CLOSE) {
memcpy(pos, &reason, 2);
+ pos += 2;
}
+ if (mesh_add_vendor_ies(skb, sdata))
+ return -1;
ieee80211_tx_skb(sdata, skb);
return 0;
@@ -322,21 +327,21 @@ static void mesh_plink_timer(unsigned long data)
++sta->plink_retries;
mod_plink_timer(sta, sta->plink_timeout);
spin_unlock_bh(&sta->lock);
- mesh_plink_frame_tx(sdata, PLINK_OPEN, sta->sta.addr, llid,
- 0, 0);
+ mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_OPEN,
+ sta->sta.addr, llid, 0, 0);
break;
}
- reason = cpu_to_le16(MESH_MAX_RETRIES);
+ reason = cpu_to_le16(WLAN_REASON_MESH_MAX_RETRIES);
/* fall through on else */
case NL80211_PLINK_CNF_RCVD:
/* confirm timer */
if (!reason)
- reason = cpu_to_le16(MESH_CONFIRM_TIMEOUT);
+ reason = cpu_to_le16(WLAN_REASON_MESH_CONFIRM_TIMEOUT);
sta->plink_state = NL80211_PLINK_HOLDING;
mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
spin_unlock_bh(&sta->lock);
- mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, plid,
- reason);
+ mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
+ sta->sta.addr, llid, plid, reason);
break;
case NL80211_PLINK_HOLDING:
/* holding timer */
@@ -380,7 +385,7 @@ int mesh_plink_open(struct sta_info *sta)
__le16 llid;
struct ieee80211_sub_if_data *sdata = sta->sdata;
- if (!test_sta_flags(sta, WLAN_STA_AUTH))
+ if (!test_sta_flag(sta, WLAN_STA_AUTH))
return -EPERM;
spin_lock_bh(&sta->lock);
@@ -396,7 +401,7 @@ int mesh_plink_open(struct sta_info *sta)
mpl_dbg("Mesh plink: starting establishment with %pM\n",
sta->sta.addr);
- return mesh_plink_frame_tx(sdata, PLINK_OPEN,
+ return mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_OPEN,
sta->sta.addr, llid, 0, 0);
}
@@ -422,7 +427,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
struct ieee802_11_elems elems;
struct sta_info *sta;
enum plink_event event;
- enum plink_frame_type ftype;
+ enum ieee80211_self_protected_actioncode ftype;
size_t baselen;
bool deactivated, matches_local = true;
u8 ie_len;
@@ -449,14 +454,15 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
return;
}
- baseaddr = mgmt->u.action.u.plink_action.variable;
- baselen = (u8 *) mgmt->u.action.u.plink_action.variable - (u8 *) mgmt;
- if (mgmt->u.action.u.plink_action.action_code == PLINK_CONFIRM) {
+ baseaddr = mgmt->u.action.u.self_prot.variable;
+ baselen = (u8 *) mgmt->u.action.u.self_prot.variable - (u8 *) mgmt;
+ if (mgmt->u.action.u.self_prot.action_code ==
+ WLAN_SP_MESH_PEERING_CONFIRM) {
baseaddr += 4;
baselen += 4;
}
ieee802_11_parse_elems(baseaddr, len - baselen, &elems);
- if (!elems.peer_link) {
+ if (!elems.peering) {
mpl_dbg("Mesh plink: missing necessary peer link ie\n");
return;
}
@@ -466,37 +472,40 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
return;
}
- ftype = mgmt->u.action.u.plink_action.action_code;
- ie_len = elems.peer_link_len;
- if ((ftype == PLINK_OPEN && ie_len != 6) ||
- (ftype == PLINK_CONFIRM && ie_len != 8) ||
- (ftype == PLINK_CLOSE && ie_len != 8 && ie_len != 10)) {
+ ftype = mgmt->u.action.u.self_prot.action_code;
+ ie_len = elems.peering_len;
+ if ((ftype == WLAN_SP_MESH_PEERING_OPEN && ie_len != 4) ||
+ (ftype == WLAN_SP_MESH_PEERING_CONFIRM && ie_len != 6) ||
+ (ftype == WLAN_SP_MESH_PEERING_CLOSE && ie_len != 6
+ && ie_len != 8)) {
mpl_dbg("Mesh plink: incorrect plink ie length %d %d\n",
ftype, ie_len);
return;
}
- if (ftype != PLINK_CLOSE && (!elems.mesh_id || !elems.mesh_config)) {
+ if (ftype != WLAN_SP_MESH_PEERING_CLOSE &&
+ (!elems.mesh_id || !elems.mesh_config)) {
mpl_dbg("Mesh plink: missing necessary ie\n");
return;
}
/* Note the lines below are correct, the llid in the frame is the plid
* from the point of view of this host.
*/
- memcpy(&plid, PLINK_GET_LLID(elems.peer_link), 2);
- if (ftype == PLINK_CONFIRM || (ftype == PLINK_CLOSE && ie_len == 10))
- memcpy(&llid, PLINK_GET_PLID(elems.peer_link), 2);
+ memcpy(&plid, PLINK_GET_LLID(elems.peering), 2);
+ if (ftype == WLAN_SP_MESH_PEERING_CONFIRM ||
+ (ftype == WLAN_SP_MESH_PEERING_CLOSE && ie_len == 8))
+ memcpy(&llid, PLINK_GET_PLID(elems.peering), 2);
rcu_read_lock();
sta = sta_info_get(sdata, mgmt->sa);
- if (!sta && ftype != PLINK_OPEN) {
+ if (!sta && ftype != WLAN_SP_MESH_PEERING_OPEN) {
mpl_dbg("Mesh plink: cls or cnf from unknown peer\n");
rcu_read_unlock();
return;
}
- if (sta && !test_sta_flags(sta, WLAN_STA_AUTH)) {
+ if (sta && !test_sta_flag(sta, WLAN_STA_AUTH)) {
mpl_dbg("Mesh plink: Action frame from non-authed peer\n");
rcu_read_unlock();
return;
@@ -509,30 +518,30 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
/* Now we will figure out the appropriate event... */
event = PLINK_UNDEFINED;
- if (ftype != PLINK_CLOSE && (!mesh_matches_local(&elems, sdata))) {
+ if (ftype != WLAN_SP_MESH_PEERING_CLOSE &&
+ (!mesh_matches_local(&elems, sdata))) {
matches_local = false;
switch (ftype) {
- case PLINK_OPEN:
+ case WLAN_SP_MESH_PEERING_OPEN:
event = OPN_RJCT;
break;
- case PLINK_CONFIRM:
+ case WLAN_SP_MESH_PEERING_CONFIRM:
event = CNF_RJCT;
break;
- case PLINK_CLOSE:
- /* avoid warning */
+ default:
break;
}
}
if (!sta && !matches_local) {
rcu_read_unlock();
- reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION);
+ reason = cpu_to_le16(WLAN_REASON_MESH_CONFIG);
llid = 0;
- mesh_plink_frame_tx(sdata, PLINK_CLOSE, mgmt->sa, llid,
- plid, reason);
+ mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
+ mgmt->sa, llid, plid, reason);
return;
} else if (!sta) {
- /* ftype == PLINK_OPEN */
+ /* ftype == WLAN_SP_MESH_PEERING_OPEN */
u32 rates;
rcu_read_unlock();
@@ -557,21 +566,21 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
} else if (matches_local) {
spin_lock_bh(&sta->lock);
switch (ftype) {
- case PLINK_OPEN:
+ case WLAN_SP_MESH_PEERING_OPEN:
if (!mesh_plink_free_count(sdata) ||
(sta->plid && sta->plid != plid))
event = OPN_IGNR;
else
event = OPN_ACPT;
break;
- case PLINK_CONFIRM:
+ case WLAN_SP_MESH_PEERING_CONFIRM:
if (!mesh_plink_free_count(sdata) ||
(sta->llid != llid || sta->plid != plid))
event = CNF_IGNR;
else
event = CNF_ACPT;
break;
- case PLINK_CLOSE:
+ case WLAN_SP_MESH_PEERING_CLOSE:
if (sta->plink_state == NL80211_PLINK_ESTAB)
/* Do not check for llid or plid. This does not
* follow the standard but since multiple plinks
@@ -620,10 +629,12 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
sta->llid = llid;
mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata));
spin_unlock_bh(&sta->lock);
- mesh_plink_frame_tx(sdata, PLINK_OPEN, sta->sta.addr, llid,
- 0, 0);
- mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr,
- llid, plid, 0);
+ mesh_plink_frame_tx(sdata,
+ WLAN_SP_MESH_PEERING_OPEN,
+ sta->sta.addr, llid, 0, 0);
+ mesh_plink_frame_tx(sdata,
+ WLAN_SP_MESH_PEERING_CONFIRM,
+ sta->sta.addr, llid, plid, 0);
break;
default:
spin_unlock_bh(&sta->lock);
@@ -635,10 +646,10 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
switch (event) {
case OPN_RJCT:
case CNF_RJCT:
- reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION);
+ reason = cpu_to_le16(WLAN_REASON_MESH_CONFIG);
case CLS_ACPT:
if (!reason)
- reason = cpu_to_le16(MESH_CLOSE_RCVD);
+ reason = cpu_to_le16(WLAN_REASON_MESH_CLOSE);
sta->reason = reason;
sta->plink_state = NL80211_PLINK_HOLDING;
if (!mod_plink_timer(sta,
@@ -647,8 +658,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
llid = sta->llid;
spin_unlock_bh(&sta->lock);
- mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
- plid, reason);
+ mesh_plink_frame_tx(sdata,
+ WLAN_SP_MESH_PEERING_CLOSE,
+ sta->sta.addr, llid, plid, reason);
break;
case OPN_ACPT:
/* retry timer is left untouched */
@@ -656,8 +668,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
sta->plid = plid;
llid = sta->llid;
spin_unlock_bh(&sta->lock);
- mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
- plid, 0);
+ mesh_plink_frame_tx(sdata,
+ WLAN_SP_MESH_PEERING_CONFIRM,
+ sta->sta.addr, llid, plid, 0);
break;
case CNF_ACPT:
sta->plink_state = NL80211_PLINK_CNF_RCVD;
@@ -677,10 +690,10 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
switch (event) {
case OPN_RJCT:
case CNF_RJCT:
- reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION);
+ reason = cpu_to_le16(WLAN_REASON_MESH_CONFIG);
case CLS_ACPT:
if (!reason)
- reason = cpu_to_le16(MESH_CLOSE_RCVD);
+ reason = cpu_to_le16(WLAN_REASON_MESH_CLOSE);
sta->reason = reason;
sta->plink_state = NL80211_PLINK_HOLDING;
if (!mod_plink_timer(sta,
@@ -689,14 +702,15 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
llid = sta->llid;
spin_unlock_bh(&sta->lock);
- mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
- plid, reason);
+ mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
+ sta->sta.addr, llid, plid, reason);
break;
case OPN_ACPT:
llid = sta->llid;
spin_unlock_bh(&sta->lock);
- mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
- plid, 0);
+ mesh_plink_frame_tx(sdata,
+ WLAN_SP_MESH_PEERING_CONFIRM,
+ sta->sta.addr, llid, plid, 0);
break;
case CNF_ACPT:
del_timer(&sta->plink_timer);
@@ -717,10 +731,10 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
switch (event) {
case OPN_RJCT:
case CNF_RJCT:
- reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION);
+ reason = cpu_to_le16(WLAN_REASON_MESH_CONFIG);
case CLS_ACPT:
if (!reason)
- reason = cpu_to_le16(MESH_CLOSE_RCVD);
+ reason = cpu_to_le16(WLAN_REASON_MESH_CLOSE);
sta->reason = reason;
sta->plink_state = NL80211_PLINK_HOLDING;
if (!mod_plink_timer(sta,
@@ -729,8 +743,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
llid = sta->llid;
spin_unlock_bh(&sta->lock);
- mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
- plid, reason);
+ mesh_plink_frame_tx(sdata,
+ WLAN_SP_MESH_PEERING_CLOSE,
+ sta->sta.addr, llid, plid, reason);
break;
case OPN_ACPT:
del_timer(&sta->plink_timer);
@@ -740,8 +755,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
mpl_dbg("Mesh plink with %pM ESTABLISHED\n",
sta->sta.addr);
- mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
- plid, 0);
+ mesh_plink_frame_tx(sdata,
+ WLAN_SP_MESH_PEERING_CONFIRM,
+ sta->sta.addr, llid, plid, 0);
break;
default:
spin_unlock_bh(&sta->lock);
@@ -752,7 +768,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
case NL80211_PLINK_ESTAB:
switch (event) {
case CLS_ACPT:
- reason = cpu_to_le16(MESH_CLOSE_RCVD);
+ reason = cpu_to_le16(WLAN_REASON_MESH_CLOSE);
sta->reason = reason;
deactivated = __mesh_plink_deactivate(sta);
sta->plink_state = NL80211_PLINK_HOLDING;
@@ -761,14 +777,15 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
spin_unlock_bh(&sta->lock);
if (deactivated)
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
- mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
- plid, reason);
+ mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
+ sta->sta.addr, llid, plid, reason);
break;
case OPN_ACPT:
llid = sta->llid;
spin_unlock_bh(&sta->lock);
- mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
- plid, 0);
+ mesh_plink_frame_tx(sdata,
+ WLAN_SP_MESH_PEERING_CONFIRM,
+ sta->sta.addr, llid, plid, 0);
break;
default:
spin_unlock_bh(&sta->lock);
@@ -790,8 +807,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
llid = sta->llid;
reason = sta->reason;
spin_unlock_bh(&sta->lock);
- mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr,
- llid, plid, reason);
+ mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
+ sta->sta.addr, llid, plid, reason);
break;
default:
spin_unlock_bh(&sta->lock);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 1563250..9da8626 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -16,10 +16,12 @@
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
+#include <linux/moduleparam.h>
#include <linux/rtnetlink.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
#include <linux/crc32.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <net/mac80211.h>
#include <asm/unaligned.h>
@@ -160,7 +162,8 @@ static int ecw2cw(int ecw)
*/
static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
struct ieee80211_ht_info *hti,
- const u8 *bssid, u16 ap_ht_cap_flags)
+ const u8 *bssid, u16 ap_ht_cap_flags,
+ bool beacon_htcap_ie)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
@@ -232,6 +235,21 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
WARN_ON(!ieee80211_set_channel_type(local, sdata, channel_type));
}
+ if (beacon_htcap_ie && (prev_chantype != channel_type)) {
+ /*
+ * Whenever the AP announces an HT mode change (e.g. becoming
+ * 40 MHz intolerant), it is safer to stop the tx queues
+ * before doing the hw config, to avoid buffer overflow.
+ */
+ ieee80211_stop_queues_by_reason(&sdata->local->hw,
+ IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE);
+
+ /* flush out all packets */
+ synchronize_net();
+
+ drv_flush(local, false);
+ }
+
/* channel_type change automatically detected */
ieee80211_hw_config(local, 0);
@@ -243,6 +261,10 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
IEEE80211_RC_HT_CHANGED,
channel_type);
rcu_read_unlock();
+
+ if (beacon_htcap_ie)
+ ieee80211_wake_queues_by_reason(&sdata->local->hw,
+ IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE);
}
ht_opmode = le16_to_cpu(hti->operation_mode);
@@ -271,11 +293,9 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt;
skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt));
- if (!skb) {
- printk(KERN_DEBUG "%s: failed to allocate buffer for "
- "deauth/disassoc frame\n", sdata->name);
+ if (!skb)
return;
- }
+
skb_reserve(skb, local->hw.extra_tx_headroom);
mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
@@ -330,6 +350,7 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
{
struct sk_buff *skb;
struct ieee80211_hdr_3addr *nullfunc;
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif);
if (!skb)
@@ -340,6 +361,10 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
nullfunc->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
+ IEEE80211_STA_CONNECTION_POLL))
+ IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_USE_MINRATE;
+
ieee80211_tx_skb(sdata, skb);
}
@@ -354,11 +379,9 @@ static void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
return;
skb = dev_alloc_skb(local->hw.extra_tx_headroom + 30);
- if (!skb) {
- printk(KERN_DEBUG "%s: failed to allocate buffer for 4addr "
- "nullfunc frame\n", sdata->name);
+ if (!skb)
return;
- }
+
skb_reserve(skb, local->hw.extra_tx_headroom);
nullfunc = (struct ieee80211_hdr *) skb_put(skb, 30);
@@ -394,6 +417,9 @@ static void ieee80211_chswitch_work(struct work_struct *work)
/* call "hw_config" only if doing sw channel switch */
ieee80211_hw_config(sdata->local,
IEEE80211_CONF_CHANGE_CHANNEL);
+ } else {
+ /* update the device channel directly */
+ sdata->local->hw.conf.channel = sdata->local->oper_channel;
}
/* XXX: shouldn't really modify cfg80211-owned data! */
@@ -608,7 +634,7 @@ static bool ieee80211_powersave_allowed(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *mgd = &sdata->u.mgd;
struct sta_info *sta = NULL;
- u32 sta_flags = 0;
+ bool authorized = false;
if (!mgd->powersave)
return false;
@@ -629,13 +655,10 @@ static bool ieee80211_powersave_allowed(struct ieee80211_sub_if_data *sdata)
rcu_read_lock();
sta = sta_info_get(sdata, mgd->bssid);
if (sta)
- sta_flags = get_sta_flags(sta);
+ authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
rcu_read_unlock();
- if (!(sta_flags & WLAN_STA_AUTHORIZED))
- return false;
-
- return true;
+ return authorized;
}
/* need to hold RTNL or interface lock */
@@ -752,7 +775,7 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
container_of(work, struct ieee80211_local,
dynamic_ps_enable_work);
struct ieee80211_sub_if_data *sdata = local->ps_sdata;
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct ieee80211_if_managed *ifmgd;
unsigned long flags;
int q;
@@ -760,26 +783,39 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
if (!sdata)
return;
+ ifmgd = &sdata->u.mgd;
+
if (local->hw.conf.flags & IEEE80211_CONF_PS)
return;
- /*
- * transmission can be stopped by others which leads to
- * dynamic_ps_timer expiry. Postpond the ps timer if it
- * is not the actual idle state.
- */
- spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
- for (q = 0; q < local->hw.queues; q++) {
- if (local->queue_stop_reasons[q]) {
- spin_unlock_irqrestore(&local->queue_stop_reason_lock,
- flags);
+ if (!local->disable_dynamic_ps &&
+ local->hw.conf.dynamic_ps_timeout > 0) {
+ /* don't enter PS if TX frames are pending */
+ if (drv_tx_frames_pending(local)) {
mod_timer(&local->dynamic_ps_timer, jiffies +
msecs_to_jiffies(
local->hw.conf.dynamic_ps_timeout));
return;
}
+
+ /*
+ * transmission can be stopped by others which leads to
+ * dynamic_ps_timer expiry. Postpone the ps timer if it
+ * is not the actual idle state.
+ */
+ spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+ for (q = 0; q < local->hw.queues; q++) {
+ if (local->queue_stop_reasons[q]) {
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock,
+ flags);
+ mod_timer(&local->dynamic_ps_timer, jiffies +
+ msecs_to_jiffies(
+ local->hw.conf.dynamic_ps_timeout));
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
}
- spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
(!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED))) {
@@ -804,7 +840,8 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
}
- netif_tx_wake_all_queues(sdata->dev);
+ if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
+ netif_tx_wake_all_queues(sdata->dev);
}
void ieee80211_dynamic_ps_timer(unsigned long data)
@@ -906,7 +943,8 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
params.aifs, params.cw_min, params.cw_max,
params.txop, params.uapsd);
#endif
- if (drv_conf_tx(local, queue, &params))
+ sdata->tx_conf[queue] = params;
+ if (drv_conf_tx(local, sdata, queue, &params))
wiphy_debug(local->hw.wiphy,
"failed to set TX queue parameters for queue %d\n",
queue);
@@ -1064,7 +1102,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
mutex_lock(&local->sta_mtx);
sta = sta_info_get(sdata, bssid);
if (sta) {
- set_sta_flags(sta, WLAN_STA_BLOCK_BA);
+ set_sta_flag(sta, WLAN_STA_BLOCK_BA);
ieee80211_sta_tear_down_BA_sessions(sta, tx);
}
mutex_unlock(&local->sta_mtx);
@@ -1106,8 +1144,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT;
ieee80211_bss_info_change_notify(sdata, changed);
+ /* remove AP and TDLS peers */
if (remove_sta)
- sta_info_destroy_addr(sdata, bssid);
+ sta_info_flush(local, sdata);
del_timer_sync(&sdata->u.mgd.conn_mon_timer);
del_timer_sync(&sdata->u.mgd.bcn_mon_timer);
@@ -1207,7 +1246,8 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
ieee80211_send_nullfunc(sdata->local, sdata, 0);
} else {
ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
- ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid[1], NULL, 0);
+ ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid[1], NULL, 0,
+ (u32) -1, true, false);
}
ifmgd->probe_send_count++;
@@ -1292,7 +1332,8 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
skb = ieee80211_build_probe_req(sdata, ifmgd->associated->bssid,
- ssid + 2, ssid[1], NULL, 0);
+ (u32) -1, ssid + 2, ssid[1],
+ NULL, 0, true);
return skb;
}
@@ -1446,6 +1487,7 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
int i, j, err;
bool have_higher_than_11mbit = false;
u16 ap_ht_cap_flags;
+ int min_rate = INT_MAX, min_rate_index = -1;
/* AssocResp and ReassocResp have identical structure */
@@ -1479,17 +1521,22 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
ifmgd->aid = aid;
- sta = sta_info_alloc(sdata, cbss->bssid, GFP_KERNEL);
- if (!sta) {
- printk(KERN_DEBUG "%s: failed to alloc STA entry for"
- " the AP\n", sdata->name);
+ mutex_lock(&sdata->local->sta_mtx);
+ /*
+ * station info was already allocated and inserted before
+ * the association and should be available to us
+ */
+ sta = sta_info_get_rx(sdata, cbss->bssid);
+ if (WARN_ON(!sta)) {
+ mutex_unlock(&sdata->local->sta_mtx);
return false;
}
- set_sta_flags(sta, WLAN_STA_AUTH | WLAN_STA_ASSOC |
- WLAN_STA_ASSOC_AP);
+ set_sta_flag(sta, WLAN_STA_AUTH);
+ set_sta_flag(sta, WLAN_STA_ASSOC);
+ set_sta_flag(sta, WLAN_STA_ASSOC_AP);
if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
- set_sta_flags(sta, WLAN_STA_AUTHORIZED);
+ set_sta_flag(sta, WLAN_STA_AUTHORIZED);
rates = 0;
basic_rates = 0;
@@ -1507,6 +1554,10 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
rates |= BIT(j);
if (is_basic)
basic_rates |= BIT(j);
+ if (rate < min_rate) {
+ min_rate = rate;
+ min_rate_index = j;
+ }
break;
}
}
@@ -1524,11 +1575,25 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
rates |= BIT(j);
if (is_basic)
basic_rates |= BIT(j);
+ if (rate < min_rate) {
+ min_rate = rate;
+ min_rate_index = j;
+ }
break;
}
}
}
+ /*
+ * Some buggy APs don't advertise basic_rates. Use the lowest
+ * supported rate instead.
+ */
+ if (unlikely(!basic_rates) && min_rate_index >= 0) {
+ printk(KERN_DEBUG "%s: No basic rates in AssocResp. "
+ "Using min supported rate instead.\n", sdata->name);
+ basic_rates = BIT(min_rate_index);
+ }
+
sta->sta.supp_rates[wk->chan->band] = rates;
sdata->vif.bss_conf.basic_rates = basic_rates;
@@ -1548,12 +1613,13 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
rate_control_rate_init(sta);
if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED)
- set_sta_flags(sta, WLAN_STA_MFP);
+ set_sta_flag(sta, WLAN_STA_MFP);
if (elems.wmm_param)
- set_sta_flags(sta, WLAN_STA_WME);
+ set_sta_flag(sta, WLAN_STA_WME);
- err = sta_info_insert(sta);
+ /* sta_info_reinsert will also unlock the mutex */
+ err = sta_info_reinsert(sta);
sta = NULL;
if (err) {
printk(KERN_DEBUG "%s: failed to insert STA entry for"
@@ -1581,7 +1647,8 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
(sdata->local->hw.queues >= 4) &&
!(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem,
- cbss->bssid, ap_ht_cap_flags);
+ cbss->bssid, ap_ht_cap_flags,
+ false);
/* set AID and assoc capability,
* ieee80211_set_associated() will tell the driver */
@@ -1762,6 +1829,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
ifmgd->ave_beacon_signal = rx_status->signal * 16;
ifmgd->last_cqm_event_signal = 0;
ifmgd->count_beacon_signal = 1;
+ ifmgd->last_ave_beacon_signal = 0;
} else {
ifmgd->ave_beacon_signal =
(IEEE80211_SIGNAL_AVE_WEIGHT * rx_status->signal * 16 +
@@ -1769,6 +1837,28 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
ifmgd->ave_beacon_signal) / 16;
ifmgd->count_beacon_signal++;
}
+
+ if (ifmgd->rssi_min_thold != ifmgd->rssi_max_thold &&
+ ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT) {
+ int sig = ifmgd->ave_beacon_signal;
+ int last_sig = ifmgd->last_ave_beacon_signal;
+
+ /*
+ * if signal crosses either of the boundaries, invoke callback
+ * with appropriate parameters
+ */
+ if (sig > ifmgd->rssi_max_thold &&
+ (last_sig <= ifmgd->rssi_min_thold || last_sig == 0)) {
+ ifmgd->last_ave_beacon_signal = sig;
+ drv_rssi_callback(local, RSSI_EVENT_HIGH);
+ } else if (sig < ifmgd->rssi_min_thold &&
+ (last_sig >= ifmgd->rssi_max_thold ||
+ last_sig == 0)) {
+ ifmgd->last_ave_beacon_signal = sig;
+ drv_rssi_callback(local, RSSI_EVENT_LOW);
+ }
+ }
+
if (bss_conf->cqm_rssi_thold &&
ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT &&
!(local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)) {
@@ -1892,7 +1982,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
rcu_read_unlock();
changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem,
- bssid, ap_ht_cap_flags);
+ bssid, ap_ht_cap_flags, true);
}
/* Note: country IE parsing is done for us by cfg80211 */
@@ -2028,7 +2118,7 @@ static void ieee80211_sta_timer(unsigned long data)
}
static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
- u8 *bssid)
+ u8 *bssid, u8 reason)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -2046,8 +2136,7 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
* but that's not a problem.
*/
ieee80211_send_deauth_disassoc(sdata, bssid,
- IEEE80211_STYPE_DEAUTH,
- WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
+ IEEE80211_STYPE_DEAUTH, reason,
NULL, true);
mutex_lock(&ifmgd->mtx);
}
@@ -2093,7 +2182,8 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
" AP %pM, disconnecting.\n",
sdata->name, bssid);
#endif
- ieee80211_sta_connection_lost(sdata, bssid);
+ ieee80211_sta_connection_lost(sdata, bssid,
+ WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
}
} else if (time_is_after_jiffies(ifmgd->probe_timeout))
run_again(ifmgd, ifmgd->probe_timeout);
@@ -2105,7 +2195,8 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
sdata->name,
bssid, probe_wait_ms);
#endif
- ieee80211_sta_connection_lost(sdata, bssid);
+ ieee80211_sta_connection_lost(sdata, bssid,
+ WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
} else if (ifmgd->probe_send_count < max_tries) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
wiphy_debug(local->hw.wiphy,
@@ -2127,7 +2218,8 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
sdata->name,
bssid, probe_wait_ms);
- ieee80211_sta_connection_lost(sdata, bssid);
+ ieee80211_sta_connection_lost(sdata, bssid,
+ WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
}
}
@@ -2196,6 +2288,7 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
cancel_work_sync(&ifmgd->request_smps_work);
+ cancel_work_sync(&ifmgd->monitor_work);
cancel_work_sync(&ifmgd->beacon_connection_loss_work);
if (del_timer_sync(&ifmgd->timer))
set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
@@ -2204,7 +2297,6 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
if (del_timer_sync(&ifmgd->chswitch_timer))
set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running);
- cancel_work_sync(&ifmgd->monitor_work);
/* these will just be re-established on connection */
del_timer_sync(&ifmgd->conn_mon_timer);
del_timer_sync(&ifmgd->bcn_mon_timer);
@@ -2217,12 +2309,31 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
if (!ifmgd->associated)
return;
+ if (sdata->flags & IEEE80211_SDATA_DISCONNECT_RESUME) {
+ sdata->flags &= ~IEEE80211_SDATA_DISCONNECT_RESUME;
+ mutex_lock(&ifmgd->mtx);
+ if (ifmgd->associated) {
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+ wiphy_debug(sdata->local->hw.wiphy,
+ "%s: driver requested disconnect after resume.\n",
+ sdata->name);
+#endif
+ ieee80211_sta_connection_lost(sdata,
+ ifmgd->associated->bssid,
+ WLAN_REASON_UNSPECIFIED);
+ mutex_unlock(&ifmgd->mtx);
+ return;
+ }
+ mutex_unlock(&ifmgd->mtx);
+ }
+
if (test_and_clear_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running))
add_timer(&ifmgd->timer);
if (test_and_clear_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running))
add_timer(&ifmgd->chswitch_timer);
ieee80211_sta_reset_beacon_monitor(sdata);
ieee80211_restart_sta_timer(sdata);
+ ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.monitor_work);
}
#endif
@@ -2288,14 +2399,16 @@ static enum work_done_result
ieee80211_probe_auth_done(struct ieee80211_work *wk,
struct sk_buff *skb)
{
+ struct ieee80211_local *local = wk->sdata->local;
+
if (!skb) {
cfg80211_send_auth_timeout(wk->sdata->dev, wk->filter_ta);
- return WORK_DONE_DESTROY;
+ goto destroy;
}
if (wk->type == IEEE80211_WORK_AUTH) {
cfg80211_send_rx_auth(wk->sdata->dev, skb->data, skb->len);
- return WORK_DONE_DESTROY;
+ goto destroy;
}
mutex_lock(&wk->sdata->u.mgd.mtx);
@@ -2305,6 +2418,12 @@ ieee80211_probe_auth_done(struct ieee80211_work *wk,
wk->type = IEEE80211_WORK_AUTH;
wk->probe_auth.tries = 0;
return WORK_DONE_REQUEUE;
+ destroy:
+ if (wk->probe_auth.synced)
+ drv_finish_tx_sync(local, wk->sdata, wk->filter_ta,
+ IEEE80211_TX_SYNC_AUTH);
+
+ return WORK_DONE_DESTROY;
}
int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
@@ -2374,17 +2493,43 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
return 0;
}
+/* create and insert a dummy station entry */
+static int ieee80211_pre_assoc(struct ieee80211_sub_if_data *sdata,
+ u8 *bssid) {
+ struct sta_info *sta;
+ int err;
+
+ sta = sta_info_alloc(sdata, bssid, GFP_KERNEL);
+ if (!sta)
+ return -ENOMEM;
+
+ sta->dummy = true;
+
+ err = sta_info_insert(sta);
+ sta = NULL;
+ if (err) {
+ printk(KERN_DEBUG "%s: failed to insert Dummy STA entry for"
+ " the AP (error %d)\n", sdata->name, err);
+ return err;
+ }
+
+ return 0;
+}
+
static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk,
struct sk_buff *skb)
{
+ struct ieee80211_local *local = wk->sdata->local;
struct ieee80211_mgmt *mgmt;
struct ieee80211_rx_status *rx_status;
struct ieee802_11_elems elems;
+ struct cfg80211_bss *cbss = wk->assoc.bss;
u16 status;
if (!skb) {
+ sta_info_destroy_addr(wk->sdata, cbss->bssid);
cfg80211_send_assoc_timeout(wk->sdata->dev, wk->filter_ta);
- return WORK_DONE_DESTROY;
+ goto destroy;
}
if (wk->type == IEEE80211_WORK_ASSOC_BEACON_WAIT) {
@@ -2404,19 +2549,32 @@ static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk,
status = le16_to_cpu(mgmt->u.assoc_resp.status_code);
if (status == WLAN_STATUS_SUCCESS) {
+ if (wk->assoc.synced)
+ drv_finish_tx_sync(local, wk->sdata, wk->filter_ta,
+ IEEE80211_TX_SYNC_ASSOC);
+
mutex_lock(&wk->sdata->u.mgd.mtx);
if (!ieee80211_assoc_success(wk, mgmt, skb->len)) {
mutex_unlock(&wk->sdata->u.mgd.mtx);
/* oops -- internal error -- send timeout for now */
+ sta_info_destroy_addr(wk->sdata, cbss->bssid);
cfg80211_send_assoc_timeout(wk->sdata->dev,
wk->filter_ta);
return WORK_DONE_DESTROY;
}
mutex_unlock(&wk->sdata->u.mgd.mtx);
+ } else {
+ /* assoc failed - destroy the dummy station entry */
+ sta_info_destroy_addr(wk->sdata, cbss->bssid);
}
cfg80211_send_rx_assoc(wk->sdata->dev, skb->data, skb->len);
+ destroy:
+ if (wk->assoc.synced)
+ drv_finish_tx_sync(local, wk->sdata, wk->filter_ta,
+ IEEE80211_TX_SYNC_ASSOC);
+
return WORK_DONE_DESTROY;
}
@@ -2427,7 +2585,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
struct ieee80211_bss *bss = (void *)req->bss->priv;
struct ieee80211_work *wk;
const u8 *ssid;
- int i;
+ int i, err;
mutex_lock(&ifmgd->mtx);
if (ifmgd->associated) {
@@ -2452,6 +2610,16 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
if (!wk)
return -ENOMEM;
+ /*
+ * create a dummy station info entry in order
+ * to start accepting incoming EAPOL packets from the station
+ */
+ err = ieee80211_pre_assoc(sdata, req->bss->bssid);
+ if (err) {
+ kfree(wk);
+ return err;
+ }
+
ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N;
ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
@@ -2551,7 +2719,6 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct ieee80211_work *wk;
u8 bssid[ETH_ALEN];
bool assoc_bss = false;
@@ -2564,30 +2731,47 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
assoc_bss = true;
} else {
bool not_auth_yet = false;
+ struct ieee80211_work *tmp, *wk = NULL;
mutex_unlock(&ifmgd->mtx);
mutex_lock(&local->mtx);
- list_for_each_entry(wk, &local->work_list, list) {
- if (wk->sdata != sdata)
+ list_for_each_entry(tmp, &local->work_list, list) {
+ if (tmp->sdata != sdata)
continue;
- if (wk->type != IEEE80211_WORK_DIRECT_PROBE &&
- wk->type != IEEE80211_WORK_AUTH &&
- wk->type != IEEE80211_WORK_ASSOC &&
- wk->type != IEEE80211_WORK_ASSOC_BEACON_WAIT)
+ if (tmp->type != IEEE80211_WORK_DIRECT_PROBE &&
+ tmp->type != IEEE80211_WORK_AUTH &&
+ tmp->type != IEEE80211_WORK_ASSOC &&
+ tmp->type != IEEE80211_WORK_ASSOC_BEACON_WAIT)
continue;
- if (memcmp(req->bss->bssid, wk->filter_ta, ETH_ALEN))
+ if (memcmp(req->bss->bssid, tmp->filter_ta, ETH_ALEN))
continue;
- not_auth_yet = wk->type == IEEE80211_WORK_DIRECT_PROBE;
- list_del_rcu(&wk->list);
- free_work(wk);
+ not_auth_yet = tmp->type == IEEE80211_WORK_DIRECT_PROBE;
+ list_del_rcu(&tmp->list);
+ synchronize_rcu();
+ wk = tmp;
break;
}
mutex_unlock(&local->mtx);
+ if (wk && wk->type == IEEE80211_WORK_ASSOC) {
+ /* clean up dummy sta & TX sync */
+ sta_info_destroy_addr(wk->sdata, wk->filter_ta);
+ if (wk->assoc.synced)
+ drv_finish_tx_sync(local, wk->sdata,
+ wk->filter_ta,
+ IEEE80211_TX_SYNC_ASSOC);
+ } else if (wk && wk->type == IEEE80211_WORK_AUTH) {
+ if (wk->probe_auth.synced)
+ drv_finish_tx_sync(local, wk->sdata,
+ wk->filter_ta,
+ IEEE80211_TX_SYNC_AUTH);
+ }
+ kfree(wk);
+
/*
* If somebody requests authentication and we haven't
* sent out an auth frame yet there's no need to send
@@ -2609,7 +2793,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
req->reason_code, cookie,
!req->local_state_change);
if (assoc_bss)
- sta_info_destroy_addr(sdata, bssid);
+ sta_info_flush(sdata->local, sdata);
mutex_lock(&sdata->local->mtx);
ieee80211_recalc_idle(sdata->local);
@@ -2649,7 +2833,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
ieee80211_send_deauth_disassoc(sdata, req->bss->bssid,
IEEE80211_STYPE_DISASSOC, req->reason_code,
cookie, !req->local_state_change);
- sta_info_destroy_addr(sdata, bssid);
+ sta_info_flush(sdata->local, sdata);
mutex_lock(&sdata->local->mtx);
ieee80211_recalc_idle(sdata->local);
@@ -2669,3 +2853,10 @@ void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
cfg80211_cqm_rssi_notify(sdata->dev, rssi_event, gfp);
}
EXPORT_SYMBOL(ieee80211_cqm_rssi_notify);
+
+unsigned char ieee80211_get_operstate(struct ieee80211_vif *vif)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ return sdata->dev->operstate;
+}
+EXPORT_SYMBOL(ieee80211_get_operstate);
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index ecc4922..db2c215 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -12,19 +12,16 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-trace.h"
/*
- * Tell our hardware to disable PS.
- * Optionally inform AP that we will go to sleep so that it will buffer
- * the frames while we are doing off-channel work. This is optional
- * because we *may* be doing work on-operating channel, and want our
- * hardware unconditionally awake, but still let the AP send us normal frames.
+ * inform AP that we will go to sleep so that it will buffer the frames
+ * while we scan
*/
-static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata,
- bool tell_ap)
+static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -45,8 +42,8 @@ static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata,
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
}
- if (tell_ap && (!local->offchannel_ps_enabled ||
- !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)))
+ if (!(local->offchannel_ps_enabled) ||
+ !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
/*
* If power save was enabled, no need to send a nullfunc
* frame because AP knows that we are sleeping. But if the
@@ -81,9 +78,6 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
* we are sleeping, let's just enable power save mode in
* hardware.
*/
- /* TODO: Only set hardware if CONF_PS changed?
- * TODO: Should we set offchannel_ps_enabled to false?
- */
local->hw.conf.flags |= IEEE80211_CONF_PS;
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
} else if (local->hw.conf.dynamic_ps_timeout > 0) {
@@ -102,52 +96,57 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
ieee80211_sta_reset_conn_monitor(sdata);
}
-void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local)
+void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata;
- /*
- * notify the AP about us leaving the channel and stop all
- * STA interfaces.
- */
mutex_lock(&local->iflist_mtx);
list_for_each_entry(sdata, &local->interfaces, list) {
if (!ieee80211_sdata_running(sdata))
continue;
- if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
- set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
-
- /* Check to see if we should disable beaconing. */
+ /* disable beaconing */
if (sdata->vif.type == NL80211_IFTYPE_AP ||
sdata->vif.type == NL80211_IFTYPE_ADHOC ||
sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
ieee80211_bss_info_change_notify(
sdata, BSS_CHANGED_BEACON_ENABLED);
- if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
+ /*
+ * only handle non-STA interfaces here; STA interfaces
+ * are handled in ieee80211_offchannel_stop_station(),
+ * e.g., from the background scan state machine.
+ *
+ * In addition, do not stop the monitor interface, to allow it to be
+ * used for user-space-controlled off-channel operations.
+ */
+ if (sdata->vif.type != NL80211_IFTYPE_STATION &&
+ sdata->vif.type != NL80211_IFTYPE_MONITOR) {
+ set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
netif_tx_stop_all_queues(sdata->dev);
- if (sdata->vif.type == NL80211_IFTYPE_STATION &&
- sdata->u.mgd.associated)
- ieee80211_offchannel_ps_enable(sdata, true);
}
}
mutex_unlock(&local->iflist_mtx);
}
-void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
- bool tell_ap)
+void ieee80211_offchannel_stop_station(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata;
+ /*
+ * notify the AP about us leaving the channel and stop all STA interfaces
+ */
mutex_lock(&local->iflist_mtx);
list_for_each_entry(sdata, &local->interfaces, list) {
if (!ieee80211_sdata_running(sdata))
continue;
- if (sdata->vif.type == NL80211_IFTYPE_STATION &&
- sdata->u.mgd.associated)
- ieee80211_offchannel_ps_enable(sdata, tell_ap);
+ if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+ set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
+ netif_tx_stop_all_queues(sdata->dev);
+ if (sdata->u.mgd.associated)
+ ieee80211_offchannel_ps_enable(sdata);
+ }
}
mutex_unlock(&local->iflist_mtx);
}
@@ -163,9 +162,10 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
continue;
/* Tell AP we're back */
- if (sdata->vif.type == NL80211_IFTYPE_STATION &&
- sdata->u.mgd.associated)
- ieee80211_offchannel_ps_disable(sdata);
+ if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+ if (sdata->u.mgd.associated)
+ ieee80211_offchannel_ps_disable(sdata);
+ }
if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
@@ -182,7 +182,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
netif_tx_wake_all_queues(sdata->dev);
}
- /* Check to see if we should re-enable beaconing */
+ /* re-enable beaconing */
if (enable_beaconing &&
(sdata->vif.type == NL80211_IFTYPE_AP ||
sdata->vif.type == NL80211_IFTYPE_ADHOC ||
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 730778a..9ee7164 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -6,18 +6,43 @@
#include "driver-ops.h"
#include "led.h"
+/* return value indicates whether the driver should be further notified */
+static bool ieee80211_quiesce(struct ieee80211_sub_if_data *sdata)
+{
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_STATION:
+ ieee80211_sta_quiesce(sdata);
+ return true;
+ case NL80211_IFTYPE_ADHOC:
+ ieee80211_ibss_quiesce(sdata);
+ return true;
+ case NL80211_IFTYPE_MESH_POINT:
+ ieee80211_mesh_quiesce(sdata);
+ return true;
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_MONITOR:
+ /* don't tell driver about this */
+ return false;
+ default:
+ return true;
+ }
+}
+
int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata;
struct sta_info *sta;
+ if (!local->open_count)
+ goto suspend;
+
ieee80211_scan_cancel(local);
if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
mutex_lock(&local->sta_mtx);
list_for_each_entry(sta, &local->sta_list, list) {
- set_sta_flags(sta, WLAN_STA_BLOCK_BA);
+ set_sta_flag(sta, WLAN_STA_BLOCK_BA);
ieee80211_sta_tear_down_BA_sessions(sta, true);
}
mutex_unlock(&local->sta_mtx);
@@ -50,11 +75,19 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
local->wowlan = wowlan && local->open_count;
if (local->wowlan) {
int err = drv_suspend(local, wowlan);
- if (err) {
+ if (err < 0) {
local->quiescing = false;
return err;
+ } else if (err > 0) {
+ WARN_ON(err != 1);
+ local->wowlan = false;
+ } else {
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ cancel_work_sync(&sdata->work);
+ ieee80211_quiesce(sdata);
+ }
+ goto suspend;
}
- goto suspend;
}
/* disable keys */
@@ -82,23 +115,8 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
list_for_each_entry(sdata, &local->interfaces, list) {
cancel_work_sync(&sdata->work);
- switch(sdata->vif.type) {
- case NL80211_IFTYPE_STATION:
- ieee80211_sta_quiesce(sdata);
- break;
- case NL80211_IFTYPE_ADHOC:
- ieee80211_ibss_quiesce(sdata);
- break;
- case NL80211_IFTYPE_MESH_POINT:
- ieee80211_mesh_quiesce(sdata);
- break;
- case NL80211_IFTYPE_AP_VLAN:
- case NL80211_IFTYPE_MONITOR:
- /* don't tell driver about this */
+ if (!ieee80211_quiesce(sdata))
continue;
- default:
- break;
- }
if (!ieee80211_sdata_running(sdata))
continue;
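
The pm.c hunk above relies on a three-way return convention for drv_suspend() when WoWLAN is configured: a negative value aborts the suspend, 0 means the driver accepted the WoWLAN configuration (only the per-interface quiesce runs before jumping to the suspend label), and a positive value (only 1 is expected) makes mac80211 fall back to the normal full-quiesce path. A minimal kernel-style sketch of that convention follows; the helper name and the use_wowlan flag are illustrative only, and only the err < 0 / err == 1 / err == 0 handling mirrors the patch (assumes <linux/types.h> for bool).

/*
 * Hypothetical helper -- not part of the patch -- mirroring how
 * __ieee80211_suspend() above interprets the drv_suspend() result.
 */
static int example_handle_suspend_result(int err, bool *use_wowlan)
{
	if (err < 0)
		return err;		/* abort the suspend, propagate the error */

	if (err > 0) {
		/* only 1 is expected: the driver refused WoWLAN for now */
		*use_wowlan = false;	/* fall back to the normal quiesce path */
		return 0;
	}

	*use_wowlan = true;		/* 0: the driver will handle wakeup itself */
	return 0;
}
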
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 816590b..7d84b87 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include "rate.h"
#include "ieee80211_i.h"
#include "debugfs.h"
@@ -199,7 +200,7 @@ static void rate_control_release(struct kref *kref)
kfree(ctrl_ref);
}
-static bool rc_no_data_or_no_ack(struct ieee80211_tx_rate_control *txrc)
+static bool rc_no_data_or_no_ack_use_min(struct ieee80211_tx_rate_control *txrc)
{
struct sk_buff *skb = txrc->skb;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -208,7 +209,9 @@ static bool rc_no_data_or_no_ack(struct ieee80211_tx_rate_control *txrc)
fc = hdr->frame_control;
- return (info->flags & IEEE80211_TX_CTL_NO_ACK) || !ieee80211_is_data(fc);
+ return (info->flags & (IEEE80211_TX_CTL_NO_ACK |
+ IEEE80211_TX_CTL_USE_MINRATE)) ||
+ !ieee80211_is_data(fc);
}
static void rc_send_low_broadcast(s8 *idx, u32 basic_rates,
@@ -233,6 +236,27 @@ static void rc_send_low_broadcast(s8 *idx, u32 basic_rates,
/* could not find a basic rate; use original selection */
}
+static inline s8
+rate_lowest_non_cck_index(struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta)
+{
+ int i;
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ struct ieee80211_rate *srate = &sband->bitrates[i];
+ if ((srate->bitrate == 10) || (srate->bitrate == 20) ||
+ (srate->bitrate == 55) || (srate->bitrate == 110))
+ continue;
+
+ if (rate_supported(sta, sband->band, i))
+ return i;
+ }
+
+ /* No matching rate found */
+ return 0;
+}
+
+
bool rate_control_send_low(struct ieee80211_sta *sta,
void *priv_sta,
struct ieee80211_tx_rate_control *txrc)
@@ -241,8 +265,14 @@ bool rate_control_send_low(struct ieee80211_sta *sta,
struct ieee80211_supported_band *sband = txrc->sband;
int mcast_rate;
- if (!sta || !priv_sta || rc_no_data_or_no_ack(txrc)) {
- info->control.rates[0].idx = rate_lowest_index(txrc->sband, sta);
+ if (!sta || !priv_sta || rc_no_data_or_no_ack_use_min(txrc)) {
+ if ((sband->band != IEEE80211_BAND_2GHZ) ||
+ !(info->flags & IEEE80211_TX_CTL_NO_CCK_RATE))
+ info->control.rates[0].idx =
+ rate_lowest_index(txrc->sband, sta);
+ else
+ info->control.rates[0].idx =
+ rate_lowest_non_cck_index(txrc->sband, sta);
info->control.rates[0].count =
(info->flags & IEEE80211_TX_CTL_NO_ACK) ?
1 : txrc->hw->max_rate_tries;
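
The rate.c hunk above makes the low-rate fallback skip CCK rates when IEEE80211_TX_CTL_NO_CCK_RATE is set on the 2.4 GHz band; bitrates are stored in units of 100 kbps, so 10/20/55/110 are the 1/2/5.5/11 Mbps CCK entries. Below is a self-contained, simplified sketch of the same filtering, not part of the patch, which drops the per-station rate_supported() check and works on a plain array.

#include <stdio.h>

/* simplified stand-in for rate_lowest_non_cck_index(): bitrates in 100 kbps */
static int lowest_non_cck_index(const int *bitrates, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		int r = bitrates[i];

		if (r == 10 || r == 20 || r == 55 || r == 110)
			continue;	/* skip the 802.11b CCK rates */
		return i;		/* first non-CCK rate in the table */
	}
	return 0;			/* no match: fall back to the lowest rate */
}

int main(void)
{
	/* typical 2.4 GHz table: CCK rates first, then the OFDM rates */
	int rates[] = { 10, 20, 55, 110, 60, 90, 120, 180, 240, 360, 480, 540 };

	/* prints 4, i.e. the 6 Mbps OFDM entry */
	printf("%d\n", lowest_non_cck_index(rates, 12));
	return 0;
}
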
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 8adac67..58a8955 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -532,12 +532,21 @@ minstrel_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
mp->hw = hw;
mp->update_interval = 100;
+#ifdef CONFIG_MAC80211_DEBUGFS
+ mp->fixed_rate_idx = (u32) -1;
+ mp->dbg_fixed_rate = debugfs_create_u32("fixed_rate_idx",
+ S_IRUGO | S_IWUGO, debugfsdir, &mp->fixed_rate_idx);
+#endif
+
return mp;
}
static void
minstrel_free(void *priv)
{
+#ifdef CONFIG_MAC80211_DEBUGFS
+ debugfs_remove(((struct minstrel_priv *)priv)->dbg_fixed_rate);
+#endif
kfree(priv);
}
diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h
index 0f5a833..5d278ec 100644
--- a/net/mac80211/rc80211_minstrel.h
+++ b/net/mac80211/rc80211_minstrel.h
@@ -78,6 +78,18 @@ struct minstrel_priv {
unsigned int update_interval;
unsigned int lookaround_rate;
unsigned int lookaround_rate_mrr;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ /*
+ * enable fixed rate processing per RC
+ * - write static index to debugfs:ieee80211/phyX/rc/fixed_rate_idx
+ * - write -1 to enable RC processing again
+ * - setting will be applied on next update
+ */
+ u32 fixed_rate_idx;
+ struct dentry *dbg_fixed_rate;
+#endif
+
};
struct minstrel_debugfs_info {
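
The minstrel_priv fields added above expose a per-PHY debugfs knob: writing a rate index to rc/fixed_rate_idx pins minstrel to that rate, and writing -1 (stored as (u32) -1) re-enables normal rate control on the next update. A minimal sketch of the override pattern follows; the helper is hypothetical and simply mirrors the check applied in the minstrel_ht_get_rate() hunk further down.

/*
 * Hypothetical helper -- not part of the patch -- showing how a fixed
 * debugfs rate index overrides the index picked by normal sampling.
 */
static int example_pick_rate(const struct minstrel_priv *mp, int sampled_idx)
{
#ifdef CONFIG_MAC80211_DEBUGFS
	/* (u32) -1 means "no fixed rate", i.e. leave rate control alone */
	if (mp->fixed_rate_idx != (u32) -1)
		return mp->fixed_rate_idx;
#endif
	return sampled_idx;
}
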
diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c
index a290ad2..d5a5622 100644
--- a/net/mac80211/rc80211_minstrel_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_debugfs.c
@@ -50,6 +50,7 @@
#include <linux/debugfs.h>
#include <linux/ieee80211.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <net/mac80211.h>
#include "rc80211_minstrel.h"
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 333b511..cdb2853 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -281,6 +281,8 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
mr = minstrel_get_ratestats(mi, mg->max_tp_rate);
if (cur_tp < mr->cur_tp) {
+ mi->max_tp_rate2 = mi->max_tp_rate;
+ cur_tp2 = cur_tp;
mi->max_tp_rate = mg->max_tp_rate;
cur_tp = mr->cur_tp;
}
@@ -452,7 +454,8 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) {
minstrel_ht_update_stats(mp, mi);
- minstrel_aggr_check(mp, sta, skb);
+ if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
+ minstrel_aggr_check(mp, sta, skb);
}
}
@@ -608,7 +611,20 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
return mac80211_minstrel.get_rate(priv, sta, &msp->legacy, txrc);
info->flags |= mi->tx_flags;
- sample_idx = minstrel_get_sample_rate(mp, mi);
+
+ /* Don't use EAPOL frames for sampling on non-mrr hw */
+ if (mp->hw->max_rates == 1 &&
+ txrc->skb->protocol == cpu_to_be16(ETH_P_PAE))
+ sample_idx = -1;
+ else
+ sample_idx = minstrel_get_sample_rate(mp, mi);
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ /* use fixed index if set */
+ if (mp->fixed_rate_idx != -1)
+ sample_idx = mp->fixed_rate_idx;
+#endif
+
if (sample_idx >= 0) {
sample = true;
minstrel_ht_set_rate(mp, mi, &ar[0], sample_idx,
diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c
index cefcb5d..e788f76 100644
--- a/net/mac80211/rc80211_minstrel_ht_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c
@@ -10,6 +10,7 @@
#include <linux/skbuff.h>
#include <linux/debugfs.h>
#include <linux/ieee80211.h>
+#include <linux/export.h>
#include <net/mac80211.h>
#include "rc80211_minstrel.h"
#include "rc80211_minstrel_ht.h"
diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
index 4851e9e..c97a065 100644
--- a/net/mac80211/rc80211_pid_debugfs.c
+++ b/net/mac80211/rc80211_pid_debugfs.c
@@ -13,6 +13,7 @@
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <net/mac80211.h>
#include "rate.h"
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 10e8842..7c53eff 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -16,6 +16,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rcupdate.h>
+#include <linux/export.h>
#include <net/mac80211.h>
#include <net/ieee80211_radiotap.h>
@@ -334,15 +335,18 @@ static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
- int tid;
+ int tid, seqno_idx, security_idx;
/* does the frame have a qos control field? */
if (ieee80211_is_data_qos(hdr->frame_control)) {
u8 *qc = ieee80211_get_qos_ctl(hdr);
/* frame has qos control */
tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
- if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
+ if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
status->rx_flags |= IEEE80211_RX_AMSDU;
+
+ seqno_idx = tid;
+ security_idx = tid;
} else {
/*
* IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
@@ -355,10 +359,15 @@ static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
*
* We also use that counter for non-QoS STAs.
*/
- tid = NUM_RX_DATA_QUEUES - 1;
+ seqno_idx = NUM_RX_DATA_QUEUES;
+ security_idx = 0;
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ security_idx = NUM_RX_DATA_QUEUES;
+ tid = 0;
}
- rx->queue = tid;
+ rx->seqno_idx = seqno_idx;
+ rx->security_idx = security_idx;
/* Set skb->priority to 1d tag if highest order bit of TID is not set.
* For now, set skb->priority to 0 for other cases. */
rx->skb->priority = (tid > 7) ? 0 : tid;
@@ -412,10 +421,16 @@ ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
return RX_CONTINUE;
if (test_bit(SCAN_HW_SCANNING, &local->scanning) ||
- test_bit(SCAN_SW_SCANNING, &local->scanning) ||
local->sched_scanning)
return ieee80211_scan_rx(rx->sdata, skb);
+ if (test_bit(SCAN_SW_SCANNING, &local->scanning)) {
+ /* drop all the other packets during a software scan anyway */
+ if (ieee80211_scan_rx(rx->sdata, skb) != RX_QUEUED)
+ dev_kfree_skb(skb);
+ return RX_QUEUED;
+ }
+
/* scanning finished during invoking of handlers */
I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
return RX_DROP_UNUSABLE;
@@ -471,7 +486,6 @@ static ieee80211_rx_result
ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
- unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
char *dev_addr = rx->sdata->vif.addr;
if (ieee80211_is_data(hdr->frame_control)) {
@@ -501,6 +515,11 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
if (ieee80211_is_action(hdr->frame_control)) {
u8 category;
+
+ /* make sure category field is present */
+ if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
+ return RX_DROP_MONITOR;
+
mgmt = (struct ieee80211_mgmt *)hdr;
category = mgmt->u.action.category;
if (category != WLAN_CATEGORY_MESH_ACTION &&
@@ -519,14 +538,6 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
}
-#define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l))
-
- if (ieee80211_is_data(hdr->frame_control) &&
- is_multicast_ether_addr(hdr->addr1) &&
- mesh_rmc_check(hdr->addr3, msh_h_get(hdr, hdrlen), rx->sdata))
- return RX_DROP_MONITOR;
-#undef msh_h_get
-
return RX_CONTINUE;
}
@@ -659,9 +670,10 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
set_release_timer:
- mod_timer(&tid_agg_rx->reorder_timer,
- tid_agg_rx->reorder_time[j] + 1 +
- HT_RX_REORDER_BUF_TIMEOUT);
+ if (!tid_agg_rx->removed)
+ mod_timer(&tid_agg_rx->reorder_timer,
+ tid_agg_rx->reorder_time[j] + 1 +
+ HT_RX_REORDER_BUF_TIMEOUT);
} else {
del_timer(&tid_agg_rx->reorder_timer);
}
@@ -753,7 +765,8 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
u16 sc;
int tid;
- if (!ieee80211_is_data_qos(hdr->frame_control))
+ if (!ieee80211_is_data_qos(hdr->frame_control) ||
+ is_multicast_ether_addr(hdr->addr1))
goto dont_reorder;
/*
@@ -819,7 +832,7 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
!ieee80211_is_qos_nullfunc(hdr->frame_control) &&
!is_multicast_ether_addr(hdr->addr1)) {
if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
- rx->sta->last_seq_ctrl[rx->queue] ==
+ rx->sta->last_seq_ctrl[rx->seqno_idx] ==
hdr->seq_ctrl)) {
if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
rx->local->dot11FrameDuplicateCount++;
@@ -827,7 +840,7 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
}
return RX_DROP_UNUSABLE;
} else
- rx->sta->last_seq_ctrl[rx->queue] = hdr->seq_ctrl;
+ rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
}
if (unlikely(rx->skb->len < 16)) {
@@ -851,8 +864,23 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
ieee80211_is_pspoll(hdr->frame_control)) &&
rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
- (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC))))
+ (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
+ if (rx->sta && rx->sta->dummy &&
+ ieee80211_is_data_present(hdr->frame_control)) {
+ unsigned int hdrlen;
+ __be16 ethertype;
+
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+
+ if (rx->skb->len < hdrlen + 8)
+ return RX_DROP_MONITOR;
+
+ skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
+ if (ethertype == rx->sdata->control_port_protocol)
+ return RX_CONTINUE;
+ }
return RX_DROP_MONITOR;
+ }
return RX_CONTINUE;
}
@@ -1020,6 +1048,9 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
}
if (rx->key) {
+ if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
+ return RX_DROP_MONITOR;
+
rx->key->tx_rx_count++;
/* TODO: add threshold stuff again */
} else {
@@ -1104,7 +1135,7 @@ static void ap_sta_ps_start(struct sta_info *sta)
struct ieee80211_local *local = sdata->local;
atomic_inc(&sdata->bss->num_sta_ps);
- set_sta_flags(sta, WLAN_STA_PS_STA);
+ set_sta_flag(sta, WLAN_STA_PS_STA);
if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
@@ -1124,7 +1155,7 @@ static void ap_sta_ps_end(struct sta_info *sta)
sdata->name, sta->sta.addr, sta->sta.aid);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
- if (test_sta_flags(sta, WLAN_STA_PS_DRIVER)) {
+ if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
sdata->name, sta->sta.addr, sta->sta.aid);
@@ -1143,7 +1174,7 @@ int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
WARN_ON(!(sta_inf->local->hw.flags & IEEE80211_HW_AP_LINK_PS));
/* Don't let the same PS state be set twice */
- in_ps = test_sta_flags(sta_inf, WLAN_STA_PS_STA);
+ in_ps = test_sta_flag(sta_inf, WLAN_STA_PS_STA);
if ((start && in_ps) || (!start && !in_ps))
return -EINVAL;
@@ -1157,6 +1188,81 @@ int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
EXPORT_SYMBOL(ieee80211_sta_ps_transition);
static ieee80211_rx_result debug_noinline
+ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
+{
+ struct ieee80211_sub_if_data *sdata = rx->sdata;
+ struct ieee80211_hdr *hdr = (void *)rx->skb->data;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
+ int tid, ac;
+
+ if (!rx->sta || !(status->rx_flags & IEEE80211_RX_RA_MATCH))
+ return RX_CONTINUE;
+
+ if (sdata->vif.type != NL80211_IFTYPE_AP &&
+ sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
+ return RX_CONTINUE;
+
+ /*
+ * The device handles station powersave, so don't do anything about
+ * uAPSD and PS-Poll frames (the latter shouldn't even be passed up
+ * to mac80211 by the device, since it handles them).
+ */
+ if (sdata->local->hw.flags & IEEE80211_HW_AP_LINK_PS)
+ return RX_CONTINUE;
+
+ /*
+ * Don't do anything if the station isn't already asleep. In
+ * the uAPSD case, the station will probably be marked asleep,
+ * in the PS-Poll case the station must be confused ...
+ */
+ if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
+ return RX_CONTINUE;
+
+ if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
+ if (!test_sta_flag(rx->sta, WLAN_STA_SP)) {
+ if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
+ ieee80211_sta_ps_deliver_poll_response(rx->sta);
+ else
+ set_sta_flag(rx->sta, WLAN_STA_PSPOLL);
+ }
+
+ /* Free PS Poll skb here instead of returning RX_DROP that would
+ * count as a dropped frame. */
+ dev_kfree_skb(rx->skb);
+
+ return RX_QUEUED;
+ } else if (!ieee80211_has_morefrags(hdr->frame_control) &&
+ !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
+ ieee80211_has_pm(hdr->frame_control) &&
+ (ieee80211_is_data_qos(hdr->frame_control) ||
+ ieee80211_is_qos_nullfunc(hdr->frame_control))) {
+ tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+ ac = ieee802_1d_to_ac[tid & 7];
+
+ /*
+ * If this AC is not trigger-enabled do nothing.
+ *
+ * NB: This could/should check a separate bitmap of trigger-
+ * enabled queues, but for now we only implement uAPSD w/o
+ * TSPEC changes to the ACs, so they're always the same.
+ */
+ if (!(rx->sta->sta.uapsd_queues & BIT(ac)))
+ return RX_CONTINUE;
+
+ /* if we are in a service period, do nothing */
+ if (test_sta_flag(rx->sta, WLAN_STA_SP))
+ return RX_CONTINUE;
+
+ if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
+ ieee80211_sta_ps_deliver_uapsd(rx->sta);
+ else
+ set_sta_flag(rx->sta, WLAN_STA_UAPSD);
+ }
+
+ return RX_CONTINUE;
+}
+
+static ieee80211_rx_result debug_noinline
ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
{
struct sta_info *sta = rx->sta;
@@ -1214,7 +1320,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
!(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
(rx->sdata->vif.type == NL80211_IFTYPE_AP ||
rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
- if (test_sta_flags(sta, WLAN_STA_PS_STA)) {
+ if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
/*
* Ignore doze->wake transitions that are
* indicated by non-data frames, the standard
@@ -1365,11 +1471,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
sc = le16_to_cpu(hdr->seq_ctrl);
frag = sc & IEEE80211_SCTL_FRAG;
- if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
- is_multicast_ether_addr(hdr->addr1))) {
- /* not fragmented */
- goto out;
+ if (is_multicast_ether_addr(hdr->addr1)) {
+ rx->local->dot11MulticastReceivedFrameCount++;
+ goto out_no_led;
}
+
+ if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
+ goto out;
+
I802_DEBUG_INC(rx->local->rx_handlers_fragments);
if (skb_linearize(rx->skb))
@@ -1386,11 +1495,10 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
if (frag == 0) {
/* This is the first fragment of a new frame. */
entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
- rx->queue, &(rx->skb));
+ rx->seqno_idx, &(rx->skb));
if (rx->key && rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP &&
ieee80211_has_protected(fc)) {
- int queue = ieee80211_is_mgmt(fc) ?
- NUM_RX_DATA_QUEUES : rx->queue;
+ int queue = rx->security_idx;
/* Store CCMP PN so that we can verify that the next
* fragment has a sequential PN value. */
entry->ccmp = 1;
@@ -1404,7 +1512,8 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
/* This is a fragment for a frame that should already be pending in
* fragment cache. Add this fragment to the end of the pending entry.
*/
- entry = ieee80211_reassemble_find(rx->sdata, frag, seq, rx->queue, hdr);
+ entry = ieee80211_reassemble_find(rx->sdata, frag, seq,
+ rx->seqno_idx, hdr);
if (!entry) {
I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
return RX_DROP_MONITOR;
@@ -1424,8 +1533,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
if (pn[i])
break;
}
- queue = ieee80211_is_mgmt(fc) ?
- NUM_RX_DATA_QUEUES : rx->queue;
+ queue = rx->security_idx;
rpn = rx->key->u.ccmp.rx_pn[queue];
if (memcmp(pn, rpn, CCMP_PN_LEN))
return RX_DROP_UNUSABLE;
@@ -1461,43 +1569,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
status->rx_flags |= IEEE80211_RX_FRAGMENTED;
out:
+ ieee80211_led_rx(rx->local);
+ out_no_led:
if (rx->sta)
rx->sta->rx_packets++;
- if (is_multicast_ether_addr(hdr->addr1))
- rx->local->dot11MulticastReceivedFrameCount++;
- else
- ieee80211_led_rx(rx->local);
return RX_CONTINUE;
}
static ieee80211_rx_result debug_noinline
-ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
-{
- struct ieee80211_sub_if_data *sdata = rx->sdata;
- __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control;
- struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
-
- if (likely(!rx->sta || !ieee80211_is_pspoll(fc) ||
- !(status->rx_flags & IEEE80211_RX_RA_MATCH)))
- return RX_CONTINUE;
-
- if ((sdata->vif.type != NL80211_IFTYPE_AP) &&
- (sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
- return RX_DROP_UNUSABLE;
-
- if (!test_sta_flags(rx->sta, WLAN_STA_PS_DRIVER))
- ieee80211_sta_ps_deliver_poll_response(rx->sta);
- else
- set_sta_flags(rx->sta, WLAN_STA_PSPOLL);
-
- /* Free PS Poll skb here instead of returning RX_DROP that would
- * count as an dropped frame. */
- dev_kfree_skb(rx->skb);
-
- return RX_QUEUED;
-}
-
-static ieee80211_rx_result debug_noinline
ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
{
u8 *data = rx->skb->data;
@@ -1520,7 +1599,7 @@ static int
ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
{
if (unlikely(!rx->sta ||
- !test_sta_flags(rx->sta, WLAN_STA_AUTHORIZED)))
+ !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
return -EACCES;
return 0;
@@ -1563,7 +1642,7 @@ ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
if (status->flag & RX_FLAG_DECRYPTED)
return 0;
- if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) {
+ if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) {
if (unlikely(!ieee80211_has_protected(fc) &&
ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
rx->key)) {
@@ -1827,15 +1906,45 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
hdr = (struct ieee80211_hdr *) skb->data;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
+
+ /* make sure fixed part of mesh header is there, also checks skb len */
+ if (!pskb_may_pull(rx->skb, hdrlen + 6))
+ return RX_DROP_MONITOR;
+
mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
- if (!ieee80211_is_data(hdr->frame_control))
+ /* make sure full mesh header is there, also checks skb len */
+ if (!pskb_may_pull(rx->skb,
+ hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr)))
+ return RX_DROP_MONITOR;
+
+ /* reload pointers */
+ hdr = (struct ieee80211_hdr *) skb->data;
+ mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
+
+ if (ieee80211_drop_unencrypted(rx, hdr->frame_control))
+ return RX_DROP_MONITOR;
+
+ /* frame is in RMC, don't forward */
+ if (ieee80211_is_data(hdr->frame_control) &&
+ is_multicast_ether_addr(hdr->addr1) &&
+ mesh_rmc_check(hdr->addr3, mesh_hdr, rx->sdata))
+ return RX_DROP_MONITOR;
+
+ if (!ieee80211_is_data(hdr->frame_control) ||
+ !(status->rx_flags & IEEE80211_RX_RA_MATCH))
return RX_CONTINUE;
if (!mesh_hdr->ttl)
/* illegal frame */
return RX_DROP_MONITOR;
+ if (ieee80211_queue_stopped(&local->hw, skb_get_queue_mapping(skb))) {
+ IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
+ dropped_frames_congestion);
+ return RX_DROP_MONITOR;
+ }
+
if (mesh_hdr->flags & MESH_FLAGS_AE) {
struct mesh_path *mppath;
char *proxied_addr;
@@ -1844,9 +1953,12 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
if (is_multicast_ether_addr(hdr->addr1)) {
mpp_addr = hdr->addr3;
proxied_addr = mesh_hdr->eaddr1;
- } else {
+ } else if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6) {
+ /* has_a4 already checked in ieee80211_rx_mesh_check */
mpp_addr = hdr->addr4;
proxied_addr = mesh_hdr->eaddr2;
+ } else {
+ return RX_DROP_MONITOR;
}
rcu_read_lock();
@@ -1869,7 +1981,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
mesh_hdr->ttl--;
- if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
+ {
if (!mesh_hdr->ttl)
IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh,
dropped_frames_ttl);
@@ -1891,13 +2003,13 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
memset(info, 0, sizeof(*info));
info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
info->control.vif = &rx->sdata->vif;
- skb_set_queue_mapping(skb,
- ieee80211_select_queue(rx->sdata, fwd_skb));
- ieee80211_set_qos_hdr(local, skb);
- if (is_multicast_ether_addr(fwd_hdr->addr1))
+ if (is_multicast_ether_addr(fwd_hdr->addr1)) {
IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
fwded_mcast);
- else {
+ skb_set_queue_mapping(fwd_skb,
+ ieee80211_select_queue(sdata, fwd_skb));
+ ieee80211_set_qos_hdr(sdata, fwd_skb);
+ } else {
int err;
/*
* Save TA to addr1 to send TA a path error if a
@@ -2222,12 +2334,37 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
goto handled;
}
break;
+ case WLAN_CATEGORY_SELF_PROTECTED:
+ if (len < (IEEE80211_MIN_ACTION_SIZE +
+ sizeof(mgmt->u.action.u.self_prot.action_code)))
+ break;
+
+ switch (mgmt->u.action.u.self_prot.action_code) {
+ case WLAN_SP_MESH_PEERING_OPEN:
+ case WLAN_SP_MESH_PEERING_CLOSE:
+ case WLAN_SP_MESH_PEERING_CONFIRM:
+ if (!ieee80211_vif_is_mesh(&sdata->vif))
+ goto invalid;
+ if (sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE)
+ /* userspace handles this frame */
+ break;
+ goto queue;
+ case WLAN_SP_MGK_INFORM:
+ case WLAN_SP_MGK_ACK:
+ if (!ieee80211_vif_is_mesh(&sdata->vif))
+ goto invalid;
+ break;
+ }
+ break;
case WLAN_CATEGORY_MESH_ACTION:
+ if (len < (IEEE80211_MIN_ACTION_SIZE +
+ sizeof(mgmt->u.action.u.mesh_action.action_code)))
+ break;
+
if (!ieee80211_vif_is_mesh(&sdata->vif))
break;
- goto queue;
- case WLAN_CATEGORY_MESH_PATH_SEL:
- if (!mesh_path_sel_is_hwmp(sdata))
+ if (mesh_action_is_path_sel(mgmt) &&
+ (!mesh_path_sel_is_hwmp(sdata)))
break;
goto queue;
}
@@ -2539,17 +2676,17 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
CALL_RXH(ieee80211_rx_h_decrypt)
CALL_RXH(ieee80211_rx_h_check_more_data)
+ CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll)
CALL_RXH(ieee80211_rx_h_sta_process)
CALL_RXH(ieee80211_rx_h_defragment)
- CALL_RXH(ieee80211_rx_h_ps_poll)
CALL_RXH(ieee80211_rx_h_michael_mic_verify)
/* must be after MMIC verify so header is counted in MPDU mic */
- CALL_RXH(ieee80211_rx_h_remove_qos_control)
- CALL_RXH(ieee80211_rx_h_amsdu)
#ifdef CONFIG_MAC80211_MESH
if (ieee80211_vif_is_mesh(&rx->sdata->vif))
CALL_RXH(ieee80211_rx_h_mesh_fwding);
#endif
+ CALL_RXH(ieee80211_rx_h_remove_qos_control)
+ CALL_RXH(ieee80211_rx_h_amsdu)
CALL_RXH(ieee80211_rx_h_data)
CALL_RXH(ieee80211_rx_h_ctrl);
CALL_RXH(ieee80211_rx_h_mgmt_check)
@@ -2605,7 +2742,9 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
.sta = sta,
.sdata = sta->sdata,
.local = sta->local,
- .queue = tid,
+ /* This is OK -- must be QoS data frame */
+ .security_idx = tid,
+ .seqno_idx = tid,
.flags = 0,
};
struct tid_ampdu_rx *tid_agg_rx;
@@ -2647,6 +2786,9 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
case NL80211_IFTYPE_ADHOC:
if (!bssid)
return 0;
+ if (compare_ether_addr(sdata->vif.addr, hdr->addr2) == 0 ||
+ compare_ether_addr(sdata->u.ibss.bssid, hdr->addr2) == 0)
+ return 0;
if (ieee80211_is_beacon(hdr->frame_control)) {
return 1;
}
@@ -2689,7 +2831,9 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
} else if (!ieee80211_bssid_match(bssid,
sdata->vif.addr)) {
if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
- !ieee80211_is_beacon(hdr->frame_control))
+ !ieee80211_is_beacon(hdr->frame_control) &&
+ !(ieee80211_is_action(hdr->frame_control) &&
+ sdata->vif.p2p))
return 0;
status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
}
@@ -2774,7 +2918,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
local->dot11ReceivedFragmentCount++;
if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
- test_bit(SCAN_SW_SCANNING, &local->scanning)))
+ test_bit(SCAN_OFF_CHANNEL, &local->scanning)))
status->rx_flags |= IEEE80211_RX_IN_SCAN;
if (ieee80211_is_mgmt(fc)) {
@@ -2799,7 +2943,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
if (ieee80211_is_data(fc)) {
prev_sta = NULL;
- for_each_sta_info(local, hdr->addr2, sta, tmp) {
+ for_each_sta_info_rx(local, hdr->addr2, sta, tmp) {
if (!prev_sta) {
prev_sta = sta;
continue;
@@ -2843,7 +2987,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
continue;
}
- rx.sta = sta_info_get_bss(prev, hdr->addr2);
+ rx.sta = sta_info_get_bss_rx(prev, hdr->addr2);
rx.sdata = prev;
ieee80211_prepare_and_rx_handle(&rx, skb, false);
@@ -2851,7 +2995,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
}
if (prev) {
- rx.sta = sta_info_get_bss(prev, hdr->addr2);
+ rx.sta = sta_info_get_bss_rx(prev, hdr->addr2);
rx.sdata = prev;
if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 7c75741..0aeea49 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -14,9 +14,10 @@
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
#include <net/sch_generic.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
@@ -212,14 +213,6 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
if (bss)
ieee80211_rx_bss_put(sdata->local, bss);
- /* If we are on-operating-channel, and this packet is for the
- * current channel, pass the pkt on up the stack so that
- * the rest of the stack can make use of it.
- */
- if (ieee80211_cfg_on_oper_channel(sdata->local)
- && (channel == sdata->local->oper_channel))
- return RX_CONTINUE;
-
dev_kfree_skb(skb);
return RX_QUEUED;
}
@@ -231,6 +224,9 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
enum ieee80211_band band;
int i, ielen, n_chans;
+ if (test_bit(SCAN_HW_CANCELLED, &local->scanning))
+ return false;
+
do {
if (local->hw_scan_band == IEEE80211_NUM_BANDS)
return false;
@@ -251,9 +247,10 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
local->hw_scan_req->n_channels = n_chans;
ielen = ieee80211_build_preq_ies(local, (u8 *)local->hw_scan_req->ie,
- req->ie, req->ie_len, band, (u32) -1,
- 0);
+ req->ie, req->ie_len, band,
+ req->rates[band], 0);
local->hw_scan_req->ie_len = ielen;
+ local->hw_scan_req->no_cck = req->no_cck;
return true;
}
@@ -262,8 +259,6 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
bool was_hw_scan)
{
struct ieee80211_local *local = hw_to_local(hw);
- bool on_oper_chan;
- bool enable_beacons = false;
lockdep_assert_held(&local->mtx);
@@ -296,25 +291,11 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
local->scanning = 0;
local->scan_channel = NULL;
- on_oper_chan = ieee80211_cfg_on_oper_channel(local);
-
- if (was_hw_scan || !on_oper_chan)
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
- else
- /* Set power back to normal operating levels. */
- ieee80211_hw_config(local, 0);
-
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
if (!was_hw_scan) {
- bool on_oper_chan2;
ieee80211_configure_filter(local);
drv_sw_scan_complete(local);
- on_oper_chan2 = ieee80211_cfg_on_oper_channel(local);
- /* We should always be on-channel at this point. */
- WARN_ON(!on_oper_chan2);
- if (on_oper_chan2 && (on_oper_chan != on_oper_chan2))
- enable_beacons = true;
-
- ieee80211_offchannel_return(local, enable_beacons);
+ ieee80211_offchannel_return(local, true);
}
ieee80211_recalc_idle(local);
@@ -355,15 +336,13 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
*/
drv_sw_scan_start(local);
+ ieee80211_offchannel_stop_beaconing(local);
+
local->leave_oper_channel_time = 0;
local->next_scan_state = SCAN_DECISION;
local->scan_channel_idx = 0;
- /* We always want to use off-channel PS, even if we
- * are not really leaving oper-channel. Don't
- * tell the AP though, as long as we are on-channel.
- */
- ieee80211_offchannel_enable_all_ps(local, false);
+ drv_flush(local, false);
ieee80211_configure_filter(local);
@@ -506,20 +485,7 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
}
mutex_unlock(&local->iflist_mtx);
- next_chan = local->scan_req->channels[local->scan_channel_idx];
-
- if (ieee80211_cfg_on_oper_channel(local)) {
- /* We're currently on operating channel. */
- if (next_chan == local->oper_channel)
- /* We don't need to move off of operating channel. */
- local->next_scan_state = SCAN_SET_CHANNEL;
- else
- /*
- * We do need to leave operating channel, as next
- * scan is somewhere else.
- */
- local->next_scan_state = SCAN_LEAVE_OPER_CHANNEL;
- } else {
+ if (local->scan_channel) {
/*
* we're currently scanning a different channel, let's
* see if we can scan another channel without interfering
@@ -535,6 +501,7 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
*
* Otherwise switch back to the operating channel.
*/
+ next_chan = local->scan_req->channels[local->scan_channel_idx];
bad_latency = time_after(jiffies +
ieee80211_scan_get_channel_time(next_chan),
@@ -552,6 +519,12 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
local->next_scan_state = SCAN_ENTER_OPER_CHANNEL;
else
local->next_scan_state = SCAN_SET_CHANNEL;
+ } else {
+ /*
+ * we're on the operating channel currently, let's
+ * leave that channel now to scan another one
+ */
+ local->next_scan_state = SCAN_LEAVE_OPER_CHANNEL;
}
*next_delay = 0;
@@ -560,10 +533,9 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *local,
unsigned long *next_delay)
{
- /* PS will already be in off-channel mode,
- * we do that once at the beginning of scanning.
- */
- ieee80211_offchannel_stop_vifs(local);
+ ieee80211_offchannel_stop_station(local);
+
+ __set_bit(SCAN_OFF_CHANNEL, &local->scanning);
/*
* What if the nullfunc frames didn't arrive?
@@ -586,15 +558,15 @@ static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *loca
{
/* switch back to the operating channel */
local->scan_channel = NULL;
- if (!ieee80211_cfg_on_oper_channel(local))
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
/*
- * Re-enable vifs and beaconing. Leave PS
- * in off-channel state..will put that back
- * on-channel at the end of scanning.
+ * Only re-enable station mode interface now; beaconing will be
+ * re-enabled once the full scan has been completed.
*/
- ieee80211_offchannel_return(local, true);
+ ieee80211_offchannel_return(local, false);
+
+ __clear_bit(SCAN_OFF_CHANNEL, &local->scanning);
*next_delay = HZ / 5;
local->next_scan_state = SCAN_DECISION;
@@ -652,13 +624,16 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
{
int i;
struct ieee80211_sub_if_data *sdata = local->scan_sdata;
+ enum ieee80211_band band = local->hw.conf.channel->band;
for (i = 0; i < local->scan_req->n_ssids; i++)
ieee80211_send_probe_req(
sdata, NULL,
local->scan_req->ssids[i].ssid,
local->scan_req->ssids[i].ssid_len,
- local->scan_req->ie, local->scan_req->ie_len);
+ local->scan_req->ie, local->scan_req->ie_len,
+ local->scan_req->rates[band], false,
+ local->scan_req->no_cck);
/*
* After sending probe requests, wait for probe responses
@@ -821,10 +796,8 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
*/
void ieee80211_scan_cancel(struct ieee80211_local *local)
{
- bool abortscan;
-
/*
- * We are only canceling software scan, or deferred scan that was not
+ * We are canceling the software scan, or a deferred scan that was not
* yet really started (see __ieee80211_start_scan ).
*
* Regarding hardware scan:
@@ -836,23 +809,46 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
* - we can not cancel scan_work since driver can schedule it
* by ieee80211_scan_completed(..., true) to finish scan
*
- * Hence low lever driver is responsible for canceling HW scan.
+ * Hence we only call the cancel_hw_scan() callback, but the low-level
+ * driver is still responsible for calling ieee80211_scan_completed()
+ * after the scan was completed/aborted.
*/
mutex_lock(&local->mtx);
- abortscan = local->scan_req && !test_bit(SCAN_HW_SCANNING, &local->scanning);
- if (abortscan) {
+ if (!local->scan_req)
+ goto out;
+
+ /*
+ * We have a scan running and the driver already reported completion,
+ * but the worker hasn't run yet or is stuck on the mutex - mark it as
+ * cancelled.
+ */
+ if (test_bit(SCAN_HW_SCANNING, &local->scanning) &&
+ test_bit(SCAN_COMPLETED, &local->scanning)) {
+ set_bit(SCAN_HW_CANCELLED, &local->scanning);
+ goto out;
+ }
+
+ if (test_bit(SCAN_HW_SCANNING, &local->scanning)) {
/*
- * The scan is canceled, but stop work from being pending.
- *
- * If the work is currently running, it must be blocked on
- * the mutex, but we'll set scan_sdata = NULL and it'll
- * simply exit once it acquires the mutex.
+ * Make sure that __ieee80211_scan_completed doesn't trigger a
+ * scan on another band.
*/
- cancel_delayed_work(&local->scan_work);
- /* and clean up */
- __ieee80211_scan_completed(&local->hw, true, false);
+ set_bit(SCAN_HW_CANCELLED, &local->scanning);
+ if (local->ops->cancel_hw_scan)
+ drv_cancel_hw_scan(local, local->scan_sdata);
+ goto out;
}
+
+ /*
+ * If the work is currently running, it must be blocked on
+ * the mutex, but we'll set scan_sdata = NULL and it'll
+ * simply exit once it acquires the mutex.
+ */
+ cancel_delayed_work(&local->scan_work);
+ /* and clean up */
+ __ieee80211_scan_completed(&local->hw, true, false);
+out:
mutex_unlock(&local->mtx);
}
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index 7733f66..578eea3 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -32,12 +32,8 @@ static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_da
skb = dev_alloc_skb(sizeof(*msr_report) + local->hw.extra_tx_headroom +
sizeof(struct ieee80211_msrment_ie));
-
- if (!skb) {
- printk(KERN_ERR "%s: failed to allocate buffer for "
- "measurement report frame\n", sdata->name);
+ if (!skb)
return;
- }
skb_reserve(skb, local->hw.extra_tx_headroom);
msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 065a971..1914f5a 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -24,6 +24,7 @@
#include "sta_info.h"
#include "debugfs_sta.h"
#include "mesh.h"
+#include "wme.h"
/**
* DOC: STA information lifetime rules
@@ -97,7 +98,27 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta;
sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
- rcu_read_lock_held() ||
+ lockdep_is_held(&local->sta_lock) ||
+ lockdep_is_held(&local->sta_mtx));
+ while (sta) {
+ if (sta->sdata == sdata && !sta->dummy &&
+ memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
+ break;
+ sta = rcu_dereference_check(sta->hnext,
+ lockdep_is_held(&local->sta_lock) ||
+ lockdep_is_held(&local->sta_mtx));
+ }
+ return sta;
+}
+
+/* get a station info entry even if it is a dummy station */
+struct sta_info *sta_info_get_rx(struct ieee80211_sub_if_data *sdata,
+ const u8 *addr)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta;
+
+ sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
lockdep_is_held(&local->sta_lock) ||
lockdep_is_held(&local->sta_mtx));
while (sta) {
@@ -105,7 +126,6 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
break;
sta = rcu_dereference_check(sta->hnext,
- rcu_read_lock_held() ||
lockdep_is_held(&local->sta_lock) ||
lockdep_is_held(&local->sta_mtx));
}
@@ -123,7 +143,32 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta;
sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
- rcu_read_lock_held() ||
+ lockdep_is_held(&local->sta_lock) ||
+ lockdep_is_held(&local->sta_mtx));
+ while (sta) {
+ if ((sta->sdata == sdata ||
+ (sta->sdata->bss && sta->sdata->bss == sdata->bss)) &&
+ !sta->dummy &&
+ memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
+ break;
+ sta = rcu_dereference_check(sta->hnext,
+ lockdep_is_held(&local->sta_lock) ||
+ lockdep_is_held(&local->sta_mtx));
+ }
+ return sta;
+}
+
+/*
+ * Get sta info either from the specified interface
+ * or from one of its vlans (including dummy stations)
+ */
+struct sta_info *sta_info_get_bss_rx(struct ieee80211_sub_if_data *sdata,
+ const u8 *addr)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta;
+
+ sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
lockdep_is_held(&local->sta_lock) ||
lockdep_is_held(&local->sta_mtx));
while (sta) {
@@ -132,7 +177,6 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
break;
sta = rcu_dereference_check(sta->hnext,
- rcu_read_lock_held() ||
lockdep_is_held(&local->sta_lock) ||
lockdep_is_held(&local->sta_mtx));
}
@@ -200,13 +244,22 @@ static void sta_unblock(struct work_struct *wk)
if (sta->dead)
return;
- if (!test_sta_flags(sta, WLAN_STA_PS_STA))
+ if (!test_sta_flag(sta, WLAN_STA_PS_STA))
ieee80211_sta_ps_deliver_wakeup(sta);
- else if (test_and_clear_sta_flags(sta, WLAN_STA_PSPOLL)) {
- clear_sta_flags(sta, WLAN_STA_PS_DRIVER);
+ else if (test_and_clear_sta_flag(sta, WLAN_STA_PSPOLL)) {
+ clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
+
+ local_bh_disable();
ieee80211_sta_ps_deliver_poll_response(sta);
+ local_bh_enable();
+ } else if (test_and_clear_sta_flag(sta, WLAN_STA_UAPSD)) {
+ clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
+
+ local_bh_disable();
+ ieee80211_sta_ps_deliver_uapsd(sta);
+ local_bh_enable();
} else
- clear_sta_flags(sta, WLAN_STA_PS_DRIVER);
+ clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
}
static int sta_prepare_rate_control(struct ieee80211_local *local,
@@ -239,7 +292,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
return NULL;
spin_lock_init(&sta->lock);
- spin_lock_init(&sta->flaglock);
+ spin_lock_init(&sta->ps_lock);
INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
mutex_init(&sta->ampdu_mlme.mtx);
@@ -266,8 +319,10 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
*/
sta->timer_to_tid[i] = i;
}
- skb_queue_head_init(&sta->ps_tx_buf);
- skb_queue_head_init(&sta->tx_filtered);
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ skb_queue_head_init(&sta->ps_tx_buf[i]);
+ skb_queue_head_init(&sta->tx_filtered[i]);
+ }
for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX);
@@ -284,7 +339,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
return sta;
}
-static int sta_info_finish_insert(struct sta_info *sta, bool async)
+static int sta_info_finish_insert(struct sta_info *sta,
+ bool async, bool dummy_reinsert)
{
struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
@@ -294,51 +350,58 @@ static int sta_info_finish_insert(struct sta_info *sta, bool async)
lockdep_assert_held(&local->sta_mtx);
- /* notify driver */
- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- sdata = container_of(sdata->bss,
- struct ieee80211_sub_if_data,
- u.ap);
- err = drv_sta_add(local, sdata, &sta->sta);
- if (err) {
- if (!async)
- return err;
- printk(KERN_DEBUG "%s: failed to add IBSS STA %pM to driver (%d)"
- " - keeping it anyway.\n",
- sdata->name, sta->sta.addr, err);
- } else {
- sta->uploaded = true;
+ if (!sta->dummy || dummy_reinsert) {
+ /* notify driver */
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ sdata = container_of(sdata->bss,
+ struct ieee80211_sub_if_data,
+ u.ap);
+ err = drv_sta_add(local, sdata, &sta->sta);
+ if (err) {
+ if (!async)
+ return err;
+ printk(KERN_DEBUG "%s: failed to add IBSS STA %pM to "
+ "driver (%d) - keeping it anyway.\n",
+ sdata->name, sta->sta.addr, err);
+ } else {
+ sta->uploaded = true;
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- if (async)
- wiphy_debug(local->hw.wiphy,
- "Finished adding IBSS STA %pM\n",
- sta->sta.addr);
+ if (async)
+ wiphy_debug(local->hw.wiphy,
+ "Finished adding IBSS STA %pM\n",
+ sta->sta.addr);
#endif
+ }
+
+ sdata = sta->sdata;
}
- sdata = sta->sdata;
+ if (!dummy_reinsert) {
+ if (!async) {
+ local->num_sta++;
+ local->sta_generation++;
+ smp_mb();
- if (!async) {
- local->num_sta++;
- local->sta_generation++;
- smp_mb();
+ /* make the station visible */
+ spin_lock_irqsave(&local->sta_lock, flags);
+ sta_info_hash_add(local, sta);
+ spin_unlock_irqrestore(&local->sta_lock, flags);
+ }
- /* make the station visible */
- spin_lock_irqsave(&local->sta_lock, flags);
- sta_info_hash_add(local, sta);
- spin_unlock_irqrestore(&local->sta_lock, flags);
+ list_add(&sta->list, &local->sta_list);
+ } else {
+ sta->dummy = false;
}
- list_add(&sta->list, &local->sta_list);
-
- ieee80211_sta_debugfs_add(sta);
- rate_control_add_sta_debugfs(sta);
-
- memset(&sinfo, 0, sizeof(sinfo));
- sinfo.filled = 0;
- sinfo.generation = local->sta_generation;
- cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
+ if (!sta->dummy) {
+ ieee80211_sta_debugfs_add(sta);
+ rate_control_add_sta_debugfs(sta);
+ memset(&sinfo, 0, sizeof(sinfo));
+ sinfo.filled = 0;
+ sinfo.generation = local->sta_generation;
+ cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
+ }
return 0;
}
@@ -355,7 +418,7 @@ static void sta_info_finish_pending(struct ieee80211_local *local)
list_del(&sta->list);
spin_unlock_irqrestore(&local->sta_lock, flags);
- sta_info_finish_insert(sta, true);
+ sta_info_finish_insert(sta, true, false);
spin_lock_irqsave(&local->sta_lock, flags);
}
@@ -372,106 +435,117 @@ static void sta_info_finish_work(struct work_struct *work)
mutex_unlock(&local->sta_mtx);
}
-int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
+static int sta_info_insert_check(struct sta_info *sta)
{
- struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
- unsigned long flags;
- int err = 0;
/*
* Can't be a WARN_ON because it can be triggered through a race:
* something inserts a STA (on one CPU) without holding the RTNL
* and another CPU turns off the net device.
*/
- if (unlikely(!ieee80211_sdata_running(sdata))) {
- err = -ENETDOWN;
- rcu_read_lock();
- goto out_free;
- }
+ if (unlikely(!ieee80211_sdata_running(sdata)))
+ return -ENETDOWN;
if (WARN_ON(compare_ether_addr(sta->sta.addr, sdata->vif.addr) == 0 ||
- is_multicast_ether_addr(sta->sta.addr))) {
- err = -EINVAL;
+ is_multicast_ether_addr(sta->sta.addr)))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int sta_info_insert_ibss(struct sta_info *sta) __acquires(RCU)
+{
+ struct ieee80211_local *local = sta->local;
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ unsigned long flags;
+
+ spin_lock_irqsave(&local->sta_lock, flags);
+ /* check if STA exists already */
+ if (sta_info_get_bss_rx(sdata, sta->sta.addr)) {
+ spin_unlock_irqrestore(&local->sta_lock, flags);
rcu_read_lock();
- goto out_free;
+ return -EEXIST;
}
- /*
- * In ad-hoc mode, we sometimes need to insert stations
- * from tasklet context from the RX path. To avoid races,
- * always do so in that case -- see the comment below.
- */
- if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
- spin_lock_irqsave(&local->sta_lock, flags);
- /* check if STA exists already */
- if (sta_info_get_bss(sdata, sta->sta.addr)) {
- spin_unlock_irqrestore(&local->sta_lock, flags);
- rcu_read_lock();
- err = -EEXIST;
- goto out_free;
- }
-
- local->num_sta++;
- local->sta_generation++;
- smp_mb();
- sta_info_hash_add(local, sta);
+ local->num_sta++;
+ local->sta_generation++;
+ smp_mb();
+ sta_info_hash_add(local, sta);
- list_add_tail(&sta->list, &local->sta_pending_list);
+ list_add_tail(&sta->list, &local->sta_pending_list);
- rcu_read_lock();
- spin_unlock_irqrestore(&local->sta_lock, flags);
+ rcu_read_lock();
+ spin_unlock_irqrestore(&local->sta_lock, flags);
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(local->hw.wiphy, "Added IBSS STA %pM\n",
- sta->sta.addr);
+ wiphy_debug(local->hw.wiphy, "Added IBSS STA %pM\n",
+ sta->sta.addr);
#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
- ieee80211_queue_work(&local->hw, &local->sta_finish_work);
+ ieee80211_queue_work(&local->hw, &local->sta_finish_work);
- return 0;
- }
+ return 0;
+}
+
+/*
+ * Should be called with sta_mtx locked;
+ * this function replaces the mutex lock
+ * with an RCU lock.
+ */
+static int sta_info_insert_non_ibss(struct sta_info *sta) __acquires(RCU)
+{
+ struct ieee80211_local *local = sta->local;
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ unsigned long flags;
+ struct sta_info *exist_sta;
+ bool dummy_reinsert = false;
+ int err = 0;
+
+ lockdep_assert_held(&local->sta_mtx);
/*
* On first glance, this will look racy, because the code
- * below this point, which inserts a station with sleeping,
+ * in this function, which inserts a station with sleeping,
* unlocks the sta_lock between checking existence in the
* hash table and inserting into it.
*
* However, it is not racy against itself because it keeps
- * the mutex locked. It still seems to race against the
- * above code that atomically inserts the station... That,
- * however, is not true because the above code can only
- * be invoked for IBSS interfaces, and the below code will
- * not be -- and the two do not race against each other as
- * the hash table also keys off the interface.
+ * the mutex locked.
*/
- might_sleep();
-
- mutex_lock(&local->sta_mtx);
-
spin_lock_irqsave(&local->sta_lock, flags);
- /* check if STA exists already */
- if (sta_info_get_bss(sdata, sta->sta.addr)) {
- spin_unlock_irqrestore(&local->sta_lock, flags);
- mutex_unlock(&local->sta_mtx);
- rcu_read_lock();
- err = -EEXIST;
- goto out_free;
+ /*
+ * check if the STA already exists. The only acceptable case is a
+ * second call to sta_info_insert_non_ibss() with a dummy station
+ * entry that was inserted earlier; in that case, assume the dummy
+ * station flag should be removed.
+ */
+ exist_sta = sta_info_get_bss_rx(sdata, sta->sta.addr);
+ if (exist_sta) {
+ if (exist_sta == sta && sta->dummy) {
+ dummy_reinsert = true;
+ } else {
+ spin_unlock_irqrestore(&local->sta_lock, flags);
+ mutex_unlock(&local->sta_mtx);
+ rcu_read_lock();
+ return -EEXIST;
+ }
}
spin_unlock_irqrestore(&local->sta_lock, flags);
- err = sta_info_finish_insert(sta, false);
+ err = sta_info_finish_insert(sta, false, dummy_reinsert);
if (err) {
mutex_unlock(&local->sta_mtx);
rcu_read_lock();
- goto out_free;
+ return err;
}
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(local->hw.wiphy, "Inserted STA %pM\n", sta->sta.addr);
+ wiphy_debug(local->hw.wiphy, "Inserted %sSTA %pM\n",
+ sta->dummy ? "dummy " : "", sta->sta.addr);
#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
/* move reference to rcu-protected */
@@ -482,6 +556,51 @@ int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
mesh_accept_plinks_update(sdata);
return 0;
+}
+
+int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
+{
+ struct ieee80211_local *local = sta->local;
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ int err = 0;
+
+ err = sta_info_insert_check(sta);
+ if (err) {
+ rcu_read_lock();
+ goto out_free;
+ }
+
+ /*
+ * In ad-hoc mode, we sometimes need to insert stations
+ * from tasklet context from the RX path. To avoid races,
+ * always do so in that case -- see the comment below.
+ */
+ if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
+ err = sta_info_insert_ibss(sta);
+ if (err)
+ goto out_free;
+
+ return 0;
+ }
+
+ /*
+ * It might seem that the function called below races against
+ * the function call above that atomically inserts the station... That,
+ * however, is not true because the above code can only
+ * be invoked for IBSS interfaces, and the below code will
+ * not be -- and the two do not race against each other as
+ * the hash table also keys off the interface.
+ */
+
+ might_sleep();
+
+ mutex_lock(&local->sta_mtx);
+
+ err = sta_info_insert_non_ibss(sta);
+ if (err)
+ goto out_free;
+
+ return 0;
out_free:
BUG_ON(!err);
__sta_info_free(local, sta);
@@ -497,6 +616,25 @@ int sta_info_insert(struct sta_info *sta)
return err;
}
+/* Caller must hold sta->local->sta_mtx */
+int sta_info_reinsert(struct sta_info *sta)
+{
+ struct ieee80211_local *local = sta->local;
+ int err = 0;
+
+ err = sta_info_insert_check(sta);
+ if (err) {
+ mutex_unlock(&local->sta_mtx);
+ return err;
+ }
+
+ might_sleep();
+
+ err = sta_info_insert_non_ibss(sta);
+ rcu_read_unlock();
+ return err;
+}
+
static inline void __bss_tim_set(struct ieee80211_if_ap *bss, u16 aid)
{
/*
@@ -515,64 +653,93 @@ static inline void __bss_tim_clear(struct ieee80211_if_ap *bss, u16 aid)
bss->tim[aid / 8] &= ~(1 << (aid % 8));
}
-static void __sta_info_set_tim_bit(struct ieee80211_if_ap *bss,
- struct sta_info *sta)
+static unsigned long ieee80211_tids_for_ac(int ac)
{
- BUG_ON(!bss);
-
- __bss_tim_set(bss, sta->sta.aid);
-
- if (sta->local->ops->set_tim) {
- sta->local->tim_in_locked_section = true;
- drv_set_tim(sta->local, &sta->sta, true);
- sta->local->tim_in_locked_section = false;
+ /* If we ever support TIDs > 7, this obviously needs to be adjusted */
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ return BIT(6) | BIT(7);
+ case IEEE80211_AC_VI:
+ return BIT(4) | BIT(5);
+ case IEEE80211_AC_BE:
+ return BIT(0) | BIT(3);
+ case IEEE80211_AC_BK:
+ return BIT(1) | BIT(2);
+ default:
+ WARN_ON(1);
+ return 0;
}
}
-void sta_info_set_tim_bit(struct sta_info *sta)
+void sta_info_recalc_tim(struct sta_info *sta)
{
+ struct ieee80211_local *local = sta->local;
+ struct ieee80211_if_ap *bss = sta->sdata->bss;
unsigned long flags;
+ bool indicate_tim = false;
+ u8 ignore_for_tim = sta->sta.uapsd_queues;
+ int ac;
- BUG_ON(!sta->sdata->bss);
+ if (WARN_ON_ONCE(!sta->sdata->bss))
+ return;
- spin_lock_irqsave(&sta->local->sta_lock, flags);
- __sta_info_set_tim_bit(sta->sdata->bss, sta);
- spin_unlock_irqrestore(&sta->local->sta_lock, flags);
-}
+ /* No need to do anything if the driver does all */
+ if (local->hw.flags & IEEE80211_HW_AP_LINK_PS)
+ return;
-static void __sta_info_clear_tim_bit(struct ieee80211_if_ap *bss,
- struct sta_info *sta)
-{
- BUG_ON(!bss);
+ if (sta->dead)
+ goto done;
- __bss_tim_clear(bss, sta->sta.aid);
+ /*
+ * If all ACs are delivery-enabled then we should build
+ * the TIM bit for all ACs anyway; if only some are then
+ * we ignore those and build the TIM bit using only the
+ * non-enabled ones.
+ */
+ if (ignore_for_tim == BIT(IEEE80211_NUM_ACS) - 1)
+ ignore_for_tim = 0;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ unsigned long tids;
+
+ if (ignore_for_tim & BIT(ac))
+ continue;
+
+ indicate_tim |= !skb_queue_empty(&sta->tx_filtered[ac]) ||
+ !skb_queue_empty(&sta->ps_tx_buf[ac]);
+ if (indicate_tim)
+ break;
- if (sta->local->ops->set_tim) {
- sta->local->tim_in_locked_section = true;
- drv_set_tim(sta->local, &sta->sta, false);
- sta->local->tim_in_locked_section = false;
+ tids = ieee80211_tids_for_ac(ac);
+
+ indicate_tim |=
+ sta->driver_buffered_tids & tids;
}
-}
-void sta_info_clear_tim_bit(struct sta_info *sta)
-{
- unsigned long flags;
+ done:
+ spin_lock_irqsave(&local->sta_lock, flags);
- BUG_ON(!sta->sdata->bss);
+ if (indicate_tim)
+ __bss_tim_set(bss, sta->sta.aid);
+ else
+ __bss_tim_clear(bss, sta->sta.aid);
- spin_lock_irqsave(&sta->local->sta_lock, flags);
- __sta_info_clear_tim_bit(sta->sdata->bss, sta);
- spin_unlock_irqrestore(&sta->local->sta_lock, flags);
+ if (local->ops->set_tim) {
+ local->tim_in_locked_section = true;
+ drv_set_tim(local, &sta->sta, indicate_tim);
+ local->tim_in_locked_section = false;
+ }
+
+ spin_unlock_irqrestore(&local->sta_lock, flags);
}
-static int sta_info_buffer_expired(struct sta_info *sta,
- struct sk_buff *skb)
+static bool sta_info_buffer_expired(struct sta_info *sta, struct sk_buff *skb)
{
struct ieee80211_tx_info *info;
int timeout;
if (!skb)
- return 0;
+ return false;
info = IEEE80211_SKB_CB(skb);
@@ -586,24 +753,59 @@ static int sta_info_buffer_expired(struct sta_info *sta,
}
-static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
- struct sta_info *sta)
+static bool sta_info_cleanup_expire_buffered_ac(struct ieee80211_local *local,
+ struct sta_info *sta, int ac)
{
unsigned long flags;
struct sk_buff *skb;
- if (skb_queue_empty(&sta->ps_tx_buf))
- return false;
+ /*
+ * First check for frames that should expire on the filtered
+ * queue. Frames here were rejected by the driver and are on
+ * a separate queue to avoid reordering with normal PS-buffered
+ * frames. They also aren't accounted for right now in the
+ * total_ps_buffered counter.
+ */
+ for (;;) {
+ spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags);
+ skb = skb_peek(&sta->tx_filtered[ac]);
+ if (sta_info_buffer_expired(sta, skb))
+ skb = __skb_dequeue(&sta->tx_filtered[ac]);
+ else
+ skb = NULL;
+ spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags);
+
+ /*
+ * Frames are queued in order, so if this one
+ * hasn't expired yet we can stop testing. If
+ * we actually reached the end of the queue we
+ * also need to stop, of course.
+ */
+ if (!skb)
+ break;
+ dev_kfree_skb(skb);
+ }
+ /*
+ * Now also check the normal PS-buffered queue, this will
+ * only find something if the filtered queue was emptied
+ * since the filtered frames are all before the normal PS
+ * buffered frames.
+ */
for (;;) {
- spin_lock_irqsave(&sta->ps_tx_buf.lock, flags);
- skb = skb_peek(&sta->ps_tx_buf);
+ spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags);
+ skb = skb_peek(&sta->ps_tx_buf[ac]);
if (sta_info_buffer_expired(sta, skb))
- skb = __skb_dequeue(&sta->ps_tx_buf);
+ skb = __skb_dequeue(&sta->ps_tx_buf[ac]);
else
skb = NULL;
- spin_unlock_irqrestore(&sta->ps_tx_buf.lock, flags);
+ spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags);
+ /*
+ * frames are queued in order, so if this one
+ * hasn't expired yet (or we reached the end of
+ * the queue) we can stop testing
+ */
if (!skb)
break;
@@ -613,22 +815,47 @@ static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
sta->sta.addr);
#endif
dev_kfree_skb(skb);
-
- if (skb_queue_empty(&sta->ps_tx_buf) &&
- !test_sta_flags(sta, WLAN_STA_PS_DRIVER_BUF))
- sta_info_clear_tim_bit(sta);
}
- return true;
+ /*
+ * Finally, recalculate the TIM bit for this station -- it might
+ * now be clear because the station was too slow to retrieve its
+ * frames.
+ */
+ sta_info_recalc_tim(sta);
+
+ /*
+ * Return whether there are any frames still buffered, this is
+ * used to check whether the cleanup timer still needs to run,
+ * if there are no frames we don't need to rearm the timer.
+ */
+ return !(skb_queue_empty(&sta->ps_tx_buf[ac]) &&
+ skb_queue_empty(&sta->tx_filtered[ac]));
+}
+
+static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
+ struct sta_info *sta)
+{
+ bool have_buffered = false;
+ int ac;
+
+ /* This is only necessary for stations on BSS interfaces */
+ if (!sta->sdata->bss)
+ return false;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ have_buffered |=
+ sta_info_cleanup_expire_buffered_ac(local, sta, ac);
+
+ return have_buffered;
}
static int __must_check __sta_info_destroy(struct sta_info *sta)
{
struct ieee80211_local *local;
struct ieee80211_sub_if_data *sdata;
- struct sk_buff *skb;
unsigned long flags;
- int ret, i;
+ int ret, i, ac;
might_sleep();
@@ -644,7 +871,7 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
* sessions -- block that to make sure the tear-down
* will be sufficient.
*/
- set_sta_flags(sta, WLAN_STA_BLOCK_BA);
+ set_sta_flag(sta, WLAN_STA_BLOCK_BA);
ieee80211_sta_tear_down_BA_sessions(sta, true);
spin_lock_irqsave(&local->sta_lock, flags);
@@ -665,19 +892,22 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
sta->dead = true;
- if (test_and_clear_sta_flags(sta,
- WLAN_STA_PS_STA | WLAN_STA_PS_DRIVER)) {
+ if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
+ test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
BUG_ON(!sdata->bss);
+ clear_sta_flag(sta, WLAN_STA_PS_STA);
+ clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
+
atomic_dec(&sdata->bss->num_sta_ps);
- sta_info_clear_tim_bit(sta);
+ sta_info_recalc_tim(sta);
}
local->num_sta--;
local->sta_generation++;
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- rcu_assign_pointer(sdata->u.vlan.sta, NULL);
+ RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);
if (sta->uploaded) {
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
@@ -696,6 +926,12 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
*/
synchronize_rcu();
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]);
+ __skb_queue_purge(&sta->ps_tx_buf[ac]);
+ __skb_queue_purge(&sta->tx_filtered[ac]);
+ }
+
#ifdef CONFIG_MAC80211_MESH
if (ieee80211_vif_is_mesh(&sdata->vif))
mesh_accept_plinks_update(sdata);
@@ -718,14 +954,6 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
}
#endif
- while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) {
- local->total_ps_buffered--;
- dev_kfree_skb_any(skb);
- }
-
- while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL)
- dev_kfree_skb_any(skb);
-
__sta_info_free(local, sta);
return 0;
@@ -737,7 +965,7 @@ int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, const u8 *addr)
int ret;
mutex_lock(&sdata->local->sta_mtx);
- sta = sta_info_get(sdata, addr);
+ sta = sta_info_get_rx(sdata, addr);
ret = __sta_info_destroy(sta);
mutex_unlock(&sdata->local->sta_mtx);
@@ -751,7 +979,7 @@ int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
int ret;
mutex_lock(&sdata->local->sta_mtx);
- sta = sta_info_get_bss(sdata, addr);
+ sta = sta_info_get_bss_rx(sdata, addr);
ret = __sta_info_destroy(sta);
mutex_unlock(&sdata->local->sta_mtx);
@@ -891,7 +1119,8 @@ static void clear_sta_ps_flags(void *_sta)
{
struct sta_info *sta = _sta;
- clear_sta_flags(sta, WLAN_STA_PS_DRIVER | WLAN_STA_PS_STA);
+ clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
+ clear_sta_flag(sta, WLAN_STA_PS_STA);
}
/* powersave support code */
@@ -899,88 +1128,350 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_local *local = sdata->local;
- int sent, buffered;
+ struct sk_buff_head pending;
+ int filtered = 0, buffered = 0, ac;
+ unsigned long flags;
+
+ clear_sta_flag(sta, WLAN_STA_SP);
+
+ BUILD_BUG_ON(BITS_TO_LONGS(STA_TID_NUM) > 1);
+ sta->driver_buffered_tids = 0;
- clear_sta_flags(sta, WLAN_STA_PS_DRIVER_BUF);
if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);
- if (!skb_queue_empty(&sta->ps_tx_buf))
- sta_info_clear_tim_bit(sta);
+ skb_queue_head_init(&pending);
+ /* sync with ieee80211_tx_h_unicast_ps_buf */
+ spin_lock(&sta->ps_lock);
/* Send all buffered frames to the station */
- sent = ieee80211_add_pending_skbs(local, &sta->tx_filtered);
- buffered = ieee80211_add_pending_skbs_fn(local, &sta->ps_tx_buf,
- clear_sta_ps_flags, sta);
- sent += buffered;
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ int count = skb_queue_len(&pending), tmp;
+
+ spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags);
+ skb_queue_splice_tail_init(&sta->tx_filtered[ac], &pending);
+ spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags);
+ tmp = skb_queue_len(&pending);
+ filtered += tmp - count;
+ count = tmp;
+
+ spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags);
+ skb_queue_splice_tail_init(&sta->ps_tx_buf[ac], &pending);
+ spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags);
+ tmp = skb_queue_len(&pending);
+ buffered += tmp - count;
+ }
+
+ ieee80211_add_pending_skbs_fn(local, &pending, clear_sta_ps_flags, sta);
+ spin_unlock(&sta->ps_lock);
+
local->total_ps_buffered -= buffered;
+ sta_info_recalc_tim(sta);
+
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames "
"since STA not sleeping anymore\n", sdata->name,
- sta->sta.addr, sta->sta.aid, sent - buffered, buffered);
+ sta->sta.addr, sta->sta.aid, filtered, buffered);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
}
-void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta)
+static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta, int tid,
+ enum ieee80211_frame_release_type reason)
{
- struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_qos_hdr *nullfunc;
struct sk_buff *skb;
- int no_pending_pkts;
+ int size = sizeof(*nullfunc);
+ __le16 fc;
+ bool qos = test_sta_flag(sta, WLAN_STA_WME);
+ struct ieee80211_tx_info *info;
- skb = skb_dequeue(&sta->tx_filtered);
- if (!skb) {
- skb = skb_dequeue(&sta->ps_tx_buf);
- if (skb)
- local->total_ps_buffered--;
+ if (qos) {
+ fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_NULLFUNC |
+ IEEE80211_FCTL_FROMDS);
+ } else {
+ size -= 2;
+ fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_NULLFUNC |
+ IEEE80211_FCTL_FROMDS);
+ }
+
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom + size);
+ if (!skb)
+ return;
+
+ skb_reserve(skb, local->hw.extra_tx_headroom);
+
+ nullfunc = (void *) skb_put(skb, size);
+ nullfunc->frame_control = fc;
+ nullfunc->duration_id = 0;
+ memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
+ memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
+ memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN);
+ nullfunc->seq_ctrl = 0;
+
+ skb->priority = tid;
+ skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]);
+ if (qos) {
+ nullfunc->qos_ctrl = cpu_to_le16(tid);
+
+ if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
+ nullfunc->qos_ctrl |=
+ cpu_to_le16(IEEE80211_QOS_CTL_EOSP);
}
- no_pending_pkts = skb_queue_empty(&sta->tx_filtered) &&
- skb_queue_empty(&sta->ps_tx_buf);
- if (skb) {
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_hdr *hdr =
- (struct ieee80211_hdr *) skb->data;
+ info = IEEE80211_SKB_CB(skb);
+
+ /*
+ * Tell TX path to send this frame even though the
+ * STA may still remain in PS mode after this frame
+ * exchange. Also set EOSP to indicate this packet
+ * ends the poll/service period.
+ */
+ info->flags |= IEEE80211_TX_CTL_POLL_RESPONSE |
+ IEEE80211_TX_STATUS_EOSP |
+ IEEE80211_TX_CTL_REQ_TX_STATUS;
+
+ drv_allow_buffered_frames(local, sta, BIT(tid), 1, reason, false);
+
+ ieee80211_xmit(sdata, skb);
+}
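As a quick illustration of the size adjustment above (a sketch, not part of the patch): a QoS Null frame is the ordinary 3-address Null data header plus the 2-byte QoS control field, which is why the non-QoS case allocates two bytes less.

#include <stdio.h>

int main(void)
{
	int hdr_3addr = 2 + 2 + 6 + 6 + 6 + 2;	/* fc, dur, addr1-3, seq */
	int qos_ctl = 2;			/* QoS control field */

	printf("Null data frame:     %d bytes\n", hdr_3addr);
	printf("QoS Null data frame: %d bytes\n", hdr_3addr + qos_ctl);
	return 0;
}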
+
+static void
+ieee80211_sta_ps_deliver_response(struct sta_info *sta,
+ int n_frames, u8 ignored_acs,
+ enum ieee80211_frame_release_type reason)
+{
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct ieee80211_local *local = sdata->local;
+ bool found = false;
+ bool more_data = false;
+ int ac;
+ unsigned long driver_release_tids = 0;
+ struct sk_buff_head frames;
+
+ /* Service or PS-Poll period starts */
+ set_sta_flag(sta, WLAN_STA_SP);
+
+ __skb_queue_head_init(&frames);
+
+ /*
+ * Get response frame(s) and more data bit for it.
+ */
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ unsigned long tids;
+
+ if (ignored_acs & BIT(ac))
+ continue;
+
+ tids = ieee80211_tids_for_ac(ac);
+
+ if (!found) {
+ driver_release_tids = sta->driver_buffered_tids & tids;
+ if (driver_release_tids) {
+ found = true;
+ } else {
+ struct sk_buff *skb;
+
+ while (n_frames > 0) {
+ skb = skb_dequeue(&sta->tx_filtered[ac]);
+ if (!skb) {
+ skb = skb_dequeue(
+ &sta->ps_tx_buf[ac]);
+ if (skb)
+ local->total_ps_buffered--;
+ }
+ if (!skb)
+ break;
+ n_frames--;
+ found = true;
+ __skb_queue_tail(&frames, skb);
+ }
+ }
+
+ /*
+ * If the driver has data on more than one TID then
+ * certainly there's more data if we release just a
+ * single frame now (from a single TID).
+ */
+ if (reason == IEEE80211_FRAME_RELEASE_PSPOLL &&
+ hweight16(driver_release_tids) > 1) {
+ more_data = true;
+ driver_release_tids =
+ BIT(ffs(driver_release_tids) - 1);
+ break;
+ }
+ }
+
+ if (!skb_queue_empty(&sta->tx_filtered[ac]) ||
+ !skb_queue_empty(&sta->ps_tx_buf[ac])) {
+ more_data = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ int tid;
/*
- * Tell TX path to send this frame even though the STA may
- * still remain is PS mode after this frame exchange.
+ * For PS-Poll, this can only happen due to a race condition
+ * when we set the TIM bit and the station notices it, but
+ * before it can poll for the frame we expire it.
+ *
+ * For uAPSD, this is said in the standard (11.2.1.5 h):
+ * At each unscheduled SP for a non-AP STA, the AP shall
+ * attempt to transmit at least one MSDU or MMPDU, but no
+ * more than the value specified in the Max SP Length field
+ * in the QoS Capability element from delivery-enabled ACs,
+ * that are destined for the non-AP STA.
+ *
+ * Since we have no other MSDU/MMPDU, transmit a QoS null frame.
*/
- info->flags |= IEEE80211_TX_CTL_PSPOLL_RESPONSE;
-#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- printk(KERN_DEBUG "STA %pM aid %d: PS Poll (entries after %d)\n",
- sta->sta.addr, sta->sta.aid,
- skb_queue_len(&sta->ps_tx_buf));
-#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
+ /* This will evaluate to 1, 3, 5 or 7. */
+ tid = 7 - ((ffs(~ignored_acs) - 1) << 1);
- /* Use MoreData flag to indicate whether there are more
- * buffered frames for this STA */
- if (no_pending_pkts)
- hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
- else
- hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+ ieee80211_send_null_response(sdata, sta, tid, reason);
+ return;
+ }
+
+ if (!driver_release_tids) {
+ struct sk_buff_head pending;
+ struct sk_buff *skb;
+ int num = 0;
+ u16 tids = 0;
+
+ skb_queue_head_init(&pending);
+
+ while ((skb = __skb_dequeue(&frames))) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *) skb->data;
+ u8 *qoshdr = NULL;
+
+ num++;
+
+ /*
+ * Tell TX path to send this frame even though the
+ * STA may still remain in PS mode after this frame
+ * exchange.
+ */
+ info->flags |= IEEE80211_TX_CTL_POLL_RESPONSE;
+
+ /*
+ * Use MoreData flag to indicate whether there are
+ * more buffered frames for this STA
+ */
+ if (more_data || !skb_queue_empty(&frames))
+ hdr->frame_control |=
+ cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+ else
+ hdr->frame_control &=
+ cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
+
+ if (ieee80211_is_data_qos(hdr->frame_control) ||
+ ieee80211_is_qos_nullfunc(hdr->frame_control))
+ qoshdr = ieee80211_get_qos_ctl(hdr);
+
+ /* set EOSP for the frame */
+ if (reason == IEEE80211_FRAME_RELEASE_UAPSD &&
+ qoshdr && skb_queue_empty(&frames))
+ *qoshdr |= IEEE80211_QOS_CTL_EOSP;
+
+ info->flags |= IEEE80211_TX_STATUS_EOSP |
+ IEEE80211_TX_CTL_REQ_TX_STATUS;
+
+ if (qoshdr)
+ tids |= BIT(*qoshdr & IEEE80211_QOS_CTL_TID_MASK);
+ else
+ tids |= BIT(0);
+
+ __skb_queue_tail(&pending, skb);
+ }
- ieee80211_add_pending_skb(local, skb);
+ drv_allow_buffered_frames(local, sta, tids, num,
+ reason, more_data);
- if (no_pending_pkts)
- sta_info_clear_tim_bit(sta);
-#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
+ ieee80211_add_pending_skbs(local, &pending);
+
+ sta_info_recalc_tim(sta);
} else {
/*
- * FIXME: This can be the result of a race condition between
- * us expiring a frame and the station polling for it.
- * Should we send it a null-func frame indicating we
- * have nothing buffered for it?
+ * We need to release a frame that is buffered somewhere in the
+ * driver ... it'll have to handle that.
+ * Note that, as per the comment above, it'll also have to see
+ * if there is more than just one frame on the specific TID that
+ * we're releasing from, and it needs to set the more-data bit
+ * accordingly if we tell it that there's no more data. If we do
+ * tell it there's more data, then of course the more-data bit
+ * needs to be set anyway.
+ */
+ drv_release_buffered_frames(local, sta, driver_release_tids,
+ n_frames, reason, more_data);
+
+ /*
+ * Note that we don't recalculate the TIM bit here as it would
+ * most likely have no effect at all unless the driver told us
+ * that the TID became empty before returning here from the
+ * release function.
+ * Either way, however, when the driver tells us that the TID
+ * became empty we'll do the TIM recalculation.
*/
- printk(KERN_DEBUG "%s: STA %pM sent PS Poll even "
- "though there are no buffered frames for it\n",
- sdata->name, sta->sta.addr);
-#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
}
}
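The TID chosen for the null response above comes from the somewhat opaque expression tid = 7 - ((ffs(~ignored_acs) - 1) << 1). Below is a standalone sketch of just that arithmetic (not part of the patch; the AC names assume mac80211's ordering of 0 = VO through 3 = BK): it picks the lowest-numbered AC that is not ignored and maps it to one of the TIDs belonging to that AC.

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	static const char *ac_name[] = { "VO", "VI", "BE", "BK" };
	unsigned int ignored;

	for (ignored = 0; ignored < 8; ignored++) {
		int ac = ffs(~ignored) - 1;	/* lowest-numbered non-ignored AC */
		int tid = 7 - (ac << 1);	/* evaluates to 7, 5, 3 or 1 */

		printf("ignored=0x%x -> AC %s, TID %d\n",
		       ignored, ac_name[ac], tid);
	}
	return 0;
}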
+void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta)
+{
+ u8 ignore_for_response = sta->sta.uapsd_queues;
+
+ /*
+ * If all ACs are delivery-enabled then we should reply
+ * from any of them, if only some are enabled we reply
+ * only from the non-enabled ones.
+ */
+ if (ignore_for_response == BIT(IEEE80211_NUM_ACS) - 1)
+ ignore_for_response = 0;
+
+ ieee80211_sta_ps_deliver_response(sta, 1, ignore_for_response,
+ IEEE80211_FRAME_RELEASE_PSPOLL);
+}
+
+void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta)
+{
+ int n_frames = sta->sta.max_sp;
+ u8 delivery_enabled = sta->sta.uapsd_queues;
+
+ /*
+ * If we ever grow support for TSPEC this might happen if
+ * the TSPEC update from hostapd comes in between a trigger
+ * frame setting WLAN_STA_UAPSD in the RX path and this
+ * actually getting called.
+ */
+ if (!delivery_enabled)
+ return;
+
+ switch (sta->sta.max_sp) {
+ case 1:
+ n_frames = 2;
+ break;
+ case 2:
+ n_frames = 4;
+ break;
+ case 3:
+ n_frames = 6;
+ break;
+ case 0:
+ /* XXX: what is a good value? */
+ n_frames = 8;
+ break;
+ }
+
+ ieee80211_sta_ps_deliver_response(sta, n_frames, ~delivery_enabled,
+ IEEE80211_FRAME_RELEASE_UAPSD);
+}
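For reference, a sketch of the Max SP Length mapping used above (not part of the patch): the 2-bit field from the QoS Capability element limits how many frames one unscheduled service period may release, and 0 means "all buffered frames", which the code caps at the assumed value of 8.

#include <stdio.h>

static int max_sp_to_frames(int max_sp)
{
	switch (max_sp) {
	case 1: return 2;
	case 2: return 4;
	case 3: return 6;
	default: return 8;	/* 0 = unlimited, capped here as in the patch */
	}
}

int main(void)
{
	int sp;

	for (sp = 0; sp <= 3; sp++)
		printf("max_sp=%d -> up to %d frames per SP\n",
		       sp, max_sp_to_frames(sp));
	return 0;
}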
+
void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
struct ieee80211_sta *pubsta, bool block)
{
@@ -989,17 +1480,50 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
trace_api_sta_block_awake(sta->local, pubsta, block);
if (block)
- set_sta_flags(sta, WLAN_STA_PS_DRIVER);
- else if (test_sta_flags(sta, WLAN_STA_PS_DRIVER))
+ set_sta_flag(sta, WLAN_STA_PS_DRIVER);
+ else if (test_sta_flag(sta, WLAN_STA_PS_DRIVER))
ieee80211_queue_work(hw, &sta->drv_unblock_wk);
}
EXPORT_SYMBOL(ieee80211_sta_block_awake);
-void ieee80211_sta_set_tim(struct ieee80211_sta *pubsta)
+void ieee80211_sta_eosp_irqsafe(struct ieee80211_sta *pubsta)
{
struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
+ struct ieee80211_local *local = sta->local;
+ struct sk_buff *skb;
+ struct skb_eosp_msg_data *data;
+
+ trace_api_eosp(local, pubsta);
+
+ skb = alloc_skb(0, GFP_ATOMIC);
+ if (!skb) {
+ /* too bad ... but race is better than loss */
+ clear_sta_flag(sta, WLAN_STA_SP);
+ return;
+ }
+
+ data = (void *)skb->cb;
+ memcpy(data->sta, pubsta->addr, ETH_ALEN);
+ memcpy(data->iface, sta->sdata->vif.addr, ETH_ALEN);
+ skb->pkt_type = IEEE80211_EOSP_MSG;
+ skb_queue_tail(&local->skb_queue, skb);
+ tasklet_schedule(&local->tasklet);
+}
+EXPORT_SYMBOL(ieee80211_sta_eosp_irqsafe);
+
+void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta,
+ u8 tid, bool buffered)
+{
+ struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
+
+ if (WARN_ON(tid >= STA_TID_NUM))
+ return;
+
+ if (buffered)
+ set_bit(tid, &sta->driver_buffered_tids);
+ else
+ clear_bit(tid, &sta->driver_buffered_tids);
- set_sta_flags(sta, WLAN_STA_PS_DRIVER_BUF);
- sta_info_set_tim_bit(sta);
+ sta_info_recalc_tim(sta);
}
-EXPORT_SYMBOL(ieee80211_sta_set_tim);
+EXPORT_SYMBOL(ieee80211_sta_set_buffered);
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index c6ae871..556fbcc 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -19,7 +19,8 @@
/**
* enum ieee80211_sta_info_flags - Stations flags
*
- * These flags are used with &struct sta_info's @flags member.
+ * These flags are used with &struct sta_info's @flags member, but
+ * only indirectly with set_sta_flag() and friends.
*
* @WLAN_STA_AUTH: Station is authenticated.
* @WLAN_STA_ASSOC: Station is associated.
@@ -43,24 +44,33 @@
* be in the queues
* @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping
* station in power-save mode, reply when the driver unblocks.
- * @WLAN_STA_PS_DRIVER_BUF: Station has frames pending in driver internal
- * buffers. Automatically cleared on station wake-up.
+ * @WLAN_STA_TDLS_PEER: Station is a TDLS peer.
+ * @WLAN_STA_TDLS_PEER_AUTH: This TDLS peer is authorized to send direct
+ * packets. This means the link is enabled.
+ * @WLAN_STA_UAPSD: Station requested unscheduled SP while driver was
+ * keeping station in power-save mode, reply when the driver
+ * unblocks the station.
+ * @WLAN_STA_SP: Station is in a service period, so don't try to
+ * reply to other uAPSD trigger frames or PS-Poll.
*/
enum ieee80211_sta_info_flags {
- WLAN_STA_AUTH = 1<<0,
- WLAN_STA_ASSOC = 1<<1,
- WLAN_STA_PS_STA = 1<<2,
- WLAN_STA_AUTHORIZED = 1<<3,
- WLAN_STA_SHORT_PREAMBLE = 1<<4,
- WLAN_STA_ASSOC_AP = 1<<5,
- WLAN_STA_WME = 1<<6,
- WLAN_STA_WDS = 1<<7,
- WLAN_STA_CLEAR_PS_FILT = 1<<9,
- WLAN_STA_MFP = 1<<10,
- WLAN_STA_BLOCK_BA = 1<<11,
- WLAN_STA_PS_DRIVER = 1<<12,
- WLAN_STA_PSPOLL = 1<<13,
- WLAN_STA_PS_DRIVER_BUF = 1<<14,
+ WLAN_STA_AUTH,
+ WLAN_STA_ASSOC,
+ WLAN_STA_PS_STA,
+ WLAN_STA_AUTHORIZED,
+ WLAN_STA_SHORT_PREAMBLE,
+ WLAN_STA_ASSOC_AP,
+ WLAN_STA_WME,
+ WLAN_STA_WDS,
+ WLAN_STA_CLEAR_PS_FILT,
+ WLAN_STA_MFP,
+ WLAN_STA_BLOCK_BA,
+ WLAN_STA_PS_DRIVER,
+ WLAN_STA_PSPOLL,
+ WLAN_STA_TDLS_PEER,
+ WLAN_STA_TDLS_PEER_AUTH,
+ WLAN_STA_UAPSD,
+ WLAN_STA_SP,
};
#define STA_TID_NUM 16
@@ -86,6 +96,8 @@ enum ieee80211_sta_info_flags {
* @stop_initiator: initiator of a session stop
* @tx_stop: TX DelBA frame when stopping
* @buf_size: reorder buffer size at receiver
+ * @failed_bar_ssn: ssn of the last failed BAR tx attempt
+ * @bar_pending: BAR needs to be re-sent
*
* This structure's lifetime is managed by RCU, assignments to
* the array holding it must hold the aggregation mutex.
@@ -106,6 +118,9 @@ struct tid_ampdu_tx {
u8 stop_initiator;
bool tx_stop;
u8 buf_size;
+
+ u16 failed_bar_ssn;
+ bool bar_pending;
};
/**
@@ -123,6 +138,7 @@ struct tid_ampdu_tx {
* @dialog_token: dialog token for aggregation session
* @rcu_head: RCU head used for freeing this struct
* @reorder_lock: serializes access to reorder buffer, see below.
+ * @removed: this session is removed (but might have been found due to RCU)
*
* This structure's lifetime is managed by RCU, assignments to
* the array holding it must hold the aggregation mutex.
@@ -145,6 +161,7 @@ struct tid_ampdu_rx {
u16 buf_size;
u16 timeout;
u8 dialog_token;
+ bool removed;
};
/**
@@ -158,6 +175,8 @@ struct tid_ampdu_rx {
* @work: work struct for starting/stopping aggregation
* @tid_rx_timer_expired: bitmap indicating on which TIDs the
* RX timer expired until the work for it runs
+ * @tid_rx_stop_requested: bitmap indicating which BA sessions per TID the
+ * driver requested to close until the work for it runs
* @mtx: mutex to protect all TX data (except non-NULL assignments
* to tid_tx[idx], which are protected by the sta spinlock)
*/
@@ -166,6 +185,7 @@ struct sta_ampdu_mlme {
/* rx */
struct tid_ampdu_rx __rcu *tid_rx[STA_TID_NUM];
unsigned long tid_rx_timer_expired[BITS_TO_LONGS(STA_TID_NUM)];
+ unsigned long tid_rx_stop_requested[BITS_TO_LONGS(STA_TID_NUM)];
/* tx */
struct work_struct work;
struct tid_ampdu_tx __rcu *tid_tx[STA_TID_NUM];
@@ -195,15 +215,17 @@ struct sta_ampdu_mlme {
* @last_rx_rate_flag: rx status flag of the last data packet
* @lock: used for locking all fields that require locking, see comments
* in the header file.
- * @flaglock: spinlock for flags accesses
* @drv_unblock_wk: used for driver PS unblocking
* @listen_interval: listen interval of this station, when we're acting as AP
- * @flags: STA flags, see &enum ieee80211_sta_info_flags
- * @ps_tx_buf: buffer of frames to transmit to this station
- * when it leaves power saving state
- * @tx_filtered: buffer of frames we already tried to transmit
- * but were filtered by hardware due to STA having entered
- * power saving state
+ * @_flags: STA flags, see &enum ieee80211_sta_info_flags, do not use directly
+ * @ps_lock: used for powersave (when mac80211 is the AP) related locking
+ * @ps_tx_buf: buffers (per AC) of frames to transmit to this station
+ * when it leaves power saving state or polls
+ * @tx_filtered: buffers (per AC) of frames we already tried to
+ * transmit but were filtered by hardware due to STA having
+ * entered power saving state, these are also delivered to
+ * the station when it leaves powersave or polls for frames
+ * @driver_buffered_tids: bitmap of TIDs the driver has data buffered on
* @rx_packets: Number of MSDUs received from this STA
* @rx_bytes: Number of bytes received from this STA
* @wep_weak_iv_count: number of weak WEP IVs received from this station
@@ -235,10 +257,12 @@ struct sta_ampdu_mlme {
* @plink_timer: peer link watch timer
* @plink_timer_was_running: used by suspend/resume to restore timers
* @debugfs: debug filesystem info
- * @sta: station information we share with the driver
* @dead: set to true when sta is unlinked
* @uploaded: set to true when sta is uploaded to the driver
* @lost_packets: number of consecutive lost packets
+ * @dummy: indicate a dummy station created for receiving
+ * EAP frames before association
+ * @sta: station information we share with the driver
*/
struct sta_info {
/* General information, mostly static */
@@ -251,7 +275,6 @@ struct sta_info {
struct rate_control_ref *rate_ctrl;
void *rate_ctrl_priv;
spinlock_t lock;
- spinlock_t flaglock;
struct work_struct drv_unblock_wk;
@@ -261,18 +284,14 @@ struct sta_info {
bool uploaded;
- /*
- * frequently updated, locked with own spinlock (flaglock),
- * use the accessors defined below
- */
- u32 flags;
+ /* use the accessors defined below */
+ unsigned long _flags;
- /*
- * STA powersave frame queues, no more than the internal
- * locking required.
- */
- struct sk_buff_head ps_tx_buf;
- struct sk_buff_head tx_filtered;
+ /* STA powersave lock and frame queues */
+ spinlock_t ps_lock;
+ struct sk_buff_head ps_tx_buf[IEEE80211_NUM_ACS];
+ struct sk_buff_head tx_filtered[IEEE80211_NUM_ACS];
+ unsigned long driver_buffered_tids;
/* Updated from RX path only, no locking requirements */
unsigned long rx_packets, rx_bytes;
@@ -284,7 +303,8 @@ struct sta_info {
unsigned long rx_dropped;
int last_signal;
struct ewma avg_signal;
- __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES];
+ /* Plus 1 for non-QoS frames */
+ __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES + 1];
/* Updated from TX status path only, no locking requirements */
unsigned long tx_filtered_count;
@@ -332,6 +352,9 @@ struct sta_info {
unsigned int lost_packets;
+ /* should be right in front of sta to be in the same cache line */
+ bool dummy;
+
/* keep last! */
struct ieee80211_sta sta;
};
@@ -344,60 +367,28 @@ static inline enum nl80211_plink_state sta_plink_state(struct sta_info *sta)
return NL80211_PLINK_LISTEN;
}
-static inline void set_sta_flags(struct sta_info *sta, const u32 flags)
+static inline void set_sta_flag(struct sta_info *sta,
+ enum ieee80211_sta_info_flags flag)
{
- unsigned long irqfl;
-
- spin_lock_irqsave(&sta->flaglock, irqfl);
- sta->flags |= flags;
- spin_unlock_irqrestore(&sta->flaglock, irqfl);
+ set_bit(flag, &sta->_flags);
}
-static inline void clear_sta_flags(struct sta_info *sta, const u32 flags)
+static inline void clear_sta_flag(struct sta_info *sta,
+ enum ieee80211_sta_info_flags flag)
{
- unsigned long irqfl;
-
- spin_lock_irqsave(&sta->flaglock, irqfl);
- sta->flags &= ~flags;
- spin_unlock_irqrestore(&sta->flaglock, irqfl);
+ clear_bit(flag, &sta->_flags);
}
-static inline u32 test_sta_flags(struct sta_info *sta, const u32 flags)
+static inline int test_sta_flag(struct sta_info *sta,
+ enum ieee80211_sta_info_flags flag)
{
- u32 ret;
- unsigned long irqfl;
-
- spin_lock_irqsave(&sta->flaglock, irqfl);
- ret = sta->flags & flags;
- spin_unlock_irqrestore(&sta->flaglock, irqfl);
-
- return ret;
+ return test_bit(flag, &sta->_flags);
}
-static inline u32 test_and_clear_sta_flags(struct sta_info *sta,
- const u32 flags)
+static inline int test_and_clear_sta_flag(struct sta_info *sta,
+ enum ieee80211_sta_info_flags flag)
{
- u32 ret;
- unsigned long irqfl;
-
- spin_lock_irqsave(&sta->flaglock, irqfl);
- ret = sta->flags & flags;
- sta->flags &= ~flags;
- spin_unlock_irqrestore(&sta->flaglock, irqfl);
-
- return ret;
-}
-
-static inline u32 get_sta_flags(struct sta_info *sta)
-{
- u32 ret;
- unsigned long irqfl;
-
- spin_lock_irqsave(&sta->flaglock, irqfl);
- ret = sta->flags;
- spin_unlock_irqrestore(&sta->flaglock, irqfl);
-
- return ret;
+ return test_and_clear_bit(flag, &sta->_flags);
}
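The accessor rewrite above replaces a spinlock-protected u32 flag word with per-bit atomic operations; flags become bit indices rather than masks, so setting, clearing and testing them needs no extra lock. A userspace analogue using C11 atomics in place of the kernel's set_bit()/clear_bit()/test_bit() (a sketch only, names invented for the example):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum sta_flag { STA_AUTH, STA_ASSOC, STA_PS };

static atomic_ulong sta_flags;

static void set_flag(enum sta_flag f)
{
	atomic_fetch_or(&sta_flags, 1UL << f);
}

static void clear_flag(enum sta_flag f)
{
	atomic_fetch_and(&sta_flags, ~(1UL << f));
}

static bool test_flag(enum sta_flag f)
{
	return atomic_load(&sta_flags) & (1UL << f);
}

int main(void)
{
	set_flag(STA_ASSOC);
	set_flag(STA_PS);
	clear_flag(STA_PS);
	printf("assoc=%d ps=%d\n", test_flag(STA_ASSOC), test_flag(STA_PS));
	return 0;
}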
void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
@@ -415,8 +406,8 @@ rcu_dereference_protected_tid_tx(struct sta_info *sta, int tid)
#define STA_HASH(sta) (sta[5])
-/* Maximum number of frames to buffer per power saving station */
-#define STA_MAX_TX_BUFFER 128
+/* Maximum number of frames to buffer per power saving station per AC */
+#define STA_MAX_TX_BUFFER 64
/* Minimum buffered frame expiry time. If STA uses listen interval that is
* smaller than this value, the minimum value here is used instead. */
@@ -432,9 +423,15 @@ rcu_dereference_protected_tid_tx(struct sta_info *sta, int tid)
struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
const u8 *addr);
+struct sta_info *sta_info_get_rx(struct ieee80211_sub_if_data *sdata,
+ const u8 *addr);
+
struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
const u8 *addr);
+struct sta_info *sta_info_get_bss_rx(struct ieee80211_sub_if_data *sdata,
+ const u8 *addr);
+
static inline
void for_each_sta_info_type_check(struct ieee80211_local *local,
const u8 *addr,
@@ -455,6 +452,22 @@ void for_each_sta_info_type_check(struct ieee80211_local *local,
_sta = nxt, \
nxt = _sta ? rcu_dereference(_sta->hnext) : NULL \
) \
+ /* run code only if address matches and it's not a dummy sta */ \
+ if (memcmp(_sta->sta.addr, (_addr), ETH_ALEN) == 0 && \
+ !_sta->dummy)
+
+#define for_each_sta_info_rx(local, _addr, _sta, nxt) \
+ for ( /* initialise loop */ \
+ _sta = rcu_dereference(local->sta_hash[STA_HASH(_addr)]),\
+ nxt = _sta ? rcu_dereference(_sta->hnext) : NULL; \
+ /* typecheck */ \
+ for_each_sta_info_type_check(local, (_addr), _sta, nxt),\
+ /* continue condition */ \
+ _sta; \
+ /* advance loop */ \
+ _sta = nxt, \
+ nxt = _sta ? rcu_dereference(_sta->hnext) : NULL \
+ ) \
/* compare address and run code only if it matches */ \
if (memcmp(_sta->sta.addr, (_addr), ETH_ALEN) == 0)
@@ -480,14 +493,14 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
int sta_info_insert(struct sta_info *sta);
int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU);
int sta_info_insert_atomic(struct sta_info *sta);
+int sta_info_reinsert(struct sta_info *sta);
int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata,
const u8 *addr);
int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
const u8 *addr);
-void sta_info_set_tim_bit(struct sta_info *sta);
-void sta_info_clear_tim_bit(struct sta_info *sta);
+void sta_info_recalc_tim(struct sta_info *sta);
void sta_info_init(struct ieee80211_local *local);
void sta_info_stop(struct ieee80211_local *local);
@@ -498,5 +511,6 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta);
void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta);
+void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta);
#endif /* STA_INFO_H */
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 04cdbaf..1a49354 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -9,11 +9,13 @@
* published by the Free Software Foundation.
*/
+#include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "rate.h"
#include "mesh.h"
#include "led.h"
+#include "wme.h"
void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
@@ -43,6 +45,8 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ int ac;
/*
* This skb 'survived' a round-trip through the driver, and
@@ -63,11 +67,37 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
sta->tx_filtered_count++;
/*
+ * Clear more-data bit on filtered frames, it might be set
+ * but later frames might time out so it might have to be
+ * clear again ... It's all rather unlikely (this frame
+ * should time out first, right?) but let's not confuse
+ * peers unnecessarily.
+ */
+ if (hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_MOREDATA))
+ hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ u8 *p = ieee80211_get_qos_ctl(hdr);
+ int tid = *p & IEEE80211_QOS_CTL_TID_MASK;
+
+ /*
+ * Clear EOSP if set, this could happen e.g.
+ * if an absence period (us being a P2P GO)
+ * shortens the SP.
+ */
+ if (*p & IEEE80211_QOS_CTL_EOSP)
+ *p &= ~IEEE80211_QOS_CTL_EOSP;
+ ac = ieee802_1d_to_ac[tid & 7];
+ } else {
+ ac = IEEE80211_AC_BE;
+ }
+
+ /*
* Clear the TX filter mask for this STA when sending the next
* packet. If the STA went to power save mode, this will happen
* when it wakes up for the next time.
*/
- set_sta_flags(sta, WLAN_STA_CLEAR_PS_FILT);
+ set_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT);
/*
* This code races in the following way:
@@ -103,13 +133,19 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
* changes before calling TX status events if ordering can be
* unknown.
*/
- if (test_sta_flags(sta, WLAN_STA_PS_STA) &&
- skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) {
- skb_queue_tail(&sta->tx_filtered, skb);
+ if (test_sta_flag(sta, WLAN_STA_PS_STA) &&
+ skb_queue_len(&sta->tx_filtered[ac]) < STA_MAX_TX_BUFFER) {
+ skb_queue_tail(&sta->tx_filtered[ac], skb);
+ sta_info_recalc_tim(sta);
+
+ if (!timer_pending(&local->sta_cleanup))
+ mod_timer(&local->sta_cleanup,
+ round_jiffies(jiffies +
+ STA_INFO_CLEANUP_INTERVAL));
return;
}
- if (!test_sta_flags(sta, WLAN_STA_PS_STA) &&
+ if (!test_sta_flag(sta, WLAN_STA_PS_STA) &&
!(info->flags & IEEE80211_TX_INTFL_RETRIED)) {
/* Software retry the packet once */
info->flags |= IEEE80211_TX_INTFL_RETRIED;
@@ -121,18 +157,41 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
if (net_ratelimit())
wiphy_debug(local->hw.wiphy,
"dropped TX filtered frame, queue_len=%d PS=%d @%lu\n",
- skb_queue_len(&sta->tx_filtered),
- !!test_sta_flags(sta, WLAN_STA_PS_STA), jiffies);
+ skb_queue_len(&sta->tx_filtered[ac]),
+ !!test_sta_flag(sta, WLAN_STA_PS_STA), jiffies);
#endif
dev_kfree_skb(skb);
}
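The requeue above picks the per-AC filtered queue from the frame's TID via ieee802_1d_to_ac[]. A standalone sketch of that lookup (not part of the patch; the table mirrors the usual WMM user-priority-to-AC mapping and the QoS control constants are written out as literals):

#include <stdio.h>

enum { AC_VO, AC_VI, AC_BE, AC_BK };

int main(void)
{
	static const int tid_to_ac[8] = {
		AC_BE, AC_BK, AC_BK, AC_BE,	/* TIDs 0-3 */
		AC_VI, AC_VI, AC_VO, AC_VO,	/* TIDs 4-7 */
	};
	static const char *name[] = { "VO", "VI", "BE", "BK" };
	unsigned int qos_ctl = 0x0015;	/* example: EOSP bit set, TID 5 */
	int tid = qos_ctl & 0x000f;	/* QoS control TID mask */

	printf("TID %d -> AC %s\n", tid, name[tid_to_ac[tid & 7]]);
	return 0;
}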
+static void ieee80211_check_pending_bar(struct sta_info *sta, u8 *addr, u8 tid)
+{
+ struct tid_ampdu_tx *tid_tx;
+
+ tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
+ if (!tid_tx || !tid_tx->bar_pending)
+ return;
+
+ tid_tx->bar_pending = false;
+ ieee80211_send_bar(&sta->sdata->vif, addr, tid, tid_tx->failed_bar_ssn);
+}
+
static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
{
struct ieee80211_mgmt *mgmt = (void *) skb->data;
struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
+ if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
+ sta->last_rx = jiffies;
+
+ if (ieee80211_is_data_qos(mgmt->frame_control)) {
+ struct ieee80211_hdr *hdr = (void *) skb->data;
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ u16 tid = qc[0] & 0xf;
+
+ ieee80211_check_pending_bar(sta, hdr->addr1, tid);
+ }
+
if (ieee80211_is_action(mgmt->frame_control) &&
sdata->vif.type == NL80211_IFTYPE_STATION &&
mgmt->u.action.category == WLAN_CATEGORY_HT &&
@@ -161,6 +220,114 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
}
}
+static void ieee80211_set_bar_pending(struct sta_info *sta, u8 tid, u16 ssn)
+{
+ struct tid_ampdu_tx *tid_tx;
+
+ tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
+ if (!tid_tx)
+ return;
+
+ tid_tx->failed_bar_ssn = ssn;
+ tid_tx->bar_pending = true;
+}
+
+static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info)
+{
+ int len = sizeof(struct ieee80211_radiotap_header);
+
+ /* IEEE80211_RADIOTAP_RATE rate */
+ if (info->status.rates[0].idx >= 0 &&
+ !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS))
+ len += 2;
+
+ /* IEEE80211_RADIOTAP_TX_FLAGS */
+ len += 2;
+
+ /* IEEE80211_RADIOTAP_DATA_RETRIES */
+ len += 1;
+
+ /* IEEE80211_TX_RC_MCS */
+ if (info->status.rates[0].idx >= 0 &&
+ info->status.rates[0].flags & IEEE80211_TX_RC_MCS)
+ len += 3;
+
+ return len;
+}
+
+static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
+ *sband, struct sk_buff *skb,
+ int retry_count, int rtap_len)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ieee80211_radiotap_header *rthdr;
+ unsigned char *pos;
+ u16 txflags;
+
+ rthdr = (struct ieee80211_radiotap_header *) skb_push(skb, rtap_len);
+
+ memset(rthdr, 0, rtap_len);
+ rthdr->it_len = cpu_to_le16(rtap_len);
+ rthdr->it_present =
+ cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) |
+ (1 << IEEE80211_RADIOTAP_DATA_RETRIES));
+ pos = (unsigned char *)(rthdr + 1);
+
+ /*
+ * XXX: Once radiotap gets the bitmap reset thing the vendor
+ * extensions proposal contains, we can actually report
+ * the whole set of tries we did.
+ */
+
+ /* IEEE80211_RADIOTAP_RATE */
+ if (info->status.rates[0].idx >= 0 &&
+ !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS)) {
+ rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
+ *pos = sband->bitrates[info->status.rates[0].idx].bitrate / 5;
+ /* padding for tx flags */
+ pos += 2;
+ }
+
+ /* IEEE80211_RADIOTAP_TX_FLAGS */
+ txflags = 0;
+ if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
+ !is_multicast_ether_addr(hdr->addr1))
+ txflags |= IEEE80211_RADIOTAP_F_TX_FAIL;
+
+ if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
+ (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
+ txflags |= IEEE80211_RADIOTAP_F_TX_CTS;
+ else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
+ txflags |= IEEE80211_RADIOTAP_F_TX_RTS;
+
+ put_unaligned_le16(txflags, pos);
+ pos += 2;
+
+ /* IEEE80211_RADIOTAP_DATA_RETRIES */
+ /* for now report the total retry_count */
+ *pos = retry_count;
+ pos++;
+
+ /* IEEE80211_TX_RC_MCS */
+ if (info->status.rates[0].idx >= 0 &&
+ info->status.rates[0].flags & IEEE80211_TX_RC_MCS) {
+ rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
+ pos[0] = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
+ IEEE80211_RADIOTAP_MCS_HAVE_GI |
+ IEEE80211_RADIOTAP_MCS_HAVE_BW;
+ if (info->status.rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
+ pos[1] |= IEEE80211_RADIOTAP_MCS_SGI;
+ if (info->status.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ pos[1] |= IEEE80211_RADIOTAP_MCS_BW_40;
+ if (info->status.rates[0].flags & IEEE80211_TX_RC_GREEN_FIELD)
+ pos[1] |= IEEE80211_RADIOTAP_MCS_FMT_GF;
+ pos[2] = info->status.rates[0].idx;
+ pos += 3;
+ }
+
+}
+
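One detail of the rate field above that is easy to miss: mac80211's bitrate tables are kept in units of 100 kbps while the radiotap RATE field is in units of 500 kbps, hence the division by five. A tiny sketch (not part of the patch, example rates only):

#include <stdio.h>

int main(void)
{
	int bitrates_100kbps[] = { 10, 55, 60, 540 };	/* 1, 5.5, 6, 54 Mbit/s */
	unsigned int i;

	for (i = 0; i < sizeof(bitrates_100kbps) / sizeof(bitrates_100kbps[0]); i++)
		printf("%2d.%d Mbit/s -> radiotap rate byte %d\n",
		       bitrates_100kbps[i] / 10, bitrates_100kbps[i] % 10,
		       bitrates_100kbps[i] / 5);
	return 0;
}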
/*
* Use a static threshold for now, best value to be determined
* by testing ...
@@ -179,7 +346,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
u16 frag, type;
__le16 fc;
struct ieee80211_supported_band *sband;
- struct ieee80211_tx_status_rtap_hdr *rthdr;
struct ieee80211_sub_if_data *sdata;
struct net_device *prev_dev = NULL;
struct sta_info *sta, *tmp;
@@ -187,6 +353,9 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
int rates_idx = -1;
bool send_to_cooked;
bool acked;
+ struct ieee80211_bar *bar;
+ u16 tid;
+ int rtap_len;
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
if (info->status.rates[i].idx < 0) {
@@ -215,8 +384,11 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
if (memcmp(hdr->addr2, sta->sdata->vif.addr, ETH_ALEN))
continue;
+ if (info->flags & IEEE80211_TX_STATUS_EOSP)
+ clear_sta_flag(sta, WLAN_STA_SP);
+
acked = !!(info->flags & IEEE80211_TX_STAT_ACK);
- if (!acked && test_sta_flags(sta, WLAN_STA_PS_STA)) {
+ if (!acked && test_sta_flag(sta, WLAN_STA_PS_STA)) {
/*
* The STA is in power save mode, so assume
* that this TX packet failed because of that.
@@ -239,10 +411,35 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
tid = qc[0] & 0xf;
ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10)
& IEEE80211_SCTL_SEQ);
- ieee80211_send_bar(sta->sdata, hdr->addr1,
+ ieee80211_send_bar(&sta->sdata->vif, hdr->addr1,
tid, ssn);
}
+ if (!acked && ieee80211_is_back_req(fc)) {
+ u16 control;
+
+ /*
+ * BAR failed, store the last SSN and retry sending
+ * the BAR when the next unicast transmission on the
+ * same TID succeeds.
+ */
+ bar = (struct ieee80211_bar *) skb->data;
+ control = le16_to_cpu(bar->control);
+ if (!(control & IEEE80211_BAR_CTRL_MULTI_TID)) {
+ u16 ssn = le16_to_cpu(bar->start_seq_num);
+
+ tid = (control &
+ IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
+ IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
+
+ if (local->hw.flags &
+ IEEE80211_HW_TEARDOWN_AGGR_ON_BAR_FAIL)
+ ieee80211_stop_tx_ba_session(&sta->sta, tid);
+ else
+ ieee80211_set_bar_pending(sta, tid, ssn);
+ }
+ }
+
if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
ieee80211_handle_filtered_frame(local, sta, skb);
rcu_read_unlock();
@@ -345,9 +542,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
local->hw_roc_skb_for_status = NULL;
}
- if (cookie == local->hw_offchan_tx_cookie)
- local->hw_offchan_tx_cookie = 0;
-
cfg80211_mgmt_tx_status(
skb->dev, cookie, skb->data, skb->len,
!!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC);
@@ -370,44 +564,13 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
}
/* send frame to monitor interfaces now */
-
- if (skb_headroom(skb) < sizeof(*rthdr)) {
+ rtap_len = ieee80211_tx_radiotap_len(info);
+ if (WARN_ON_ONCE(skb_headroom(skb) < rtap_len)) {
printk(KERN_ERR "ieee80211_tx_status: headroom too small\n");
dev_kfree_skb(skb);
return;
}
-
- rthdr = (struct ieee80211_tx_status_rtap_hdr *)
- skb_push(skb, sizeof(*rthdr));
-
- memset(rthdr, 0, sizeof(*rthdr));
- rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
- rthdr->hdr.it_present =
- cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) |
- (1 << IEEE80211_RADIOTAP_DATA_RETRIES) |
- (1 << IEEE80211_RADIOTAP_RATE));
-
- if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
- !is_multicast_ether_addr(hdr->addr1))
- rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);
-
- /*
- * XXX: Once radiotap gets the bitmap reset thing the vendor
- * extensions proposal contains, we can actually report
- * the whole set of tries we did.
- */
- if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
- (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
- rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS);
- else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
- rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS);
- if (info->status.rates[0].idx >= 0 &&
- !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS))
- rthdr->rate = sband->bitrates[
- info->status.rates[0].idx].bitrate / 5;
-
- /* for now report the total retry_count */
- rthdr->data_retries = retry_count;
+ ieee80211_add_tx_radiotap_header(sband, skb, retry_count, rtap_len);
/* XXX: is this sufficient for BPF? */
skb_set_mac_header(skb, 0);
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index 757e4eb..51077a9 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -10,6 +10,7 @@
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/netdevice.h>
+#include <linux/export.h>
#include <asm/unaligned.h>
#include <net/mac80211.h>
@@ -101,6 +102,7 @@ static void tkip_mixing_phase1(const u8 *tk, struct tkip_ctx *ctx,
p1k[4] += tkipS(p1k[3] ^ get_unaligned_le16(tk + 0 + j)) + i;
}
ctx->state = TKIP_STATE_PHASE1_DONE;
+ ctx->p1k_iv32 = tsc_IV32;
}
static void tkip_mixing_phase2(const u8 *tk, struct tkip_ctx *ctx,
@@ -140,60 +142,80 @@ static void tkip_mixing_phase2(const u8 *tk, struct tkip_ctx *ctx,
/* Add TKIP IV and Ext. IV at @pos. @iv0, @iv1, and @iv2 are the first octets
* of the IV. Returns pointer to the octet following IVs (i.e., beginning of
* the packet payload). */
-u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key, u16 iv16)
+u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key)
{
- pos = write_tkip_iv(pos, iv16);
+ lockdep_assert_held(&key->u.tkip.txlock);
+
+ pos = write_tkip_iv(pos, key->u.tkip.tx.iv16);
*pos++ = (key->conf.keyidx << 6) | (1 << 5) /* Ext IV */;
put_unaligned_le32(key->u.tkip.tx.iv32, pos);
return pos + 4;
}
-void ieee80211_get_tkip_key(struct ieee80211_key_conf *keyconf,
- struct sk_buff *skb, enum ieee80211_tkip_key_type type,
- u8 *outkey)
+static void ieee80211_compute_tkip_p1k(struct ieee80211_key *key, u32 iv32)
{
- struct ieee80211_key *key = (struct ieee80211_key *)
- container_of(keyconf, struct ieee80211_key, conf);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- u8 *data;
- const u8 *tk;
- struct tkip_ctx *ctx;
- u16 iv16;
- u32 iv32;
+ struct ieee80211_sub_if_data *sdata = key->sdata;
+ struct tkip_ctx *ctx = &key->u.tkip.tx;
+ const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
- data = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control);
- iv16 = data[2] | (data[0] << 8);
- iv32 = get_unaligned_le32(&data[4]);
+ lockdep_assert_held(&key->u.tkip.txlock);
+
+ /*
+ * Update the P1K when the IV32 is different from the value it
+ * had when we last computed it (or when not initialised yet).
+ * This might flip-flop back and forth if packets are processed
+ * out-of-order due to the different ACs, but then we have to
+ * just compute the P1K more often.
+ */
+ if (ctx->p1k_iv32 != iv32 || ctx->state == TKIP_STATE_NOT_INIT)
+ tkip_mixing_phase1(tk, ctx, sdata->vif.addr, iv32);
+}
- tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
- ctx = &key->u.tkip.tx;
+void ieee80211_get_tkip_p1k_iv(struct ieee80211_key_conf *keyconf,
+ u32 iv32, u16 *p1k)
+{
+ struct ieee80211_key *key = (struct ieee80211_key *)
+ container_of(keyconf, struct ieee80211_key, conf);
+ struct tkip_ctx *ctx = &key->u.tkip.tx;
+ unsigned long flags;
-#ifdef CONFIG_MAC80211_TKIP_DEBUG
- printk(KERN_DEBUG "TKIP encrypt: iv16 = 0x%04x, iv32 = 0x%08x\n",
- iv16, iv32);
-
- if (iv32 != ctx->iv32) {
- printk(KERN_DEBUG "skb: iv32 = 0x%08x key: iv32 = 0x%08x\n",
- iv32, ctx->iv32);
- printk(KERN_DEBUG "Wrap around of iv16 in the middle of a "
- "fragmented packet\n");
- }
-#endif
+ spin_lock_irqsave(&key->u.tkip.txlock, flags);
+ ieee80211_compute_tkip_p1k(key, iv32);
+ memcpy(p1k, ctx->p1k, sizeof(ctx->p1k));
+ spin_unlock_irqrestore(&key->u.tkip.txlock, flags);
+}
+EXPORT_SYMBOL(ieee80211_get_tkip_p1k_iv);
- /* Update the p1k only when the iv16 in the packet wraps around, this
- * might occur after the wrap around of iv16 in the key in case of
- * fragmented packets. */
- if (iv16 == 0 || ctx->state == TKIP_STATE_NOT_INIT)
- tkip_mixing_phase1(tk, ctx, hdr->addr2, iv32);
+void ieee80211_get_tkip_rx_p1k(struct ieee80211_key_conf *keyconf,
+ const u8 *ta, u32 iv32, u16 *p1k)
+{
+ const u8 *tk = &keyconf->key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
+ struct tkip_ctx ctx;
- if (type == IEEE80211_TKIP_P1_KEY) {
- memcpy(outkey, ctx->p1k, sizeof(u16) * 5);
- return;
- }
+ tkip_mixing_phase1(tk, &ctx, ta, iv32);
+ memcpy(p1k, ctx.p1k, sizeof(ctx.p1k));
+}
+EXPORT_SYMBOL(ieee80211_get_tkip_rx_p1k);
- tkip_mixing_phase2(tk, ctx, iv16, outkey);
+void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf,
+ struct sk_buff *skb, u8 *p2k)
+{
+ struct ieee80211_key *key = (struct ieee80211_key *)
+ container_of(keyconf, struct ieee80211_key, conf);
+ const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
+ struct tkip_ctx *ctx = &key->u.tkip.tx;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ const u8 *data = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control);
+ u32 iv32 = get_unaligned_le32(&data[4]);
+ u16 iv16 = data[2] | (data[0] << 8);
+ unsigned long flags;
+
+ spin_lock_irqsave(&key->u.tkip.txlock, flags);
+ ieee80211_compute_tkip_p1k(key, iv32);
+ tkip_mixing_phase2(tk, ctx, iv16, p2k);
+ spin_unlock_irqrestore(&key->u.tkip.txlock, flags);
}
-EXPORT_SYMBOL(ieee80211_get_tkip_key);
+EXPORT_SYMBOL(ieee80211_get_tkip_p2k);
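The p2k helper above pulls iv16 and iv32 straight out of the frame, which only makes sense with the TKIP IV byte layout in mind: iv16 is split across bytes 0 and 2, byte 3 carries the key index and the Ext IV bit, and iv32 follows little-endian. A standalone sketch of that parsing (not part of the patch; the IV bytes are invented for the example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* TSC1, WEP seed byte, TSC0, key idx/Ext IV, then TSC2..TSC5 (LE) */
	uint8_t iv[8] = { 0x12, 0x32, 0x34, 0x20, 0x78, 0x56, 0x34, 0x12 };
	uint16_t iv16 = iv[2] | (iv[0] << 8);
	uint32_t iv32 = iv[4] | (iv[5] << 8) |
			((uint32_t)iv[6] << 16) | ((uint32_t)iv[7] << 24);

	printf("iv16=0x%04x iv32=0x%08x keyidx=%d extiv=%d\n",
	       iv16, iv32, iv[3] >> 6, !!(iv[3] & 0x20));
	return 0;
}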
/*
* Encrypt packet payload with TKIP using @key. @pos is a pointer to the
@@ -204,19 +226,15 @@ EXPORT_SYMBOL(ieee80211_get_tkip_key);
*/
int ieee80211_tkip_encrypt_data(struct crypto_cipher *tfm,
struct ieee80211_key *key,
- u8 *pos, size_t payload_len, u8 *ta)
+ struct sk_buff *skb,
+ u8 *payload, size_t payload_len)
{
u8 rc4key[16];
- struct tkip_ctx *ctx = &key->u.tkip.tx;
- const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
-
- /* Calculate per-packet key */
- if (ctx->iv16 == 0 || ctx->state == TKIP_STATE_NOT_INIT)
- tkip_mixing_phase1(tk, ctx, ta, ctx->iv32);
- tkip_mixing_phase2(tk, ctx, ctx->iv16, rc4key);
+ ieee80211_get_tkip_p2k(&key->conf, skb, rc4key);
- return ieee80211_wep_encrypt_data(tfm, rc4key, 16, pos, payload_len);
+ return ieee80211_wep_encrypt_data(tfm, rc4key, 16,
+ payload, payload_len);
}
/* Decrypt packet payload with TKIP using @key. @pos is a pointer to the
diff --git a/net/mac80211/tkip.h b/net/mac80211/tkip.h
index 1cab9c8..e3ecb65 100644
--- a/net/mac80211/tkip.h
+++ b/net/mac80211/tkip.h
@@ -13,11 +13,13 @@
#include <linux/crypto.h>
#include "key.h"
-u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key, u16 iv16);
+u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key);
int ieee80211_tkip_encrypt_data(struct crypto_cipher *tfm,
- struct ieee80211_key *key,
- u8 *pos, size_t payload_len, u8 *ta);
+ struct ieee80211_key *key,
+ struct sk_buff *skb,
+ u8 *payload, size_t payload_len);
+
enum {
TKIP_DECRYPT_OK = 0,
TKIP_DECRYPT_NO_EXT_IV = -1,
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index da878c1..91826b6 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -18,6 +18,7 @@
#include <linux/etherdevice.h>
#include <linux/bitmap.h>
#include <linux/rcupdate.h>
+#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/ieee80211_radiotap.h>
#include <net/cfg80211.h>
@@ -253,13 +254,12 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
- u32 sta_flags;
+ bool assoc = false;
if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
return TX_CONTINUE;
- if (unlikely(test_bit(SCAN_SW_SCANNING, &tx->local->scanning)) &&
- test_bit(SDATA_STATE_OFFCHANNEL, &tx->sdata->state) &&
+ if (unlikely(test_bit(SCAN_OFF_CHANNEL, &tx->local->scanning)) &&
!ieee80211_is_probe_req(hdr->frame_control) &&
!ieee80211_is_nullfunc(hdr->frame_control))
/*
@@ -278,16 +278,14 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
if (tx->sdata->vif.type == NL80211_IFTYPE_WDS)
return TX_CONTINUE;
- if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
- return TX_CONTINUE;
-
if (tx->flags & IEEE80211_TX_PS_BUFFERED)
return TX_CONTINUE;
- sta_flags = tx->sta ? get_sta_flags(tx->sta) : 0;
+ if (tx->sta)
+ assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC);
if (likely(tx->flags & IEEE80211_TX_UNICAST)) {
- if (unlikely(!(sta_flags & WLAN_STA_ASSOC) &&
+ if (unlikely(!assoc &&
tx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
ieee80211_is_data(hdr->frame_control))) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
@@ -343,13 +341,22 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
total += skb_queue_len(&ap->ps_bc_buf);
}
+ /*
+ * Drop one frame from each station from the lowest-priority
+ * AC that has frames at all.
+ */
list_for_each_entry_rcu(sta, &local->sta_list, list) {
- skb = skb_dequeue(&sta->ps_tx_buf);
- if (skb) {
- purged++;
- dev_kfree_skb(skb);
+ int ac;
+
+ for (ac = IEEE80211_AC_BK; ac >= IEEE80211_AC_VO; ac--) {
+ skb = skb_dequeue(&sta->ps_tx_buf[ac]);
+ total += skb_queue_len(&sta->ps_tx_buf[ac]);
+ if (skb) {
+ purged++;
+ dev_kfree_skb(skb);
+ break;
+ }
}
- total += skb_queue_len(&sta->ps_tx_buf);
}
rcu_read_unlock();
@@ -418,7 +425,7 @@ static int ieee80211_use_mfp(__le16 fc, struct sta_info *sta,
if (!ieee80211_is_mgmt(fc))
return 0;
- if (sta == NULL || !test_sta_flags(sta, WLAN_STA_MFP))
+ if (sta == NULL || !test_sta_flag(sta, WLAN_STA_MFP))
return 0;
if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *)
@@ -435,7 +442,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
struct ieee80211_local *local = tx->local;
- u32 staflags;
if (unlikely(!sta ||
ieee80211_is_probe_resp(hdr->frame_control) ||
@@ -444,57 +450,67 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
ieee80211_is_reassoc_resp(hdr->frame_control)))
return TX_CONTINUE;
- staflags = get_sta_flags(sta);
+ if (unlikely((test_sta_flag(sta, WLAN_STA_PS_STA) ||
+ test_sta_flag(sta, WLAN_STA_PS_DRIVER)) &&
+ !(info->flags & IEEE80211_TX_CTL_POLL_RESPONSE))) {
+ int ac = skb_get_queue_mapping(tx->skb);
- if (unlikely((staflags & (WLAN_STA_PS_STA | WLAN_STA_PS_DRIVER)) &&
- !(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE))) {
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- printk(KERN_DEBUG "STA %pM aid %d: PS buffer (entries "
- "before %d)\n",
- sta->sta.addr, sta->sta.aid,
- skb_queue_len(&sta->ps_tx_buf));
+ printk(KERN_DEBUG "STA %pM aid %d: PS buffer for AC %d\n",
+ sta->sta.addr, sta->sta.aid, ac);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
purge_old_ps_buffers(tx->local);
- if (skb_queue_len(&sta->ps_tx_buf) >= STA_MAX_TX_BUFFER) {
- struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf);
+
+ /* sync with ieee80211_sta_ps_deliver_wakeup */
+ spin_lock(&sta->ps_lock);
+ /*
+ * STA woke up in the meantime and all the frames on ps_tx_buf have
+ * been queued to the pending queue. No reordering can happen, go
+ * ahead and Tx the packet.
+ */
+ if (!test_sta_flag(sta, WLAN_STA_PS_STA) &&
+ !test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
+ spin_unlock(&sta->ps_lock);
+ return TX_CONTINUE;
+ }
+
+ if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) {
+ struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]);
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- if (net_ratelimit()) {
- printk(KERN_DEBUG "%s: STA %pM TX "
- "buffer full - dropping oldest frame\n",
- tx->sdata->name, sta->sta.addr);
- }
+ if (net_ratelimit())
+ printk(KERN_DEBUG "%s: STA %pM TX buffer for "
+ "AC %d full - dropping oldest frame\n",
+ tx->sdata->name, sta->sta.addr, ac);
#endif
dev_kfree_skb(old);
} else
tx->local->total_ps_buffered++;
- /*
- * Queue frame to be sent after STA wakes up/polls,
- * but don't set the TIM bit if the driver is blocking
- * wakeup or poll response transmissions anyway.
- */
- if (skb_queue_empty(&sta->ps_tx_buf) &&
- !(staflags & WLAN_STA_PS_DRIVER))
- sta_info_set_tim_bit(sta);
-
info->control.jiffies = jiffies;
info->control.vif = &tx->sdata->vif;
info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
- skb_queue_tail(&sta->ps_tx_buf, tx->skb);
+ skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb);
+ spin_unlock(&sta->ps_lock);
if (!timer_pending(&local->sta_cleanup))
mod_timer(&local->sta_cleanup,
round_jiffies(jiffies +
STA_INFO_CLEANUP_INTERVAL));
+ /*
+ * We queued up some frames, so the TIM bit might
+ * need to be set, recalculate it.
+ */
+ sta_info_recalc_tim(sta);
+
return TX_QUEUED;
}
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- else if (unlikely(staflags & WLAN_STA_PS_STA)) {
- printk(KERN_DEBUG "%s: STA %pM in PS mode, but pspoll "
- "set -> send frame\n", tx->sdata->name,
- sta->sta.addr);
+ else if (unlikely(test_sta_flag(sta, WLAN_STA_PS_STA))) {
+ printk(KERN_DEBUG
+ "%s: STA %pM in PS mode, but polling/in SP -> send frame\n",
+ tx->sdata->name, sta->sta.addr);
}
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
@@ -518,9 +534,11 @@ ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
- if (unlikely(tx->sdata->control_port_protocol == tx->skb->protocol &&
- tx->sdata->control_port_no_encrypt))
- info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ if (unlikely(tx->sdata->control_port_protocol == tx->skb->protocol)) {
+ if (tx->sdata->control_port_no_encrypt)
+ info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ info->flags |= IEEE80211_TX_CTL_USE_MINRATE;
+ }
return TX_CONTINUE;
}
@@ -552,7 +570,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
!(info->flags & IEEE80211_TX_CTL_INJECTED) &&
(!ieee80211_is_robust_mgmt_frame(hdr) ||
(ieee80211_is_action(hdr->frame_control) &&
- tx->sta && test_sta_flags(tx->sta, WLAN_STA_MFP)))) {
+ tx->sta && test_sta_flag(tx->sta, WLAN_STA_MFP)))) {
I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted);
return TX_DROP;
} else
@@ -589,6 +607,9 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
break;
}
+ if (unlikely(tx->key && tx->key->flags & KEY_FLAG_TAINTED))
+ return TX_DROP;
+
if (!skip_hw && tx->key &&
tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
info->control.hw_key = &tx->key->conf;
@@ -608,7 +629,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
u32 len;
bool inval = false, rts = false, short_preamble = false;
struct ieee80211_tx_rate_control txrc;
- u32 sta_flags;
+ bool assoc = false;
memset(&txrc, 0, sizeof(txrc));
@@ -644,17 +665,17 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
*/
if (tx->sdata->vif.bss_conf.use_short_preamble &&
(ieee80211_is_data(hdr->frame_control) ||
- (tx->sta && test_sta_flags(tx->sta, WLAN_STA_SHORT_PREAMBLE))))
+ (tx->sta && test_sta_flag(tx->sta, WLAN_STA_SHORT_PREAMBLE))))
txrc.short_preamble = short_preamble = true;
- sta_flags = tx->sta ? get_sta_flags(tx->sta) : 0;
+ if (tx->sta)
+ assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC);
/*
* Lets not bother rate control if we're associated and cannot
* talk to the sta. This should not happen.
*/
- if (WARN(test_bit(SCAN_SW_SCANNING, &tx->local->scanning) &&
- (sta_flags & WLAN_STA_ASSOC) &&
+ if (WARN(test_bit(SCAN_SW_SCANNING, &tx->local->scanning) && assoc &&
!rate_usable_index_exists(sband, &tx->sta->sta),
"%s: Dropped data frame as no usable bitrate found while "
"scanning and associated. Target station: "
@@ -797,6 +818,9 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
if (ieee80211_hdrlen(hdr->frame_control) < 24)
return TX_CONTINUE;
+ if (ieee80211_is_qos_nullfunc(hdr->frame_control))
+ return TX_CONTINUE;
+
/*
* Anything but QoS data that has a sequence number field
* (is long enough) gets a sequence number from the global
@@ -874,7 +898,7 @@ static int ieee80211_fragment(struct ieee80211_local *local,
pos += fraglen;
}
- skb->len = hdrlen + per_fragm;
+ skb_trim(skb, hdrlen + per_fragm);
return 0;
}
@@ -888,7 +912,10 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
int hdrlen;
int fragnum;
- if (!(tx->flags & IEEE80211_TX_FRAGMENTED))
+ if (info->flags & IEEE80211_TX_CTL_DONTFRAG)
+ return TX_CONTINUE;
+
+ if (tx->local->ops->set_frag_threshold)
return TX_CONTINUE;
/*
@@ -901,7 +928,7 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
hdrlen = ieee80211_hdrlen(hdr->frame_control);
- /* internal error, why is TX_FRAGMENTED set? */
+ /* internal error, why isn't DONTFRAG set? */
if (WARN_ON(skb->len + FCS_LEN <= frag_threshold))
return TX_DROP;
@@ -1022,100 +1049,6 @@ ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
/* actual transmit path */
-/*
- * deal with packet injection down monitor interface
- * with Radiotap Header -- only called for monitor mode interface
- */
-static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
- struct sk_buff *skb)
-{
- /*
- * this is the moment to interpret and discard the radiotap header that
- * must be at the start of the packet injected in Monitor mode
- *
- * Need to take some care with endian-ness since radiotap
- * args are little-endian
- */
-
- struct ieee80211_radiotap_iterator iterator;
- struct ieee80211_radiotap_header *rthdr =
- (struct ieee80211_radiotap_header *) skb->data;
- bool hw_frag;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
- NULL);
-
- info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
- tx->flags &= ~IEEE80211_TX_FRAGMENTED;
-
- /* packet is fragmented in HW if we have a non-NULL driver callback */
- hw_frag = (tx->local->ops->set_frag_threshold != NULL);
-
- /*
- * for every radiotap entry that is present
- * (ieee80211_radiotap_iterator_next returns -ENOENT when no more
- * entries present, or -EINVAL on error)
- */
-
- while (!ret) {
- ret = ieee80211_radiotap_iterator_next(&iterator);
-
- if (ret)
- continue;
-
- /* see if this argument is something we can use */
- switch (iterator.this_arg_index) {
- /*
- * You must take care when dereferencing iterator.this_arg
- * for multibyte types... the pointer is not aligned. Use
- * get_unaligned((type *)iterator.this_arg) to dereference
- * iterator.this_arg for type "type" safely on all arches.
- */
- case IEEE80211_RADIOTAP_FLAGS:
- if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) {
- /*
- * this indicates that the skb we have been
- * handed has the 32-bit FCS CRC at the end...
- * we should react to that by snipping it off
- * because it will be recomputed and added
- * on transmission
- */
- if (skb->len < (iterator._max_length + FCS_LEN))
- return false;
-
- skb_trim(skb, skb->len - FCS_LEN);
- }
- if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP)
- info->flags &= ~IEEE80211_TX_INTFL_DONT_ENCRYPT;
- if ((*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG) &&
- !hw_frag)
- tx->flags |= IEEE80211_TX_FRAGMENTED;
- break;
-
- /*
- * Please update the file
- * Documentation/networking/mac80211-injection.txt
- * when parsing new fields here.
- */
-
- default:
- break;
- }
- }
-
- if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */
- return false;
-
- /*
- * remove the radiotap header
- * iterator->_max_length was sanity-checked against
- * skb->len by iterator init
- */
- skb_pull(skb, iterator._max_length);
-
- return true;
-}
-
static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
struct sk_buff *skb,
struct ieee80211_tx_info *info,
@@ -1180,7 +1113,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- int hdrlen, tid;
+ int tid;
u8 *qc;
memset(tx, 0, sizeof(*tx));
@@ -1188,26 +1121,6 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
tx->local = local;
tx->sdata = sdata;
tx->channel = local->hw.conf.channel;
- /*
- * Set this flag (used below to indicate "automatic fragmentation"),
- * it will be cleared/left by radiotap as desired.
- * Only valid when fragmentation is done by the stack.
- */
- if (!local->ops->set_frag_threshold)
- tx->flags |= IEEE80211_TX_FRAGMENTED;
-
- /* process and remove the injection radiotap header */
- if (unlikely(info->flags & IEEE80211_TX_INTFL_HAS_RADIOTAP)) {
- if (!__ieee80211_parse_tx_radiotap(tx, skb))
- return TX_DROP;
-
- /*
- * __ieee80211_parse_tx_radiotap has now removed
- * the radiotap header that was present and pre-filled
- * 'tx' with tx control information.
- */
- info->flags &= ~IEEE80211_TX_INTFL_HAS_RADIOTAP;
- }
/*
* If this flag is set to true anywhere, and we get here,
@@ -1230,7 +1143,9 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
tx->sta = sta_info_get(sdata, hdr->addr1);
if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
- (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) {
+ !ieee80211_is_qos_nullfunc(hdr->frame_control) &&
+ (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) &&
+ !(local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW)) {
struct tid_ampdu_tx *tid_tx;
qc = ieee80211_get_qos_ctl(hdr);
@@ -1255,29 +1170,25 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
tx->flags |= IEEE80211_TX_UNICAST;
if (unlikely(local->wifi_wme_noack_test))
info->flags |= IEEE80211_TX_CTL_NO_ACK;
- else
- info->flags &= ~IEEE80211_TX_CTL_NO_ACK;
+ /*
+ * Flags are initialized to 0. Hence, no need to
+ * explicitly unset IEEE80211_TX_CTL_NO_ACK since
+ * it might already be set for injected frames.
+ */
}
- if (tx->flags & IEEE80211_TX_FRAGMENTED) {
- if ((tx->flags & IEEE80211_TX_UNICAST) &&
- skb->len + FCS_LEN > local->hw.wiphy->frag_threshold &&
- !(info->flags & IEEE80211_TX_CTL_AMPDU))
- tx->flags |= IEEE80211_TX_FRAGMENTED;
- else
- tx->flags &= ~IEEE80211_TX_FRAGMENTED;
+ if (!(info->flags & IEEE80211_TX_CTL_DONTFRAG)) {
+ if (!(tx->flags & IEEE80211_TX_UNICAST) ||
+ skb->len + FCS_LEN <= local->hw.wiphy->frag_threshold ||
+ info->flags & IEEE80211_TX_CTL_AMPDU)
+ info->flags |= IEEE80211_TX_CTL_DONTFRAG;
}
if (!tx->sta)
info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
- else if (test_and_clear_sta_flags(tx->sta, WLAN_STA_CLEAR_PS_FILT))
+ else if (test_and_clear_sta_flag(tx->sta, WLAN_STA_CLEAR_PS_FILT))
info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
- hdrlen = ieee80211_hdrlen(hdr->frame_control);
- if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) {
- u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)];
- tx->ethertype = (pos[0] << 8) | pos[1];
- }
info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT;
return TX_CONTINUE;
@@ -1475,28 +1386,19 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
/* device xmit handlers */
-static int ieee80211_skb_resize(struct ieee80211_local *local,
+static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb,
int head_need, bool may_encrypt)
{
+ struct ieee80211_local *local = sdata->local;
int tail_need = 0;
- /*
- * This could be optimised, devices that do full hardware
- * crypto (including TKIP MMIC) need no tailroom... But we
- * have no drivers for such devices currently.
- */
- if (may_encrypt) {
+ if (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt) {
tail_need = IEEE80211_ENCRYPT_TAILROOM;
tail_need -= skb_tailroom(skb);
tail_need = max_t(int, tail_need, 0);
}
- if (head_need || tail_need) {
- /* Sorry. Can't account for this any more */
- skb_orphan(skb);
- }
-
if (skb_cloned(skb))
I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
else if (head_need || tail_need)
@@ -1510,67 +1412,19 @@ static int ieee80211_skb_resize(struct ieee80211_local *local,
return -ENOMEM;
}
- /* update truesize too */
- skb->truesize += head_need + tail_need;
-
return 0;
}
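
The hunks above only change how the head/tail deficits are computed (tail room is now reserved only while some key actually needs software-appended ICV/MIC bytes, tracked by crypto_tx_tailroom_needed_cnt); the reallocation itself sits in unchanged context between the two hunks. A minimal, self-contained sketch of that underlying pattern, with hypothetical naming:

/* Editorial sketch, not the patch's code: the reallocation the deficits
 * computed above feed into.  nhead/ntail are bytes of head/tail room
 * still missing (already clamped to >= 0 by the caller). */
static int hypothetical_skb_grow(struct sk_buff *skb, int nhead, int ntail)
{
	if (!nhead && !ntail)
		return 0;

	return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC);
}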
-static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb)
+void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- struct ieee80211_sub_if_data *tmp_sdata;
int headroom;
bool may_encrypt;
rcu_read_lock();
- if (unlikely(sdata->vif.type == NL80211_IFTYPE_MONITOR)) {
- int hdrlen;
- u16 len_rthdr;
-
- info->flags |= IEEE80211_TX_CTL_INJECTED |
- IEEE80211_TX_INTFL_HAS_RADIOTAP;
-
- len_rthdr = ieee80211_get_radiotap_len(skb->data);
- hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr);
- hdrlen = ieee80211_hdrlen(hdr->frame_control);
-
- /* check the header is complete in the frame */
- if (likely(skb->len >= len_rthdr + hdrlen)) {
- /*
- * We process outgoing injected frames that have a
- * local address we handle as though they are our
- * own frames.
- * This code here isn't entirely correct, the local
- * MAC address is not necessarily enough to find
- * the interface to use; for that proper VLAN/WDS
- * support we will need a different mechanism.
- */
-
- list_for_each_entry_rcu(tmp_sdata, &local->interfaces,
- list) {
- if (!ieee80211_sdata_running(tmp_sdata))
- continue;
- if (tmp_sdata->vif.type ==
- NL80211_IFTYPE_MONITOR ||
- tmp_sdata->vif.type ==
- NL80211_IFTYPE_AP_VLAN ||
- tmp_sdata->vif.type ==
- NL80211_IFTYPE_WDS)
- continue;
- if (compare_ether_addr(tmp_sdata->vif.addr,
- hdr->addr2) == 0) {
- sdata = tmp_sdata;
- break;
- }
- }
- }
- }
-
may_encrypt = !(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT);
headroom = local->tx_headroom;
@@ -1579,7 +1433,7 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
headroom -= skb_headroom(skb);
headroom = max_t(int, 0, headroom);
- if (ieee80211_skb_resize(local, skb, headroom, may_encrypt)) {
+ if (ieee80211_skb_resize(sdata, skb, headroom, may_encrypt)) {
dev_kfree_skb(skb);
rcu_read_unlock();
return;
@@ -1597,11 +1451,94 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
return;
}
- ieee80211_set_qos_hdr(local, skb);
+ ieee80211_set_qos_hdr(sdata, skb);
ieee80211_tx(sdata, skb, false);
rcu_read_unlock();
}
+static bool ieee80211_parse_tx_radiotap(struct sk_buff *skb)
+{
+ struct ieee80211_radiotap_iterator iterator;
+ struct ieee80211_radiotap_header *rthdr =
+ (struct ieee80211_radiotap_header *) skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
+ NULL);
+ u16 txflags;
+
+ info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
+ IEEE80211_TX_CTL_DONTFRAG;
+
+ /*
+ * for every radiotap entry that is present
+ * (ieee80211_radiotap_iterator_next returns -ENOENT when no more
+ * entries present, or -EINVAL on error)
+ */
+
+ while (!ret) {
+ ret = ieee80211_radiotap_iterator_next(&iterator);
+
+ if (ret)
+ continue;
+
+ /* see if this argument is something we can use */
+ switch (iterator.this_arg_index) {
+ /*
+ * You must take care when dereferencing iterator.this_arg
+ * for multibyte types... the pointer is not aligned. Use
+ * get_unaligned((type *)iterator.this_arg) to dereference
+ * iterator.this_arg for type "type" safely on all arches.
+ */
+ case IEEE80211_RADIOTAP_FLAGS:
+ if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) {
+ /*
+ * this indicates that the skb we have been
+ * handed has the 32-bit FCS CRC at the end...
+ * we should react to that by snipping it off
+ * because it will be recomputed and added
+ * on transmission
+ */
+ if (skb->len < (iterator._max_length + FCS_LEN))
+ return false;
+
+ skb_trim(skb, skb->len - FCS_LEN);
+ }
+ if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP)
+ info->flags &= ~IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG)
+ info->flags &= ~IEEE80211_TX_CTL_DONTFRAG;
+ break;
+
+ case IEEE80211_RADIOTAP_TX_FLAGS:
+ txflags = get_unaligned_le16(iterator.this_arg);
+ if (txflags & IEEE80211_RADIOTAP_F_TX_NOACK)
+ info->flags |= IEEE80211_TX_CTL_NO_ACK;
+ break;
+
+ /*
+ * Please update the file
+ * Documentation/networking/mac80211-injection.txt
+ * when parsing new fields here.
+ */
+
+ default:
+ break;
+ }
+ }
+
+ if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */
+ return false;
+
+ /*
+ * remove the radiotap header
+ * iterator->_max_length was sanity-checked against
+ * skb->len by iterator init
+ */
+ skb_pull(skb, iterator._max_length);
+
+ return true;
+}
+
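
The parser above only consumes a radiotap header that userspace has already prepended to the injected frame. A minimal sketch of the smallest such header — version 0, no present fields, total length 8 bytes — which makes the iterator return -ENOENT immediately and leaves all defaults in place (struct and variable names here are illustrative, not from the patch):

/* Illustrative only -- the smallest radiotap header an injector could
 * prepend; it_present == 0 means the loop above finds no fields and the
 * whole 8 bytes are simply pulled off the skb afterwards. */
struct hypothetical_min_radiotap {
	u8     it_version;   /* always 0 */
	u8     it_pad;
	__le16 it_len;       /* total header length: 8 */
	__le32 it_present;   /* field bitmap: empty */
} __packed;

static const struct hypothetical_min_radiotap min_rtap = {
	.it_version = 0,
	.it_len     = cpu_to_le16(sizeof(struct hypothetical_min_radiotap)),
	.it_present = cpu_to_le32(0),
};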
netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -1610,7 +1547,10 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
struct ieee80211_radiotap_header *prthdr =
(struct ieee80211_radiotap_header *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_sub_if_data *tmp_sdata, *sdata;
u16 len_rthdr;
+ int hdrlen;
/*
* Frame injection is not allowed if beaconing is not allowed
@@ -1661,12 +1601,65 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
skb_set_network_header(skb, len_rthdr);
skb_set_transport_header(skb, len_rthdr);
+ if (skb->len < len_rthdr + 2)
+ goto fail;
+
+ hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr);
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+
+ if (skb->len < len_rthdr + hdrlen)
+ goto fail;
+
+ /*
+ * Initialize skb->protocol if the injected frame is a data frame
+ * carrying a rfc1042 header
+ */
+ if (ieee80211_is_data(hdr->frame_control) &&
+ skb->len >= len_rthdr + hdrlen + sizeof(rfc1042_header) + 2) {
+ u8 *payload = (u8 *)hdr + hdrlen;
+
+ if (compare_ether_addr(payload, rfc1042_header) == 0)
+ skb->protocol = cpu_to_be16((payload[6] << 8) |
+ payload[7]);
+ }
+
memset(info, 0, sizeof(*info));
- info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
+ info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
+ IEEE80211_TX_CTL_INJECTED;
+
+ /* process and remove the injection radiotap header */
+ if (!ieee80211_parse_tx_radiotap(skb))
+ goto fail;
+
+ rcu_read_lock();
+
+ /*
+ * We process outgoing injected frames that have a local address
+ * we handle as though they are non-injected frames.
+ * This code here isn't entirely correct, the local MAC address
+ * isn't always enough to find the interface to use; for proper
+ * VLAN/WDS support we will need a different mechanism (which
+ * likely isn't going to be monitor interfaces).
+ */
+ sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ list_for_each_entry_rcu(tmp_sdata, &local->interfaces, list) {
+ if (!ieee80211_sdata_running(tmp_sdata))
+ continue;
+ if (tmp_sdata->vif.type == NL80211_IFTYPE_MONITOR ||
+ tmp_sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+ tmp_sdata->vif.type == NL80211_IFTYPE_WDS)
+ continue;
+ if (compare_ether_addr(tmp_sdata->vif.addr, hdr->addr2) == 0) {
+ sdata = tmp_sdata;
+ break;
+ }
+ }
+
+ ieee80211_xmit(sdata, skb);
+ rcu_read_unlock();
- /* pass the radiotap header up to xmit */
- ieee80211_xmit(IEEE80211_DEV_TO_SUB_IF(dev), skb);
return NETDEV_TX_OK;
fail:
@@ -1705,8 +1698,9 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
int encaps_len, skip_header_bytes;
int nh_pos, h_pos;
struct sta_info *sta = NULL;
- u32 sta_flags = 0;
+ bool wme_sta = false, authorized = false, tdls_auth = false;
struct sk_buff *tmp_skb;
+ bool tdls_direct = false;
if (unlikely(skb->len < ETH_HLEN)) {
ret = NETDEV_TX_OK;
@@ -1730,7 +1724,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
memcpy(hdr.addr3, skb->data, ETH_ALEN);
memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
hdrlen = 30;
- sta_flags = get_sta_flags(sta);
+ authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
+ wme_sta = test_sta_flag(sta, WLAN_STA_WME);
}
rcu_read_unlock();
if (sta)
@@ -1818,11 +1813,50 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
break;
#endif
case NL80211_IFTYPE_STATION:
- memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
- if (sdata->u.mgd.use_4addr &&
- cpu_to_be16(ethertype) != sdata->control_port_protocol) {
- fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
+ if (sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) {
+ bool tdls_peer = false;
+
+ rcu_read_lock();
+ sta = sta_info_get(sdata, skb->data);
+ if (sta) {
+ authorized = test_sta_flag(sta,
+ WLAN_STA_AUTHORIZED);
+ wme_sta = test_sta_flag(sta, WLAN_STA_WME);
+ tdls_peer = test_sta_flag(sta,
+ WLAN_STA_TDLS_PEER);
+ tdls_auth = test_sta_flag(sta,
+ WLAN_STA_TDLS_PEER_AUTH);
+ }
+ rcu_read_unlock();
+
+ /*
+ * If the TDLS link is enabled, send everything
+ * directly. Otherwise, allow TDLS setup frames
+ * to be transmitted indirectly.
+ */
+ tdls_direct = tdls_peer && (tdls_auth ||
+ !(ethertype == ETH_P_TDLS && skb->len > 14 &&
+ skb->data[14] == WLAN_TDLS_SNAP_RFTYPE));
+ }
+
+ if (tdls_direct) {
+ /* link during setup - throw out frames to peer */
+ if (!tdls_auth) {
+ ret = NETDEV_TX_OK;
+ goto fail;
+ }
+
+ /* DA SA BSSID */
+ memcpy(hdr.addr1, skb->data, ETH_ALEN);
+ memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
+ memcpy(hdr.addr3, sdata->u.mgd.bssid, ETH_ALEN);
+ hdrlen = 24;
+ } else if (sdata->u.mgd.use_4addr &&
+ cpu_to_be16(ethertype) != sdata->control_port_protocol) {
+ fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
+ IEEE80211_FCTL_TODS);
/* RA TA DA SA */
+ memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
memcpy(hdr.addr3, skb->data, ETH_ALEN);
memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
@@ -1830,6 +1864,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
} else {
fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
/* BSSID SA DA */
+ memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
memcpy(hdr.addr3, skb->data, ETH_ALEN);
hdrlen = 24;
@@ -1855,13 +1890,19 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
if (!is_multicast_ether_addr(hdr.addr1)) {
rcu_read_lock();
sta = sta_info_get(sdata, hdr.addr1);
- if (sta)
- sta_flags = get_sta_flags(sta);
+ if (sta) {
+ authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
+ wme_sta = test_sta_flag(sta, WLAN_STA_WME);
+ }
rcu_read_unlock();
}
+ /* For mesh, the use of the QoS header is mandatory */
+ if (ieee80211_vif_is_mesh(&sdata->vif))
+ wme_sta = true;
+
/* receiver and we are QoS enabled, use a QoS type frame */
- if ((sta_flags & WLAN_STA_WME) && local->hw.queues >= 4) {
+ if (wme_sta && local->hw.queues >= 4) {
fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
hdrlen += 2;
}
@@ -1870,12 +1911,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
* Drop unicast frames to unauthorised stations unless they are
* EAPOL frames from the local station.
*/
- if (!ieee80211_vif_is_mesh(&sdata->vif) &&
- unlikely(!is_multicast_ether_addr(hdr.addr1) &&
- !(sta_flags & WLAN_STA_AUTHORIZED) &&
- !(cpu_to_be16(ethertype) == sdata->control_port_protocol &&
- compare_ether_addr(sdata->vif.addr,
- skb->data + ETH_ALEN) == 0))) {
+ if (unlikely(!ieee80211_vif_is_mesh(&sdata->vif) &&
+ !is_multicast_ether_addr(hdr.addr1) && !authorized &&
+ (cpu_to_be16(ethertype) != sdata->control_port_protocol ||
+ compare_ether_addr(sdata->vif.addr, skb->data + ETH_ALEN)))) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
if (net_ratelimit())
printk(KERN_DEBUG "%s: dropped frame to %pM"
@@ -1946,7 +1985,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
head_need += IEEE80211_ENCRYPT_HEADROOM;
head_need += local->tx_headroom;
head_need = max_t(int, 0, head_need);
- if (ieee80211_skb_resize(local, skb, head_need, true))
+ if (ieee80211_skb_resize(sdata, skb, head_need, true))
goto fail;
}
@@ -2277,13 +2316,23 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
mgmt->u.beacon.beacon_int =
cpu_to_le16(sdata->vif.bss_conf.beacon_int);
- mgmt->u.beacon.capab_info = 0x0; /* 0x0 for MPs */
+ mgmt->u.beacon.capab_info |= cpu_to_le16(
+ sdata->u.mesh.security ? WLAN_CAPABILITY_PRIVACY : 0);
pos = skb_put(skb, 2);
*pos++ = WLAN_EID_SSID;
*pos++ = 0x0;
- mesh_mgmt_ies_add(skb, sdata);
+ if (ieee80211_add_srates_ie(&sdata->vif, skb) ||
+ mesh_add_ds_params_ie(skb, sdata) ||
+ ieee80211_add_ext_srates_ie(&sdata->vif, skb) ||
+ mesh_add_rsn_ie(skb, sdata) ||
+ mesh_add_meshid_ie(skb, sdata) ||
+ mesh_add_meshconf_ie(skb, sdata) ||
+ mesh_add_vendor_ies(skb, sdata)) {
+ pr_err("o11s: couldn't add ies!\n");
+ goto out;
+ }
} else {
WARN_ON(1);
goto out;
@@ -2337,11 +2386,9 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
local = sdata->local;
skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll));
- if (!skb) {
- printk(KERN_DEBUG "%s: failed to allocate buffer for "
- "pspoll template\n", sdata->name);
+ if (!skb)
return NULL;
- }
+
skb_reserve(skb, local->hw.extra_tx_headroom);
pspoll = (struct ieee80211_pspoll *) skb_put(skb, sizeof(*pspoll));
@@ -2377,11 +2424,9 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
local = sdata->local;
skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc));
- if (!skb) {
- printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc "
- "template\n", sdata->name);
+ if (!skb)
return NULL;
- }
+
skb_reserve(skb, local->hw.extra_tx_headroom);
nullfunc = (struct ieee80211_hdr_3addr *) skb_put(skb,
@@ -2416,11 +2461,8 @@ struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) +
ie_ssid_len + ie_len);
- if (!skb) {
- printk(KERN_DEBUG "%s: failed to allocate buffer for probe "
- "request template\n", sdata->name);
+ if (!skb)
return NULL;
- }
skb_reserve(skb, local->hw.extra_tx_headroom);
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 11d9d49..7095ae5 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -13,13 +13,13 @@
#include <net/mac80211.h>
#include <linux/netdevice.h>
+#include <linux/export.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/bitmap.h>
-#include <linux/crc32.h>
#include <net/net_namespace.h>
#include <net/cfg80211.h>
#include <net/rtnetlink.h>
@@ -368,14 +368,14 @@ void ieee80211_add_pending_skb(struct ieee80211_local *local,
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
}
-int ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
- struct sk_buff_head *skbs,
- void (*fn)(void *data), void *data)
+void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
+ struct sk_buff_head *skbs,
+ void (*fn)(void *data), void *data)
{
struct ieee80211_hw *hw = &local->hw;
struct sk_buff *skb;
unsigned long flags;
- int queue, ret = 0, i;
+ int queue, i;
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
for (i = 0; i < hw->queues; i++)
@@ -390,7 +390,6 @@ int ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
continue;
}
- ret++;
queue = skb_get_queue_mapping(skb);
__skb_queue_tail(&local->pending[queue], skb);
}
@@ -402,14 +401,12 @@ int ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
__ieee80211_wake_queue(hw, i,
IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
-
- return ret;
}
-int ieee80211_add_pending_skbs(struct ieee80211_local *local,
- struct sk_buff_head *skbs)
+void ieee80211_add_pending_skbs(struct ieee80211_local *local,
+ struct sk_buff_head *skbs)
{
- return ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL);
+ ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL);
}
void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw,
@@ -573,172 +570,6 @@ void ieee802_11_parse_elems(u8 *start, size_t len,
ieee802_11_parse_elems_crc(start, len, elems, 0, 0);
}
-u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
- struct ieee802_11_elems *elems,
- u64 filter, u32 crc)
-{
- size_t left = len;
- u8 *pos = start;
- bool calc_crc = filter != 0;
-
- memset(elems, 0, sizeof(*elems));
- elems->ie_start = start;
- elems->total_len = len;
-
- while (left >= 2) {
- u8 id, elen;
-
- id = *pos++;
- elen = *pos++;
- left -= 2;
-
- if (elen > left)
- break;
-
- if (calc_crc && id < 64 && (filter & (1ULL << id)))
- crc = crc32_be(crc, pos - 2, elen + 2);
-
- switch (id) {
- case WLAN_EID_SSID:
- elems->ssid = pos;
- elems->ssid_len = elen;
- break;
- case WLAN_EID_SUPP_RATES:
- elems->supp_rates = pos;
- elems->supp_rates_len = elen;
- break;
- case WLAN_EID_FH_PARAMS:
- elems->fh_params = pos;
- elems->fh_params_len = elen;
- break;
- case WLAN_EID_DS_PARAMS:
- elems->ds_params = pos;
- elems->ds_params_len = elen;
- break;
- case WLAN_EID_CF_PARAMS:
- elems->cf_params = pos;
- elems->cf_params_len = elen;
- break;
- case WLAN_EID_TIM:
- if (elen >= sizeof(struct ieee80211_tim_ie)) {
- elems->tim = (void *)pos;
- elems->tim_len = elen;
- }
- break;
- case WLAN_EID_IBSS_PARAMS:
- elems->ibss_params = pos;
- elems->ibss_params_len = elen;
- break;
- case WLAN_EID_CHALLENGE:
- elems->challenge = pos;
- elems->challenge_len = elen;
- break;
- case WLAN_EID_VENDOR_SPECIFIC:
- if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
- pos[2] == 0xf2) {
- /* Microsoft OUI (00:50:F2) */
-
- if (calc_crc)
- crc = crc32_be(crc, pos - 2, elen + 2);
-
- if (pos[3] == 1) {
- /* OUI Type 1 - WPA IE */
- elems->wpa = pos;
- elems->wpa_len = elen;
- } else if (elen >= 5 && pos[3] == 2) {
- /* OUI Type 2 - WMM IE */
- if (pos[4] == 0) {
- elems->wmm_info = pos;
- elems->wmm_info_len = elen;
- } else if (pos[4] == 1) {
- elems->wmm_param = pos;
- elems->wmm_param_len = elen;
- }
- }
- }
- break;
- case WLAN_EID_RSN:
- elems->rsn = pos;
- elems->rsn_len = elen;
- break;
- case WLAN_EID_ERP_INFO:
- elems->erp_info = pos;
- elems->erp_info_len = elen;
- break;
- case WLAN_EID_EXT_SUPP_RATES:
- elems->ext_supp_rates = pos;
- elems->ext_supp_rates_len = elen;
- break;
- case WLAN_EID_HT_CAPABILITY:
- if (elen >= sizeof(struct ieee80211_ht_cap))
- elems->ht_cap_elem = (void *)pos;
- break;
- case WLAN_EID_HT_INFORMATION:
- if (elen >= sizeof(struct ieee80211_ht_info))
- elems->ht_info_elem = (void *)pos;
- break;
- case WLAN_EID_MESH_ID:
- elems->mesh_id = pos;
- elems->mesh_id_len = elen;
- break;
- case WLAN_EID_MESH_CONFIG:
- if (elen >= sizeof(struct ieee80211_meshconf_ie))
- elems->mesh_config = (void *)pos;
- break;
- case WLAN_EID_PEER_LINK:
- elems->peer_link = pos;
- elems->peer_link_len = elen;
- break;
- case WLAN_EID_PREQ:
- elems->preq = pos;
- elems->preq_len = elen;
- break;
- case WLAN_EID_PREP:
- elems->prep = pos;
- elems->prep_len = elen;
- break;
- case WLAN_EID_PERR:
- elems->perr = pos;
- elems->perr_len = elen;
- break;
- case WLAN_EID_RANN:
- if (elen >= sizeof(struct ieee80211_rann_ie))
- elems->rann = (void *)pos;
- break;
- case WLAN_EID_CHANNEL_SWITCH:
- elems->ch_switch_elem = pos;
- elems->ch_switch_elem_len = elen;
- break;
- case WLAN_EID_QUIET:
- if (!elems->quiet_elem) {
- elems->quiet_elem = pos;
- elems->quiet_elem_len = elen;
- }
- elems->num_of_quiet_elem++;
- break;
- case WLAN_EID_COUNTRY:
- elems->country_elem = pos;
- elems->country_elem_len = elen;
- break;
- case WLAN_EID_PWR_CONSTRAINT:
- elems->pwr_constr_elem = pos;
- elems->pwr_constr_elem_len = elen;
- break;
- case WLAN_EID_TIMEOUT_INTERVAL:
- elems->timeout_int = pos;
- elems->timeout_int_len = elen;
- break;
- default:
- break;
- }
-
- left -= elen;
- pos += elen;
- }
-
- return crc;
-}
-
void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
@@ -799,7 +630,8 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
qparam.uapsd = false;
- drv_conf_tx(local, queue, &qparam);
+ sdata->tx_conf[queue] = qparam;
+ drv_conf_tx(local, sdata, queue, &qparam);
}
/* after reinitialize QoS TX queues setting to default,
@@ -873,11 +705,9 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
skb = dev_alloc_skb(local->hw.extra_tx_headroom +
sizeof(*mgmt) + 6 + extra_len);
- if (!skb) {
- printk(KERN_DEBUG "%s: failed to allocate buffer for auth "
- "frame\n", sdata->name);
+ if (!skb)
return;
- }
+
skb_reserve(skb, local->hw.extra_tx_headroom);
mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6);
@@ -1016,9 +846,10 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
}
struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
- u8 *dst,
+ u8 *dst, u32 ratemask,
const u8 *ssid, size_t ssid_len,
- const u8 *ie, size_t ie_len)
+ const u8 *ie, size_t ie_len,
+ bool directed)
{
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
@@ -1029,20 +860,23 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
/* FIXME: come up with a proper value */
buf = kmalloc(200 + ie_len, GFP_KERNEL);
- if (!buf) {
- printk(KERN_DEBUG "%s: failed to allocate temporary IE "
- "buffer\n", sdata->name);
+ if (!buf)
return NULL;
- }
- chan = ieee80211_frequency_to_channel(
- local->hw.conf.channel->center_freq);
+ /*
+ * Do not send DS Channel parameter for directed probe requests
+ * in order to maximize the chance that we get a response. Some
+ * badly-behaved APs don't respond when this parameter is included.
+ */
+ if (directed)
+ chan = 0;
+ else
+ chan = ieee80211_frequency_to_channel(
+ local->hw.conf.channel->center_freq);
buf_len = ieee80211_build_preq_ies(local, buf, ie, ie_len,
local->hw.conf.channel->band,
- sdata->rc_rateidx_mask
- [local->hw.conf.channel->band],
- chan);
+ ratemask, chan);
skb = ieee80211_probereq_get(&local->hw, &sdata->vif,
ssid, ssid_len,
@@ -1066,13 +900,19 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
const u8 *ssid, size_t ssid_len,
- const u8 *ie, size_t ie_len)
+ const u8 *ie, size_t ie_len,
+ u32 ratemask, bool directed, bool no_cck)
{
struct sk_buff *skb;
- skb = ieee80211_build_probe_req(sdata, dst, ssid, ssid_len, ie, ie_len);
- if (skb)
+ skb = ieee80211_build_probe_req(sdata, dst, ratemask, ssid, ssid_len,
+ ie, ie_len, directed);
+ if (skb) {
+ if (no_cck)
+ IEEE80211_SKB_CB(skb)->flags |=
+ IEEE80211_TX_CTL_NO_CCK_RATE;
ieee80211_tx_skb(sdata, skb);
+ }
}
u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
@@ -1127,7 +967,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
struct ieee80211_hw *hw = &local->hw;
struct ieee80211_sub_if_data *sdata;
struct sta_info *sta;
- int res;
+ int res, i;
#ifdef CONFIG_PM
if (local->suspended)
@@ -1150,27 +990,37 @@ int ieee80211_reconfig(struct ieee80211_local *local)
}
#endif
- /* restart hardware */
- if (local->open_count) {
- /*
- * Upon resume hardware can sometimes be goofy due to
- * various platform / driver / bus issues, so restarting
- * the device may at times not work immediately. Propagate
- * the error.
- */
- res = drv_start(local);
- if (res) {
- WARN(local->suspended, "Hardware became unavailable "
- "upon resume. This could be a software issue "
- "prior to suspend or a hardware issue.\n");
- return res;
- }
+ /* setup fragmentation threshold */
+ drv_set_frag_threshold(local, hw->wiphy->frag_threshold);
+
+ /* setup RTS threshold */
+ drv_set_rts_threshold(local, hw->wiphy->rts_threshold);
- ieee80211_led_radio(local, true);
- ieee80211_mod_tpt_led_trig(local,
- IEEE80211_TPT_LEDTRIG_FL_RADIO, 0);
+ /* reset coverage class */
+ drv_set_coverage_class(local, hw->wiphy->coverage_class);
+
+ /* everything else happens only if HW was up & running */
+ if (!local->open_count)
+ goto wake_up;
+
+ /*
+ * Upon resume hardware can sometimes be goofy due to
+ * various platform / driver / bus issues, so restarting
+ * the device may at times not work immediately. Propagate
+ * the error.
+ */
+ res = drv_start(local);
+ if (res) {
+ WARN(local->suspended, "Hardware became unavailable "
+ "upon resume. This could be a software issue "
+ "prior to suspend or a hardware issue.\n");
+ return res;
}
+ ieee80211_led_radio(local, true);
+ ieee80211_mod_tpt_led_trig(local,
+ IEEE80211_TPT_LEDTRIG_FL_RADIO, 0);
+
/* add interfaces */
list_for_each_entry(sdata, &local->interfaces, list) {
if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
@@ -1194,11 +1044,16 @@ int ieee80211_reconfig(struct ieee80211_local *local)
}
mutex_unlock(&local->sta_mtx);
- /* setup fragmentation threshold */
- drv_set_frag_threshold(local, hw->wiphy->frag_threshold);
+ /* reconfigure tx conf */
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+ sdata->vif.type == NL80211_IFTYPE_MONITOR ||
+ !ieee80211_sdata_running(sdata))
+ continue;
- /* setup RTS threshold */
- drv_set_rts_threshold(local, hw->wiphy->rts_threshold);
+ for (i = 0; i < hw->queues; i++)
+ drv_conf_tx(local, sdata, i, &sdata->tx_conf[i]);
+ }
/* reconfigure hardware */
ieee80211_hw_config(local, ~0);
@@ -1234,6 +1089,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
changed |= BSS_CHANGED_IBSS;
/* fall through */
case NL80211_IFTYPE_AP:
+ changed |= BSS_CHANGED_SSID;
+ /* fall through */
case NL80211_IFTYPE_MESH_POINT:
changed |= BSS_CHANGED_BEACON |
BSS_CHANGED_BEACON_ENABLED;
@@ -1275,7 +1132,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
list_for_each_entry(sta, &local->sta_list, list) {
ieee80211_sta_tear_down_BA_sessions(sta, true);
- clear_sta_flags(sta, WLAN_STA_BLOCK_BA);
+ clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
}
mutex_unlock(&local->sta_mtx);
@@ -1325,6 +1182,33 @@ int ieee80211_reconfig(struct ieee80211_local *local)
return 0;
}
+void ieee80211_resume_disconnect(struct ieee80211_vif *vif)
+{
+ struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_local *local;
+ struct ieee80211_key *key;
+
+ if (WARN_ON(!vif))
+ return;
+
+ sdata = vif_to_sdata(vif);
+ local = sdata->local;
+
+ if (WARN_ON(!local->resuming))
+ return;
+
+ if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
+ return;
+
+ sdata->flags |= IEEE80211_SDATA_DISCONNECT_RESUME;
+
+ mutex_lock(&local->key_mtx);
+ list_for_each_entry(key, &sdata->key_list, list)
+ key->flags |= KEY_FLAG_TAINTED;
+ mutex_unlock(&local->key_mtx);
+}
+EXPORT_SYMBOL_GPL(ieee80211_resume_disconnect);
+
static int check_mgd_smps(struct ieee80211_if_managed *ifmgd,
enum ieee80211_smps_mode *smps_mode)
{
@@ -1441,3 +1325,100 @@ size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset)
return pos;
}
+
+static void _ieee80211_enable_rssi_reports(struct ieee80211_sub_if_data *sdata,
+ int rssi_min_thold,
+ int rssi_max_thold)
+{
+ trace_api_enable_rssi_reports(sdata, rssi_min_thold, rssi_max_thold);
+
+ if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
+ return;
+
+ /*
+ * Scale up threshold values before storing it, as the RSSI averaging
+ * algorithm uses a scaled up value as well. Change this scaling
+ * factor if the RSSI averaging algorithm changes.
+ */
+ sdata->u.mgd.rssi_min_thold = rssi_min_thold*16;
+ sdata->u.mgd.rssi_max_thold = rssi_max_thold*16;
+}
+
+void ieee80211_enable_rssi_reports(struct ieee80211_vif *vif,
+ int rssi_min_thold,
+ int rssi_max_thold)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+ WARN_ON(rssi_min_thold == rssi_max_thold ||
+ rssi_min_thold > rssi_max_thold);
+
+ _ieee80211_enable_rssi_reports(sdata, rssi_min_thold,
+ rssi_max_thold);
+}
+EXPORT_SYMBOL(ieee80211_enable_rssi_reports);
+
+void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+ _ieee80211_enable_rssi_reports(sdata, 0, 0);
+}
+EXPORT_SYMBOL(ieee80211_disable_rssi_reports);
+
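
These exports let a driver ask mac80211 to watch the averaged RSSI of the current connection and notify it when the value leaves the given window; the *16 scaling above is purely internal. A hedged usage sketch from a hypothetical driver (function name and thresholds are illustrative, not taken from the patch):

/* Illustrative driver-side use of the new exports; -80/-60 dBm are
 * arbitrary example thresholds, the vif must be a station interface. */
static void hypothetical_drv_rssi_tracking(struct ieee80211_vif *vif, bool on)
{
	if (on)
		ieee80211_enable_rssi_reports(vif, -80, -60);
	else
		ieee80211_disable_rssi_reports(vif);
}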
+int ieee80211_add_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_supported_band *sband;
+ int rate;
+ u8 i, rates, *pos;
+
+ sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+ rates = sband->n_bitrates;
+ if (rates > 8)
+ rates = 8;
+
+ if (skb_tailroom(skb) < rates + 2)
+ return -ENOMEM;
+
+ pos = skb_put(skb, rates + 2);
+ *pos++ = WLAN_EID_SUPP_RATES;
+ *pos++ = rates;
+ for (i = 0; i < rates; i++) {
+ rate = sband->bitrates[i].bitrate;
+ *pos++ = (u8) (rate / 5);
+ }
+
+ return 0;
+}
+
+int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_supported_band *sband;
+ int rate;
+ u8 i, exrates, *pos;
+
+ sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+ exrates = sband->n_bitrates;
+ if (exrates > 8)
+ exrates -= 8;
+ else
+ exrates = 0;
+
+ if (skb_tailroom(skb) < exrates + 2)
+ return -ENOMEM;
+
+ if (exrates) {
+ pos = skb_put(skb, exrates + 2);
+ *pos++ = WLAN_EID_EXT_SUPP_RATES;
+ *pos++ = exrates;
+ for (i = 8; i < sband->n_bitrates; i++) {
+ rate = sband->bitrates[i].bitrate;
+ *pos++ = (u8) (rate / 5);
+ }
+ }
+ return 0;
+}
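
Both helpers rely on the same unit conversion: struct ieee80211_rate.bitrate is kept in 100 kb/s units while the (Extended) Supported Rates element encodes rates in 500 kb/s units, hence rate / 5 — for example 54 Mb/s is bitrate 540 and becomes the IE value 108. A one-line editorial sketch of that conversion (the basic-rate flag, bit 0x80, is deliberately left clear, just as in the helpers above):

/* Editorial sketch of the conversion used above: 100 kb/s units in
 * ieee80211_rate.bitrate -> 500 kb/s units in the rates element. */
static inline u8 hypothetical_bitrate_to_ie(int bitrate)
{
	return (u8)(bitrate / 5);	/* e.g. 540 (54 Mb/s) -> 108 */
}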
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index a1c6bfd..34583c5 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -97,8 +97,7 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
- if (WARN_ON(skb_tailroom(skb) < WEP_ICV_LEN ||
- skb_headroom(skb) < WEP_IV_LEN))
+ if (WARN_ON(skb_headroom(skb) < WEP_IV_LEN))
return NULL;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -160,6 +159,9 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local,
size_t len;
u8 rc4key[3 + WLAN_KEY_LEN_WEP104];
+ if (WARN_ON(skb_tailroom(skb) < WEP_ICV_LEN))
+ return -1;
+
iv = ieee80211_wep_add_iv(local, skb, keylen, keyidx);
if (!iv)
return -1;
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 28bc084..fd52e69 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -72,7 +72,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
case NL80211_IFTYPE_AP_VLAN:
sta = rcu_dereference(sdata->u.vlan.sta);
if (sta) {
- qos = get_sta_flags(sta) & WLAN_STA_WME;
+ qos = test_sta_flag(sta, WLAN_STA_WME);
break;
}
case NL80211_IFTYPE_AP:
@@ -83,11 +83,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
break;
#ifdef CONFIG_MAC80211_MESH
case NL80211_IFTYPE_MESH_POINT:
- /*
- * XXX: This is clearly broken ... but already was before,
- * because ieee80211_fill_mesh_addresses() would clear A1
- * except for multicast addresses.
- */
+ ra = skb->data;
break;
#endif
case NL80211_IFTYPE_STATION:
@@ -103,7 +99,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
if (!sta && ra && !is_multicast_ether_addr(ra)) {
sta = sta_info_get(sdata, ra);
if (sta)
- qos = get_sta_flags(sta) & WLAN_STA_WME;
+ qos = test_sta_flag(sta, WLAN_STA_WME);
}
rcu_read_unlock();
@@ -139,7 +135,8 @@ u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
return ieee802_1d_to_ac[skb->priority];
}
-void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb)
+void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
@@ -150,11 +147,11 @@ void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb)
tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
- if (unlikely(local->wifi_wme_noack_test))
- ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
- QOS_CONTROL_ACK_POLICY_SHIFT;
- /* qos header is 2 bytes, second reserved */
+ if (unlikely(sdata->local->wifi_wme_noack_test))
+ ack_policy |= IEEE80211_QOS_CTL_ACK_POLICY_NOACK;
+ /* qos header is 2 bytes */
*p++ = ack_policy | tid;
- *p = 0;
+ *p = ieee80211_vif_is_mesh(&sdata->vif) ?
+ (IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8) : 0;
}
}
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h
index 6053b1c..34e166f 100644
--- a/net/mac80211/wme.h
+++ b/net/mac80211/wme.h
@@ -13,16 +13,12 @@
#include <linux/netdevice.h>
#include "ieee80211_i.h"
-#define QOS_CONTROL_ACK_POLICY_NORMAL 0
-#define QOS_CONTROL_ACK_POLICY_NOACK 1
-
-#define QOS_CONTROL_ACK_POLICY_SHIFT 5
-
extern const int ieee802_1d_to_ac[8];
u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb);
-void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb);
+void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb);
u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
struct sk_buff *skb);
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index c9acfda..99165ef 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -25,6 +25,7 @@
#include "ieee80211_i.h"
#include "rate.h"
+#include "driver-ops.h"
#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
#define IEEE80211_AUTH_MAX_TRIES 3
@@ -228,11 +229,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
wk->ie_len + /* extra IEs */
9, /* WMM */
GFP_KERNEL);
- if (!skb) {
- printk(KERN_DEBUG "%s: failed to allocate buffer for assoc "
- "frame\n", sdata->name);
+ if (!skb)
return;
- }
+
skb_reserve(skb, local->hw.extra_tx_headroom);
capab = WLAN_CAPABILITY_ESS;
@@ -427,6 +426,14 @@ ieee80211_direct_probe(struct ieee80211_work *wk)
struct ieee80211_sub_if_data *sdata = wk->sdata;
struct ieee80211_local *local = sdata->local;
+ if (!wk->probe_auth.synced) {
+ int ret = drv_tx_sync(local, sdata, wk->filter_ta,
+ IEEE80211_TX_SYNC_AUTH);
+ if (ret)
+ return WORK_ACT_TIMEOUT;
+ }
+ wk->probe_auth.synced = true;
+
wk->probe_auth.tries++;
if (wk->probe_auth.tries > IEEE80211_AUTH_MAX_TRIES) {
printk(KERN_DEBUG "%s: direct probe to %pM timed out\n",
@@ -450,7 +457,8 @@ ieee80211_direct_probe(struct ieee80211_work *wk)
* will not answer to direct packet in unassociated state.
*/
ieee80211_send_probe_req(sdata, NULL, wk->probe_auth.ssid,
- wk->probe_auth.ssid_len, NULL, 0);
+ wk->probe_auth.ssid_len, NULL, 0,
+ (u32) -1, true, false);
wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
run_again(local, wk->timeout);
@@ -465,6 +473,14 @@ ieee80211_authenticate(struct ieee80211_work *wk)
struct ieee80211_sub_if_data *sdata = wk->sdata;
struct ieee80211_local *local = sdata->local;
+ if (!wk->probe_auth.synced) {
+ int ret = drv_tx_sync(local, sdata, wk->filter_ta,
+ IEEE80211_TX_SYNC_AUTH);
+ if (ret)
+ return WORK_ACT_TIMEOUT;
+ }
+ wk->probe_auth.synced = true;
+
wk->probe_auth.tries++;
if (wk->probe_auth.tries > IEEE80211_AUTH_MAX_TRIES) {
printk(KERN_DEBUG "%s: authentication with %pM"
@@ -498,6 +514,14 @@ ieee80211_associate(struct ieee80211_work *wk)
struct ieee80211_sub_if_data *sdata = wk->sdata;
struct ieee80211_local *local = sdata->local;
+ if (!wk->assoc.synced) {
+ int ret = drv_tx_sync(local, sdata, wk->filter_ta,
+ IEEE80211_TX_SYNC_ASSOC);
+ if (ret)
+ return WORK_ACT_TIMEOUT;
+ }
+ wk->assoc.synced = true;
+
wk->assoc.tries++;
if (wk->assoc.tries > IEEE80211_ASSOC_MAX_TRIES) {
printk(KERN_DEBUG "%s: association with %pM"
@@ -875,26 +899,6 @@ static bool ieee80211_work_ct_coexists(enum nl80211_channel_type wk_ct,
return false;
}
-static enum nl80211_channel_type
-ieee80211_calc_ct(enum nl80211_channel_type wk_ct,
- enum nl80211_channel_type oper_ct)
-{
- switch (wk_ct) {
- case NL80211_CHAN_NO_HT:
- return oper_ct;
- case NL80211_CHAN_HT20:
- if (oper_ct != NL80211_CHAN_NO_HT)
- return oper_ct;
- return wk_ct;
- case NL80211_CHAN_HT40MINUS:
- case NL80211_CHAN_HT40PLUS:
- return wk_ct;
- }
- WARN_ON(1); /* shouldn't get here */
- return wk_ct;
-}
-
-
static void ieee80211_work_timer(unsigned long data)
{
struct ieee80211_local *local = (void *) data;
@@ -945,50 +949,18 @@ static void ieee80211_work_work(struct work_struct *work)
}
if (!started && !local->tmp_channel) {
- bool on_oper_chan;
- bool tmp_chan_changed = false;
- bool on_oper_chan2;
- enum nl80211_channel_type wk_ct;
- on_oper_chan = ieee80211_cfg_on_oper_channel(local);
-
- /* Work with existing channel type if possible. */
- wk_ct = wk->chan_type;
- if (wk->chan == local->hw.conf.channel)
- wk_ct = ieee80211_calc_ct(wk->chan_type,
- local->hw.conf.channel_type);
-
- if (local->tmp_channel)
- if ((local->tmp_channel != wk->chan) ||
- (local->tmp_channel_type != wk_ct))
- tmp_chan_changed = true;
-
- local->tmp_channel = wk->chan;
- local->tmp_channel_type = wk_ct;
/*
- * Leave the station vifs in awake mode if they
- * happen to be on the same channel as
- * the requested channel.
+ * TODO: could optimize this by leaving the
+ * station vifs in awake mode if they
+ * happen to be on the same channel as
+ * the requested channel
*/
- on_oper_chan2 = ieee80211_cfg_on_oper_channel(local);
- if (on_oper_chan != on_oper_chan2) {
- if (on_oper_chan2) {
- /* going off oper channel, PS too */
- ieee80211_offchannel_stop_vifs(local);
- ieee80211_hw_config(local, 0);
- } else {
- /* going on channel, but leave PS
- * off-channel. */
- ieee80211_hw_config(local, 0);
- ieee80211_offchannel_return(local,
- true);
- }
- } else if (tmp_chan_changed)
- /* Still off-channel, but on some other
- * channel, so update hardware.
- * PS should already be off-channel.
- */
- ieee80211_hw_config(local, 0);
+ ieee80211_offchannel_stop_beaconing(local);
+ ieee80211_offchannel_stop_station(local);
+ local->tmp_channel = wk->chan;
+ local->tmp_channel_type = wk->chan_type;
+ ieee80211_hw_config(local, 0);
started = true;
wk->timeout = jiffies;
}
@@ -1074,8 +1046,7 @@ static void ieee80211_work_work(struct work_struct *work)
* we still need to do a hardware config. Currently,
* we cannot be here while scanning, however.
*/
- if (!ieee80211_cfg_on_oper_channel(local))
- ieee80211_hw_config(local, 0);
+ ieee80211_hw_config(local, 0);
/* At the least, we need to disable offchannel_ps,
* so just go ahead and run the entire offchannel
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index d9e03cf..a582504 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -15,6 +15,7 @@
#include <linux/gfp.h>
#include <asm/unaligned.h>
#include <net/mac80211.h>
+#include <crypto/aes.h>
#include "ieee80211_i.h"
#include "michael.h"
@@ -52,7 +53,8 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
}
if (info->control.hw_key &&
- !(tx->flags & IEEE80211_TX_FRAGMENTED) &&
+ (info->flags & IEEE80211_TX_CTL_DONTFRAG ||
+ tx->local->ops->set_frag_threshold) &&
!(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) {
/* hwaccel - with no need for SW-generated MMIC */
return TX_CONTINUE;
@@ -86,11 +88,6 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
struct sk_buff *skb = rx->skb;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- int queue = rx->queue;
-
- /* otherwise, TKIP is vulnerable to TID 0 vs. non-QoS replays */
- if (rx->queue == NUM_RX_DATA_QUEUES - 1)
- queue = 0;
/*
* it makes no sense to check for MIC errors on anything other
@@ -154,8 +151,8 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
update_iv:
/* update IV in key information to be able to detect replays */
- rx->key->u.tkip.rx[queue].iv32 = rx->tkip_iv32;
- rx->key->u.tkip.rx[queue].iv16 = rx->tkip_iv16;
+ rx->key->u.tkip.rx[rx->security_idx].iv32 = rx->tkip_iv32;
+ rx->key->u.tkip.rx[rx->security_idx].iv16 = rx->tkip_iv16;
return RX_CONTINUE;
@@ -177,6 +174,7 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_key *key = tx->key;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ unsigned long flags;
unsigned int hdrlen;
int len, tail;
u8 *pos;
@@ -204,11 +202,12 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
pos += hdrlen;
/* Increase IV for the frame */
+ spin_lock_irqsave(&key->u.tkip.txlock, flags);
key->u.tkip.tx.iv16++;
if (key->u.tkip.tx.iv16 == 0)
key->u.tkip.tx.iv32++;
-
- pos = ieee80211_tkip_add_iv(pos, key, key->u.tkip.tx.iv16);
+ pos = ieee80211_tkip_add_iv(pos, key);
+ spin_unlock_irqrestore(&key->u.tkip.txlock, flags);
/* hwaccel - with software IV */
if (info->control.hw_key)
@@ -217,9 +216,8 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
/* Add room for ICV */
skb_put(skb, TKIP_ICV_LEN);
- hdr = (struct ieee80211_hdr *) skb->data;
return ieee80211_tkip_encrypt_data(tx->local->wep_tx_tfm,
- key, pos, len, hdr->addr2);
+ key, skb, pos, len);
}
@@ -247,11 +245,6 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
struct ieee80211_key *key = rx->key;
struct sk_buff *skb = rx->skb;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
- int queue = rx->queue;
-
- /* otherwise, TKIP is vulnerable to TID 0 vs. non-QoS replays */
- if (rx->queue == NUM_RX_DATA_QUEUES - 1)
- queue = 0;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -272,7 +265,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm,
key, skb->data + hdrlen,
skb->len - hdrlen, rx->sta->sta.addr,
- hdr->addr1, hwaccel, queue,
+ hdr->addr1, hwaccel, rx->security_idx,
&rx->tkip_iv32,
&rx->tkip_iv16);
if (res != TKIP_DECRYPT_OK)
@@ -300,8 +293,10 @@ static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *scratch,
unsigned int hdrlen;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- b_0 = scratch + 3 * AES_BLOCK_LEN;
- aad = scratch + 4 * AES_BLOCK_LEN;
+ memset(scratch, 0, 6 * AES_BLOCK_SIZE);
+
+ b_0 = scratch + 3 * AES_BLOCK_SIZE;
+ aad = scratch + 4 * AES_BLOCK_SIZE;
/*
* Mask FC: zero subtype b4 b5 b6 (if not mgmt)
@@ -390,8 +385,10 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
struct ieee80211_key *key = tx->key;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
int hdrlen, len, tail;
- u8 *pos, *pn;
- int i;
+ u8 *pos;
+ u8 pn[6];
+ u64 pn64;
+ u8 scratch[6 * AES_BLOCK_SIZE];
if (info->control.hw_key &&
!(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
@@ -419,14 +416,14 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
hdr = (struct ieee80211_hdr *) pos;
pos += hdrlen;
- /* PN = PN + 1 */
- pn = key->u.ccmp.tx_pn;
+ pn64 = atomic64_inc_return(&key->u.ccmp.tx_pn);
- for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
- pn[i]++;
- if (pn[i])
- break;
- }
+ pn[5] = pn64;
+ pn[4] = pn64 >> 8;
+ pn[3] = pn64 >> 16;
+ pn[2] = pn64 >> 24;
+ pn[1] = pn64 >> 32;
+ pn[0] = pn64 >> 40;
ccmp_pn2hdr(pos, pn, key->conf.keyidx);
@@ -435,8 +432,8 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
return 0;
pos += CCMP_HDR_LEN;
- ccmp_special_blocks(skb, pn, key->u.ccmp.tx_crypto_buf, 0);
- ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, key->u.ccmp.tx_crypto_buf, pos, len,
+ ccmp_special_blocks(skb, pn, scratch, 0);
+ ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, scratch, pos, len,
pos, skb_put(skb, CCMP_MIC_LEN));
return 0;
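
The unrolled byte stores above pack the low 48 bits of the new atomic64 packet-number counter with pn[0] as the most significant byte, the order ccmp_pn2hdr() and the receive-side replay check expect. The same packing written as a loop, purely as an editorial illustration:

/* Editorial illustration, not part of the patch: equivalent to the six
 * unrolled assignments above (pn[5] = low byte ... pn[0] = bits 47..40). */
static inline void hypothetical_ccmp_pn_pack(u8 pn[CCMP_PN_LEN], u64 pn64)
{
	int i;

	for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
		pn[i] = pn64 & 0xff;
		pn64 >>= 8;
	}
}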
@@ -483,8 +480,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
ccmp_hdr2pn(pn, skb->data + hdrlen);
- queue = ieee80211_is_mgmt(hdr->frame_control) ?
- NUM_RX_DATA_QUEUES : rx->queue;
+ queue = rx->security_idx;
if (memcmp(pn, key->u.ccmp.rx_pn[queue], CCMP_PN_LEN) <= 0) {
key->u.ccmp.replays++;
@@ -492,11 +488,12 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
}
if (!(status->flag & RX_FLAG_DECRYPTED)) {
+ u8 scratch[6 * AES_BLOCK_SIZE];
/* hardware didn't decrypt/verify MIC */
- ccmp_special_blocks(skb, pn, key->u.ccmp.rx_crypto_buf, 1);
+ ccmp_special_blocks(skb, pn, scratch, 1);
if (ieee80211_aes_ccm_decrypt(
- key->u.ccmp.tfm, key->u.ccmp.rx_crypto_buf,
+ key->u.ccmp.tfm, scratch,
skb->data + hdrlen + CCMP_HDR_LEN, data_len,
skb->data + skb->len - CCMP_MIC_LEN,
skb->data + hdrlen + CCMP_HDR_LEN))
@@ -527,6 +524,16 @@ static void bip_aad(struct sk_buff *skb, u8 *aad)
}
+static inline void bip_ipn_set64(u8 *d, u64 pn)
+{
+ *d++ = pn;
+ *d++ = pn >> 8;
+ *d++ = pn >> 16;
+ *d++ = pn >> 24;
+ *d++ = pn >> 32;
+ *d = pn >> 40;
+}
+
static inline void bip_ipn_swap(u8 *d, const u8 *s)
{
*d++ = s[5];
@@ -545,8 +552,8 @@ ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_key *key = tx->key;
struct ieee80211_mmie *mmie;
- u8 *pn, aad[20];
- int i;
+ u8 aad[20];
+ u64 pn64;
if (info->control.hw_key)
return 0;
@@ -560,22 +567,17 @@ ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx)
mmie->key_id = cpu_to_le16(key->conf.keyidx);
/* PN = PN + 1 */
- pn = key->u.aes_cmac.tx_pn;
+ pn64 = atomic64_inc_return(&key->u.aes_cmac.tx_pn);
- for (i = sizeof(key->u.aes_cmac.tx_pn) - 1; i >= 0; i--) {
- pn[i]++;
- if (pn[i])
- break;
- }
- bip_ipn_swap(mmie->sequence_number, pn);
+ bip_ipn_set64(mmie->sequence_number, pn64);
bip_aad(skb, aad);
/*
* MIC = AES-128-CMAC(IGTK, AAD || Management Frame Body || MMIE, 64)
*/
- ieee80211_aes_cmac(key->u.aes_cmac.tfm, key->u.aes_cmac.tx_crypto_buf,
- aad, skb->data + 24, skb->len - 24, mmie->mic);
+ ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad,
+ skb->data + 24, skb->len - 24, mmie->mic);
return TX_CONTINUE;
}
@@ -613,8 +615,7 @@ ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx)
if (!(status->flag & RX_FLAG_DECRYPTED)) {
/* hardware didn't decrypt/verify MIC */
bip_aad(skb, aad);
- ieee80211_aes_cmac(key->u.aes_cmac.tfm,
- key->u.aes_cmac.rx_crypto_buf, aad,
+ ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad,
skb->data + 24, skb->len - 24, mic);
if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
key->u.aes_cmac.icverrors++;
diff --git a/net/netlabel/Makefile b/net/netlabel/Makefile
index ea750e9..d2732fc 100644
--- a/net/netlabel/Makefile
+++ b/net/netlabel/Makefile
@@ -1,8 +1,6 @@
#
# Makefile for the NetLabel subsystem.
#
-# Feb 9, 2006, Paul Moore <paul.moore@hp.com>
-#
# base objects
obj-y := netlabel_user.o netlabel_kapi.o
diff --git a/net/netlabel/netlabel_addrlist.c b/net/netlabel/netlabel_addrlist.c
index c051913..96b749d 100644
--- a/net/netlabel/netlabel_addrlist.c
+++ b/net/netlabel/netlabel_addrlist.c
@@ -6,7 +6,7 @@
* system manages static and dynamic label mappings for network protocols such
* as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/net/netlabel/netlabel_addrlist.h b/net/netlabel/netlabel_addrlist.h
index 2b9644e..fdbc1d2 100644
--- a/net/netlabel/netlabel_addrlist.h
+++ b/net/netlabel/netlabel_addrlist.h
@@ -6,7 +6,7 @@
* system manages static and dynamic label mappings for network protocols such
* as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
index bae5756..6bf8783 100644
--- a/net/netlabel/netlabel_cipso_v4.c
+++ b/net/netlabel/netlabel_cipso_v4.c
@@ -5,7 +5,7 @@
* NetLabel system manages static and dynamic label mappings for network
* protocols such as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
@@ -39,7 +39,7 @@
#include <net/genetlink.h>
#include <net/netlabel.h>
#include <net/cipso_ipv4.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "netlabel_user.h"
#include "netlabel_cipso_v4.h"
diff --git a/net/netlabel/netlabel_cipso_v4.h b/net/netlabel/netlabel_cipso_v4.h
index af7f335..d24d774 100644
--- a/net/netlabel/netlabel_cipso_v4.h
+++ b/net/netlabel/netlabel_cipso_v4.h
@@ -5,7 +5,7 @@
* NetLabel system manages static and dynamic label mappings for network
* protocols such as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index 10b273a..bf99567 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -6,7 +6,7 @@
* system manages static and dynamic label mappings for network protocols such
* as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
@@ -55,8 +55,7 @@ struct netlbl_domhsh_tbl {
* should be okay */
static DEFINE_SPINLOCK(netlbl_domhsh_lock);
#define netlbl_domhsh_rcu_deref(p) \
- rcu_dereference_check(p, rcu_read_lock_held() || \
- lockdep_is_held(&netlbl_domhsh_lock))
+ rcu_dereference_check(p, lockdep_is_held(&netlbl_domhsh_lock))
static struct netlbl_domhsh_tbl *netlbl_domhsh = NULL;
static struct netlbl_dom_map *netlbl_domhsh_def = NULL;
@@ -258,7 +257,7 @@ static int netlbl_domhsh_validate(const struct netlbl_dom_map *entry)
{
struct netlbl_af4list *iter4;
struct netlbl_domaddr4_map *map4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct netlbl_af6list *iter6;
struct netlbl_domaddr6_map *map6;
#endif /* IPv6 */
@@ -292,7 +291,7 @@ static int netlbl_domhsh_validate(const struct netlbl_dom_map *entry)
return -EINVAL;
}
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach(iter6, &entry->type_def.addrsel->list6) {
map6 = netlbl_domhsh_addr6_entry(iter6);
switch (map6->type) {
@@ -521,7 +520,7 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
if (entry != rcu_dereference(netlbl_domhsh_def))
list_del_rcu(&entry->list);
else
- rcu_assign_pointer(netlbl_domhsh_def, NULL);
+ RCU_INIT_POINTER(netlbl_domhsh_def, NULL);
} else
ret_val = -ENOENT;
spin_unlock(&netlbl_domhsh_lock);
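
Two idioms recur in the hunks above: IS_ENABLED(CONFIG_IPV6) replaces the open-coded defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) test, and RCU_INIT_POINTER() replaces rcu_assign_pointer() when the value being published is NULL, since there is no initialised payload whose stores need ordering before the pointer update. A hedged kernel-context fragment (not a standalone program) illustrating the latter; struct cfg and global_cfg are made-up names and <linux/rcupdate.h> is assumed:

    struct cfg { int value; };
    static struct cfg __rcu *global_cfg;

    static void publish(struct cfg *newcfg)
    {
        /* rcu_assign_pointer() orders the initialisation of *newcfg before
         * the pointer becomes visible to concurrent readers. */
        rcu_assign_pointer(global_cfg, newcfg);
    }

    static void unpublish(void)
    {
        /* Publishing NULL gives readers nothing to dereference, so the
         * cheaper RCU_INIT_POINTER() (no ordering) is enough -- the
         * substitution made in netlbl_domhsh_remove_entry() above. */
        RCU_INIT_POINTER(global_cfg, NULL);
    }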
diff --git a/net/netlabel/netlabel_domainhash.h b/net/netlabel/netlabel_domainhash.h
index 0261dda..bfcc0f7 100644
--- a/net/netlabel/netlabel_domainhash.h
+++ b/net/netlabel/netlabel_domainhash.h
@@ -6,7 +6,7 @@
* system manages static and dynamic label mappings for network protocols such
* as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index 1b83e00..bcecae0 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -5,7 +5,7 @@
* system manages static and dynamic label mappings for network protocols such
* as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
@@ -39,7 +39,7 @@
#include <net/netlabel.h>
#include <net/cipso_ipv4.h>
#include <asm/bug.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "netlabel_domainhash.h"
#include "netlabel_unlabeled.h"
@@ -111,8 +111,6 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
struct netlbl_domaddr_map *addrmap = NULL;
struct netlbl_domaddr4_map *map4 = NULL;
struct netlbl_domaddr6_map *map6 = NULL;
- const struct in_addr *addr4, *mask4;
- const struct in6_addr *addr6, *mask6;
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (entry == NULL)
@@ -133,9 +131,9 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
INIT_LIST_HEAD(&addrmap->list6);
switch (family) {
- case AF_INET:
- addr4 = addr;
- mask4 = mask;
+ case AF_INET: {
+ const struct in_addr *addr4 = addr;
+ const struct in_addr *mask4 = mask;
map4 = kzalloc(sizeof(*map4), GFP_ATOMIC);
if (map4 == NULL)
goto cfg_unlbl_map_add_failure;
@@ -148,9 +146,11 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
if (ret_val != 0)
goto cfg_unlbl_map_add_failure;
break;
- case AF_INET6:
- addr6 = addr;
- mask6 = mask;
+ }
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ case AF_INET6: {
+ const struct in6_addr *addr6 = addr;
+ const struct in6_addr *mask6 = mask;
map6 = kzalloc(sizeof(*map6), GFP_ATOMIC);
if (map6 == NULL)
goto cfg_unlbl_map_add_failure;
@@ -162,11 +162,13 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
map6->list.addr.s6_addr32[3] &= mask6->s6_addr32[3];
ipv6_addr_copy(&map6->list.mask, mask6);
map6->list.valid = 1;
- ret_val = netlbl_af4list_add(&map4->list,
- &addrmap->list4);
+ ret_val = netlbl_af6list_add(&map6->list,
+ &addrmap->list6);
if (ret_val != 0)
goto cfg_unlbl_map_add_failure;
break;
+ }
+#endif /* IPv6 */
default:
goto cfg_unlbl_map_add_failure;
break;
@@ -225,9 +227,11 @@ int netlbl_cfg_unlbl_static_add(struct net *net,
case AF_INET:
addr_len = sizeof(struct in_addr);
break;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
addr_len = sizeof(struct in6_addr);
break;
+#endif /* IPv6 */
default:
return -EPFNOSUPPORT;
}
@@ -266,9 +270,11 @@ int netlbl_cfg_unlbl_static_del(struct net *net,
case AF_INET:
addr_len = sizeof(struct in_addr);
break;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
addr_len = sizeof(struct in6_addr);
break;
+#endif /* IPv6 */
default:
return -EPFNOSUPPORT;
}
@@ -341,11 +347,11 @@ int netlbl_cfg_cipsov4_map_add(u32 doi,
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (entry == NULL)
- return -ENOMEM;
+ goto out_entry;
if (domain != NULL) {
entry->domain = kstrdup(domain, GFP_ATOMIC);
if (entry->domain == NULL)
- goto cfg_cipsov4_map_add_failure;
+ goto out_domain;
}
if (addr == NULL && mask == NULL) {
@@ -354,13 +360,13 @@ int netlbl_cfg_cipsov4_map_add(u32 doi,
} else if (addr != NULL && mask != NULL) {
addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC);
if (addrmap == NULL)
- goto cfg_cipsov4_map_add_failure;
+ goto out_addrmap;
INIT_LIST_HEAD(&addrmap->list4);
INIT_LIST_HEAD(&addrmap->list6);
addrinfo = kzalloc(sizeof(*addrinfo), GFP_ATOMIC);
if (addrinfo == NULL)
- goto cfg_cipsov4_map_add_failure;
+ goto out_addrinfo;
addrinfo->type_def.cipsov4 = doi_def;
addrinfo->type = NETLBL_NLTYPE_CIPSOV4;
addrinfo->list.addr = addr->s_addr & mask->s_addr;
@@ -374,7 +380,7 @@ int netlbl_cfg_cipsov4_map_add(u32 doi,
entry->type = NETLBL_NLTYPE_ADDRSELECT;
} else {
ret_val = -EINVAL;
- goto cfg_cipsov4_map_add_failure;
+ goto out_addrmap;
}
ret_val = netlbl_domhsh_add(entry, audit_info);
@@ -384,11 +390,15 @@ int netlbl_cfg_cipsov4_map_add(u32 doi,
return 0;
cfg_cipsov4_map_add_failure:
- cipso_v4_doi_putdef(doi_def);
+ kfree(addrinfo);
+out_addrinfo:
+ kfree(addrmap);
+out_addrmap:
kfree(entry->domain);
+out_domain:
kfree(entry);
- kfree(addrmap);
- kfree(addrinfo);
+out_entry:
+ cipso_v4_doi_putdef(doi_def);
return ret_val;
}
@@ -513,7 +523,7 @@ int netlbl_secattr_catmap_walk_rng(struct netlbl_lsm_secattr_catmap *catmap,
/**
* netlbl_secattr_catmap_setbit - Set a bit in a LSM secattr catmap
- * @catmap: the category bitmap
+ * @catmap: pointer to the category bitmap
* @bit: the bit to set
* @flags: memory allocation flags
*
@@ -522,18 +532,25 @@ int netlbl_secattr_catmap_walk_rng(struct netlbl_lsm_secattr_catmap *catmap,
* negative values on failure.
*
*/
-int netlbl_secattr_catmap_setbit(struct netlbl_lsm_secattr_catmap *catmap,
+int netlbl_secattr_catmap_setbit(struct netlbl_lsm_secattr_catmap **catmap,
u32 bit,
gfp_t flags)
{
- struct netlbl_lsm_secattr_catmap *iter = catmap;
+ struct netlbl_lsm_secattr_catmap *iter = *catmap;
u32 node_bit;
u32 node_idx;
while (iter->next != NULL &&
bit >= (iter->startbit + NETLBL_CATMAP_SIZE))
iter = iter->next;
- if (bit >= (iter->startbit + NETLBL_CATMAP_SIZE)) {
+ if (bit < iter->startbit) {
+ iter = netlbl_secattr_catmap_alloc(flags);
+ if (iter == NULL)
+ return -ENOMEM;
+ iter->next = *catmap;
+ iter->startbit = bit & ~(NETLBL_CATMAP_SIZE - 1);
+ *catmap = iter;
+ } else if (bit >= (iter->startbit + NETLBL_CATMAP_SIZE)) {
iter->next = netlbl_secattr_catmap_alloc(flags);
if (iter->next == NULL)
return -ENOMEM;
@@ -551,7 +568,7 @@ int netlbl_secattr_catmap_setbit(struct netlbl_lsm_secattr_catmap *catmap,
/**
* netlbl_secattr_catmap_setrng - Set a range of bits in a LSM secattr catmap
- * @catmap: the category bitmap
+ * @catmap: pointer to the category bitmap
* @start: the starting bit
* @end: the last bit in the string
* @flags: memory allocation flags
@@ -561,15 +578,16 @@ int netlbl_secattr_catmap_setbit(struct netlbl_lsm_secattr_catmap *catmap,
* on success, negative values on failure.
*
*/
-int netlbl_secattr_catmap_setrng(struct netlbl_lsm_secattr_catmap *catmap,
+int netlbl_secattr_catmap_setrng(struct netlbl_lsm_secattr_catmap **catmap,
u32 start,
u32 end,
gfp_t flags)
{
int ret_val = 0;
- struct netlbl_lsm_secattr_catmap *iter = catmap;
+ struct netlbl_lsm_secattr_catmap *iter = *catmap;
u32 iter_max_spot;
u32 spot;
+ u32 orig_spot = iter->startbit;
/* XXX - This could probably be made a bit faster by combining writes
* to the catmap instead of setting a single bit each time, but for
@@ -587,7 +605,9 @@ int netlbl_secattr_catmap_setrng(struct netlbl_lsm_secattr_catmap *catmap,
iter = iter->next;
iter_max_spot = iter->startbit + NETLBL_CATMAP_SIZE;
}
- ret_val = netlbl_secattr_catmap_setbit(iter, spot, GFP_ATOMIC);
+ ret_val = netlbl_secattr_catmap_setbit(&iter, spot, flags);
+ if (iter->startbit < orig_spot)
+ *catmap = iter;
}
return ret_val;
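
The setbit/setrng changes above switch the API to take a pointer to the catmap head so that a node can be prepended when the requested bit lies below the first node's start bit. A small runnable userspace sketch of that prepend path, assuming an illustrative 256-bit node size and made-up names (struct catmap, setbit):

    #include <stdlib.h>
    #include <stdio.h>

    #define NODE_BITS 256u

    struct catmap {
        unsigned startbit;                    /* first bit covered by this node */
        unsigned long long bits[NODE_BITS / 64];
        struct catmap *next;
    };

    static int setbit(struct catmap **head, unsigned bit)
    {
        struct catmap *iter = *head;

        while (iter->next && bit >= iter->startbit + NODE_BITS)
            iter = iter->next;

        if (bit < iter->startbit) {           /* new case: bit precedes the head */
            struct catmap *n = calloc(1, sizeof(*n));
            if (!n)
                return -1;
            n->startbit = bit & ~(NODE_BITS - 1);
            n->next = *head;
            *head = n;                        /* head pointer is updated in place */
            iter = n;
        } else if (bit >= iter->startbit + NODE_BITS) {
            struct catmap *n = calloc(1, sizeof(*n));
            if (!n)
                return -1;
            n->startbit = bit & ~(NODE_BITS - 1);
            iter->next = n;
            iter = n;
        }

        bit -= iter->startbit;
        iter->bits[bit / 64] |= 1ULL << (bit % 64);
        return 0;
    }

    int main(void)
    {
        struct catmap *head = calloc(1, sizeof(*head));
        head->startbit = 512;                 /* list starts above bit 0 */
        setbit(&head, 3);                     /* forces the prepend path */
        printf("head now starts at %u\n", head->startbit);   /* prints 0 */
        return 0;
    }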
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
index 4f251b1..bfa5558 100644
--- a/net/netlabel/netlabel_mgmt.c
+++ b/net/netlabel/netlabel_mgmt.c
@@ -5,7 +5,7 @@
* NetLabel system manages static and dynamic label mappings for network
* protocols such as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
@@ -42,7 +42,7 @@
#include <net/ipv6.h>
#include <net/netlabel.h>
#include <net/cipso_ipv4.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "netlabel_domainhash.h"
#include "netlabel_user.h"
diff --git a/net/netlabel/netlabel_mgmt.h b/net/netlabel/netlabel_mgmt.h
index 0a25838..5a9f31c 100644
--- a/net/netlabel/netlabel_mgmt.h
+++ b/net/netlabel/netlabel_mgmt.h
@@ -5,7 +5,7 @@
* NetLabel system manages static and dynamic label mappings for network
* protocols such as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
@@ -32,7 +32,7 @@
#define _NETLABEL_MGMT_H
#include <net/netlabel.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
/*
* The following NetLabel payloads are supported by the management interface.
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 0f0e907..23267b3 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -5,7 +5,7 @@
* NetLabel system. The NetLabel system manages static and dynamic label
* mappings for network protocols such as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
@@ -52,7 +52,7 @@
#include <net/net_namespace.h>
#include <net/netlabel.h>
#include <asm/bug.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "netlabel_user.h"
#include "netlabel_addrlist.h"
@@ -116,8 +116,7 @@ struct netlbl_unlhsh_walk_arg {
* hash table should be okay */
static DEFINE_SPINLOCK(netlbl_unlhsh_lock);
#define netlbl_unlhsh_rcu_deref(p) \
- rcu_dereference_check(p, rcu_read_lock_held() || \
- lockdep_is_held(&netlbl_unlhsh_lock))
+ rcu_dereference_check(p, lockdep_is_held(&netlbl_unlhsh_lock))
static struct netlbl_unlhsh_tbl *netlbl_unlhsh = NULL;
static struct netlbl_unlhsh_iface *netlbl_unlhsh_def = NULL;
@@ -426,10 +425,9 @@ int netlbl_unlhsh_add(struct net *net,
audit_info);
switch (addr_len) {
case sizeof(struct in_addr): {
- struct in_addr *addr4, *mask4;
+ const struct in_addr *addr4 = addr;
+ const struct in_addr *mask4 = mask;
- addr4 = (struct in_addr *)addr;
- mask4 = (struct in_addr *)mask;
ret_val = netlbl_unlhsh_add_addr4(iface, addr4, mask4, secid);
if (audit_buf != NULL)
netlbl_af4list_audit_addr(audit_buf, 1,
@@ -440,10 +438,9 @@ int netlbl_unlhsh_add(struct net *net,
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case sizeof(struct in6_addr): {
- struct in6_addr *addr6, *mask6;
+ const struct in6_addr *addr6 = addr;
+ const struct in6_addr *mask6 = mask;
- addr6 = (struct in6_addr *)addr;
- mask6 = (struct in6_addr *)mask;
ret_val = netlbl_unlhsh_add_addr6(iface, addr6, mask6, secid);
if (audit_buf != NULL)
netlbl_af6list_audit_addr(audit_buf, 1,
@@ -624,7 +621,7 @@ static void netlbl_unlhsh_condremove_iface(struct netlbl_unlhsh_iface *iface)
if (iface->ifindex > 0)
list_del_rcu(&iface->list);
else
- rcu_assign_pointer(netlbl_unlhsh_def, NULL);
+ RCU_INIT_POINTER(netlbl_unlhsh_def, NULL);
spin_unlock(&netlbl_unlhsh_lock);
call_rcu(&iface->rcu, netlbl_unlhsh_free_iface);
@@ -1445,11 +1442,9 @@ int __init netlbl_unlabel_init(u32 size)
for (iter = 0; iter < hsh_tbl->size; iter++)
INIT_LIST_HEAD(&hsh_tbl->tbl[iter]);
- rcu_read_lock();
spin_lock(&netlbl_unlhsh_lock);
rcu_assign_pointer(netlbl_unlhsh, hsh_tbl);
spin_unlock(&netlbl_unlhsh_lock);
- rcu_read_unlock();
register_netdevice_notifier(&netlbl_unlhsh_netdev_notifier);
diff --git a/net/netlabel/netlabel_unlabeled.h b/net/netlabel/netlabel_unlabeled.h
index 0bc8dc3..700af49 100644
--- a/net/netlabel/netlabel_unlabeled.h
+++ b/net/netlabel/netlabel_unlabeled.h
@@ -5,7 +5,7 @@
* NetLabel system. The NetLabel system manages static and dynamic label
* mappings for network protocols such as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/net/netlabel/netlabel_user.c b/net/netlabel/netlabel_user.c
index a3fd75a..9fae63f 100644
--- a/net/netlabel/netlabel_user.c
+++ b/net/netlabel/netlabel_user.c
@@ -5,7 +5,7 @@
* NetLabel system manages static and dynamic label mappings for network
* protocols such as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/net/netlabel/netlabel_user.h b/net/netlabel/netlabel_user.h
index f4fc4c9..8196978 100644
--- a/net/netlabel/netlabel_user.h
+++ b/net/netlabel/netlabel_user.h
@@ -5,7 +5,7 @@
* NetLabel system manages static and dynamic label mappings for network
* protocols such as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 3df7c5a..b4d889b 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1182,10 +1182,9 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
sax->sax25_family = AF_NETROM;
skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,
AX25_ADDR_LEN);
+ msg->msg_namelen = sizeof(*sax);
}
- msg->msg_namelen = sizeof(*sax);
-
skb_free_datagram(sk, skb);
release_sock(sk);
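
The NET/ROM fix above only reports a name length when the caller actually passed a msg_name buffer, so recvmsg() no longer claims an address was written into a buffer that does not exist. A hedged userspace-style sketch of the same pattern; struct peer_name and fill_peer_name() are illustrative stand-ins, not the kernel API:

    #include <string.h>
    #include <stddef.h>

    struct peer_name { char callsign[10]; };

    struct msg {
        void   *msg_name;      /* optional caller-provided address buffer */
        size_t  msg_namelen;   /* how many bytes of it were filled in     */
    };

    static void fill_peer_name(struct msg *m, const char *callsign)
    {
        if (m->msg_name) {
            struct peer_name *pn = m->msg_name;

            memset(pn, 0, sizeof(*pn));
            strncpy(pn->callsign, callsign, sizeof(pn->callsign) - 1);
            m->msg_namelen = sizeof(*pn);   /* set only on this path */
        }
        /* no msg_name supplied: leave msg_namelen untouched */
    }

    int main(void)
    {
        struct peer_name buf;
        struct msg with = { &buf, 0 }, without = { NULL, 0 };

        fill_peer_name(&with, "G0ABC");      /* with.msg_namelen == sizeof(buf) */
        fill_peer_name(&without, "G0ABC");   /* without.msg_namelen stays 0     */
        return 0;
    }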
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 44059d0..915a87b 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -37,6 +37,7 @@
#include <linux/spinlock.h>
#include <net/netrom.h>
#include <linux/seq_file.h>
+#include <linux/export.h>
static unsigned int nr_neigh_no = 1;
@@ -257,9 +258,12 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
case 3:
if (nr_node->routes[1].quality > nr_node->routes[0].quality) {
switch (nr_node->which) {
- case 0: nr_node->which = 1; break;
- case 1: nr_node->which = 0; break;
- default: break;
+ case 0:
+ nr_node->which = 1;
+ break;
+ case 1:
+ nr_node->which = 0;
+ break;
}
nr_route = nr_node->routes[0];
nr_node->routes[0] = nr_node->routes[1];
@@ -505,12 +509,13 @@ static int nr_dec_obs(void)
s->count--;
switch (i) {
- case 0:
- s->routes[0] = s->routes[1];
- case 1:
- s->routes[1] = s->routes[2];
- case 2:
- break;
+ case 0:
+ s->routes[0] = s->routes[1];
+ /* Fallthrough */
+ case 1:
+ s->routes[1] = s->routes[2];
+ case 2:
+ break;
}
break;
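
The nr_dec_obs() hunk keeps the deliberate case 0 into case 1 fall-through (shifting the remaining route slots down) but now marks it with a comment so the intent is explicit. A small runnable sketch of the same shift-by-fallthrough idiom:

    #include <stdio.h>

    int main(void)
    {
        int routes[3] = { 10, 20, 30 };
        int i = 0;                        /* slot being removed */

        switch (i) {
        case 0:
            routes[0] = routes[1];
            /* fall through */
        case 1:
            routes[1] = routes[2];
            /* fall through */
        case 2:
            break;
        }

        printf("%d %d\n", routes[0], routes[1]);   /* prints 20 30 */
        return 0;
    }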
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig
index 8e12c8a..78efe89 100644
--- a/net/rfkill/Kconfig
+++ b/net/rfkill/Kconfig
@@ -10,11 +10,6 @@ menuconfig RFKILL
To compile this driver as a module, choose M here: the
module will be called rfkill.
-config RFKILL_PM
- bool "Power off on suspend"
- depends on RFKILL && PM
- default y
-
# LED trigger support
config RFKILL_LEDS
bool
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index df2dae6..5be1957 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -235,7 +235,7 @@ static bool __rfkill_set_hw_state(struct rfkill *rfkill,
else
rfkill->state &= ~RFKILL_BLOCK_HW;
*change = prev != blocked;
- any = rfkill->state & RFKILL_BLOCK_ANY;
+ any = !!(rfkill->state & RFKILL_BLOCK_ANY);
spin_unlock_irqrestore(&rfkill->lock, flags);
rfkill_led_trigger_event(rfkill);
@@ -769,7 +769,6 @@ void rfkill_pause_polling(struct rfkill *rfkill)
}
EXPORT_SYMBOL(rfkill_pause_polling);
-#ifdef CONFIG_RFKILL_PM
void rfkill_resume_polling(struct rfkill *rfkill)
{
BUG_ON(!rfkill);
@@ -804,17 +803,14 @@ static int rfkill_resume(struct device *dev)
return 0;
}
-#endif
static struct class rfkill_class = {
.name = "rfkill",
.dev_release = rfkill_release,
.dev_attrs = rfkill_dev_attrs,
.dev_uevent = rfkill_dev_uevent,
-#ifdef CONFIG_RFKILL_PM
.suspend = rfkill_suspend,
.resume = rfkill_resume,
-#endif
};
bool rfkill_blocked(struct rfkill *rfkill)
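
In __rfkill_set_hw_state() the masked test is now normalised with "!!" before being stored. One plausible motivation, shown in the sketch below with made-up constants: a multi-bit mask result can lose its truth value when assigned to a narrower integer or bitfield, so forcing it to 0/1 makes the assignment safe regardless of the destination type:

    #include <stdio.h>

    #define BLOCK_HW  0x100   /* note: bit above the low byte */
    #define BLOCK_SW  0x200
    #define BLOCK_ANY (BLOCK_HW | BLOCK_SW)

    struct state { unsigned char any; };   /* narrow storage, like a bitfield */

    int main(void)
    {
        unsigned flags = BLOCK_HW;
        struct state s1 = { .any = flags & BLOCK_ANY };     /* truncates to 0 */
        struct state s2 = { .any = !!(flags & BLOCK_ANY) }; /* normalised to 1 */

        printf("without !!: %u, with !!: %u\n", s1.any, s2.any);
        return 0;
    }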
diff --git a/net/rfkill/input.c b/net/rfkill/input.c
index 1bca6d4..24c55c5 100644
--- a/net/rfkill/input.c
+++ b/net/rfkill/input.c
@@ -15,6 +15,7 @@
#include <linux/input.h>
#include <linux/slab.h>
+#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/init.h>
#include <linux/rfkill.h>
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index 256c5dd..128677d 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -101,6 +101,14 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
if (!rfkill)
return -ENOMEM;
+ if (pdata->gpio_runtime_setup) {
+ ret = pdata->gpio_runtime_setup(pdev);
+ if (ret) {
+ pr_warn("%s: can't set up gpio\n", __func__);
+ return ret;
+ }
+ }
+
rfkill->pdata = pdata;
len = strlen(pdata->name);
@@ -182,7 +190,10 @@ fail_alloc:
static int rfkill_gpio_remove(struct platform_device *pdev)
{
struct rfkill_gpio_data *rfkill = platform_get_drvdata(pdev);
+ struct rfkill_gpio_platform_data *pdata = pdev->dev.platform_data;
+ if (pdata->gpio_runtime_close)
+ pdata->gpio_runtime_close(pdev);
rfkill_unregister(rfkill->rfkill_dev);
rfkill_destroy(rfkill->rfkill_dev);
if (gpio_is_valid(rfkill->pdata->shutdown_gpio))
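
The rfkill-gpio hunks wire up optional gpio_runtime_setup/gpio_runtime_close platform callbacks: probe fails early if the setup hook exists and returns an error, and remove invokes the close hook when present. A hedged fragment of that optional-hook pattern; struct platform_hooks and the function names are illustrative, not the rfkill-gpio ABI:

    #include <stdio.h>

    struct platform_hooks {
        int  (*runtime_setup)(void *dev);   /* optional */
        void (*runtime_close)(void *dev);   /* optional */
    };

    static int probe(struct platform_hooks *h, void *dev)
    {
        if (h->runtime_setup) {
            int ret = h->runtime_setup(dev);
            if (ret) {
                fprintf(stderr, "can't set up gpio\n");
                return ret;             /* abort the probe, as above */
            }
        }
        return 0;                       /* continue with normal setup */
    }

    static void teardown(struct platform_hooks *h, void *dev)
    {
        if (h->runtime_close)
            h->runtime_close(dev);      /* undo whatever setup did */
    }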
diff --git a/net/rfkill/rfkill-regulator.c b/net/rfkill/rfkill-regulator.c
index 18dc512..3ca7277 100644
--- a/net/rfkill/rfkill-regulator.c
+++ b/net/rfkill/rfkill-regulator.c
@@ -90,7 +90,6 @@ static int __devinit rfkill_regulator_probe(struct platform_device *pdev)
pdata->type,
&rfkill_regulator_ops, rfkill_data);
if (rf_kill == NULL) {
- dev_err(&pdev->dev, "Cannot alloc rfkill device\n");
ret = -ENOMEM;
goto err_rfkill_alloc;
}
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 1f96fb9..233dbe6 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -195,7 +195,8 @@ static void rose_kill_by_device(struct net_device *dev)
if (rose->device == dev) {
rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
- rose->neighbour->use--;
+ if (rose->neighbour)
+ rose->neighbour->use--;
rose->device = NULL;
}
}
@@ -1221,7 +1222,6 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
- struct sockaddr_rose *srose = (struct sockaddr_rose *)msg->msg_name;
size_t copied;
unsigned char *asmptr;
struct sk_buff *skb;
@@ -1257,24 +1257,19 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
- if (srose != NULL) {
- memset(srose, 0, msg->msg_namelen);
+ if (msg->msg_name) {
+ struct sockaddr_rose *srose;
+ struct full_sockaddr_rose *full_srose = msg->msg_name;
+
+ memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose));
+ srose = msg->msg_name;
srose->srose_family = AF_ROSE;
srose->srose_addr = rose->dest_addr;
srose->srose_call = rose->dest_call;
srose->srose_ndigis = rose->dest_ndigis;
- if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) {
- struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name;
- for (n = 0 ; n < rose->dest_ndigis ; n++)
- full_srose->srose_digis[n] = rose->dest_digis[n];
- msg->msg_namelen = sizeof(struct full_sockaddr_rose);
- } else {
- if (rose->dest_ndigis >= 1) {
- srose->srose_ndigis = 1;
- srose->srose_digi = rose->dest_digis[0];
- }
- msg->msg_namelen = sizeof(struct sockaddr_rose);
- }
+ for (n = 0 ; n < rose->dest_ndigis ; n++)
+ full_srose->srose_digis[n] = rose->dest_digis[n];
+ msg->msg_namelen = sizeof(struct full_sockaddr_rose);
}
skb_free_datagram(sk, skb);
diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c
index fa5f564..7a02bd1 100644
--- a/net/rose/rose_link.c
+++ b/net/rose/rose_link.c
@@ -266,13 +266,6 @@ void rose_transmit_link(struct sk_buff *skb, struct rose_neigh *neigh)
{
unsigned char *dptr;
-#if 0
- if (call_fw_firewall(PF_ROSE, skb->dev, skb->data, NULL, &skb) != FW_ACCEPT) {
- kfree_skb(skb);
- return;
- }
-#endif
-
if (neigh->loopback) {
rose_loopback_queue(skb, neigh);
return;
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 479cae5..cd9b7ee 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -36,6 +36,7 @@
#include <linux/init.h>
#include <net/rose.h>
#include <linux/seq_file.h>
+#include <linux/export.h>
static unsigned int rose_neigh_no = 1;
@@ -864,11 +865,6 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
int res = 0;
char buf[11];
-#if 0
- if (call_in_firewall(PF_ROSE, skb->dev, skb->data, NULL, &skb) != FW_ACCEPT)
- return res;
-#endif
-
if (skb->len < ROSE_MIN_LEN)
return res;
frametype = skb->data[2];
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
index 5f22e26..338d793 100644
--- a/net/rxrpc/ar-output.c
+++ b/net/rxrpc/ar-output.c
@@ -13,6 +13,7 @@
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/circ_buf.h>
+#include <linux/export.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
index 0c65013..5cc2da5 100644
--- a/net/rxrpc/ar-recvmsg.c
+++ b/net/rxrpc/ar-recvmsg.c
@@ -11,6 +11,7 @@
#include <linux/net.h>
#include <linux/skbuff.h>
+#include <linux/export.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"
@@ -86,7 +87,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
if (!skb) {
/* nothing remains on the queue */
if (copied &&
- (msg->msg_flags & MSG_PEEK || timeo == 0))
+ (flags & MSG_PEEK || timeo == 0))
goto out;
/* wait for a message to turn up */
@@ -142,10 +143,13 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
/* copy the peer address and timestamp */
if (!continue_call) {
- if (msg->msg_name && msg->msg_namelen > 0)
+ if (msg->msg_name) {
+ size_t len =
+ sizeof(call->conn->trans->peer->srx);
memcpy(msg->msg_name,
- &call->conn->trans->peer->srx,
- sizeof(call->conn->trans->peer->srx));
+ &call->conn->trans->peer->srx, len);
+ msg->msg_namelen = len;
+ }
sock_recv_ts_and_drops(msg, &rx->sk, skb);
}
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 6c8c8da..d014b05 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -280,6 +280,9 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
asoc->peer.asconf_capable = 0;
if (sctp_addip_noauth)
asoc->peer.asconf_capable = 1;
+ asoc->asconf_addr_del_pending = NULL;
+ asoc->src_out_of_asoc_ok = 0;
+ asoc->new_transport = NULL;
/* Create an input queue. */
sctp_inq_init(&asoc->base.inqueue);
@@ -386,7 +389,7 @@ void sctp_association_free(struct sctp_association *asoc)
/* Only real associations count against the endpoint, so
* don't bother if this is a temporary association.
*/
- if (!asoc->temp) {
+ if (!list_empty(&asoc->asocs)) {
list_del(&asoc->asocs);
/* Decrement the backlog value for a TCP-style listening
@@ -446,6 +449,10 @@ void sctp_association_free(struct sctp_association *asoc)
sctp_asconf_queue_teardown(asoc);
+ /* Free pending address space being deleted */
+ if (asoc->asconf_addr_del_pending != NULL)
+ kfree(asoc->asconf_addr_del_pending);
+
/* AUTH - Free the endpoint shared keys */
sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);
@@ -1181,6 +1188,7 @@ void sctp_assoc_update(struct sctp_association *asoc,
asoc->c = new->c;
asoc->peer.rwnd = new->peer.rwnd;
asoc->peer.sack_needed = new->peer.sack_needed;
+ asoc->peer.auth_capable = new->peer.auth_capable;
asoc->peer.i = new->peer.i;
sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
asoc->peer.i.initial_tsn, GFP_ATOMIC);
@@ -1264,7 +1272,6 @@ void sctp_assoc_update(struct sctp_association *asoc,
asoc->peer.peer_hmacs = new->peer.peer_hmacs;
new->peer.peer_hmacs = NULL;
- sctp_auth_key_put(asoc->asoc_shared_key);
sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
}
@@ -1630,6 +1637,8 @@ struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
* ack chunk whose serial number matches that of the request.
*/
list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
+ if (sctp_chunk_pending(ack))
+ continue;
if (ack->subh.addip_hdr->serial == serial) {
sctp_chunk_hold(ack);
return ack;
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 418ebe4..53d455c 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -82,7 +82,7 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
struct sctp_auth_bytes *key;
/* Verify that we are not going to overflow INT_MAX */
- if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes))
+ if (key_len > (INT_MAX - sizeof(struct sctp_auth_bytes)))
return NULL;
/* Allocate the shared key */
@@ -866,8 +866,6 @@ int sctp_auth_set_key(struct sctp_endpoint *ep,
list_add(&cur_key->key_list, sh_keys);
cur_key->key = key;
- sctp_auth_key_hold(key);
-
return 0;
nomem:
if (!replace)
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 83e3011..4ece451 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -430,7 +430,7 @@ union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp,
list_for_each_entry(laddr, &bp->address_list, list) {
addr_buf = (union sctp_addr *)addrs;
for (i = 0; i < addrcnt; i++) {
- addr = (union sctp_addr *)addr_buf;
+ addr = addr_buf;
af = sctp_get_af_specific(addr->v4.sin_family);
if (!af)
break;
@@ -534,6 +534,21 @@ int sctp_in_scope(const union sctp_addr *addr, sctp_scope_t scope)
return 0;
}
+int sctp_is_ep_boundall(struct sock *sk)
+{
+ struct sctp_bind_addr *bp;
+ struct sctp_sockaddr_entry *addr;
+
+ bp = &sctp_sk(sk)->ep->base.bind_addr;
+ if (sctp_list_single_entry(&bp->address_list)) {
+ addr = list_entry(bp->address_list.next,
+ struct sctp_sockaddr_entry, list);
+ if (sctp_is_any(sk, &addr->a))
+ return 1;
+ }
+ return 0;
+}
+
/********************************************************************
* 3rd Level Abstractions
********************************************************************/
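
sctp_is_ep_boundall() added above answers a single question: does the endpoint's bind address list consist of exactly one entry, and is that entry the wildcard address? A runnable userspace sketch of the same decision, with an illustrative list type and an is_any() stand-in:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    struct addr_entry {
        char addr[16];               /* "0.0.0.0" acts as the wildcard here */
        struct addr_entry *next;
    };

    static bool is_any(const struct addr_entry *e)
    {
        return strcmp(e->addr, "0.0.0.0") == 0;
    }

    static bool is_boundall(const struct addr_entry *list)
    {
        /* exactly one entry... */
        if (list == NULL || list->next != NULL)
            return false;
        /* ...and that entry is the wildcard */
        return is_any(list);
    }

    int main(void)
    {
        struct addr_entry one = { "0.0.0.0", NULL };
        struct addr_entry two = { "192.0.2.1", NULL };

        printf("%d %d\n", is_boundall(&one), is_boundall(&two));   /* 1 0 */
        return 0;
    }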
diff --git a/net/sctp/input.c b/net/sctp/input.c
index cd9eded..0fc18c7 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -510,8 +510,7 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
* discard the packet.
*/
if (vtag == 0) {
- chunkhdr = (struct sctp_init_chunk *)((void *)sctphdr
- + sizeof(struct sctphdr));
+ chunkhdr = (void *)sctphdr + sizeof(struct sctphdr);
if (len < sizeof(struct sctphdr) + sizeof(sctp_chunkhdr_t)
+ sizeof(__be32) ||
chunkhdr->chunk_hdr.type != SCTP_CID_INIT ||
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 397296f..32421ae 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -152,18 +152,9 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
} else {
/* Nothing to do. Next chunk in the packet, please. */
ch = (sctp_chunkhdr_t *) chunk->chunk_end;
-
/* Force chunk->skb->data to chunk->chunk_end. */
- skb_pull(chunk->skb,
- chunk->chunk_end - chunk->skb->data);
-
- /* Verify that we have at least chunk headers
- * worth of buffer left.
- */
- if (skb_headlen(chunk->skb) < sizeof(sctp_chunkhdr_t)) {
- sctp_chunk_free(chunk);
- chunk = queue->in_progress = NULL;
- }
+ skb_pull(chunk->skb, chunk->chunk_end - chunk->skb->data);
+ /* We are guaranteed to pull a SCTP header. */
}
}
@@ -199,24 +190,14 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
chunk->subh.v = NULL; /* Subheader is no longer valid. */
- if (chunk->chunk_end < skb_tail_pointer(chunk->skb)) {
+ if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <
+ skb_tail_pointer(chunk->skb)) {
/* This is not a singleton */
chunk->singleton = 0;
} else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
- /* RFC 2960, Section 6.10 Bundling
- *
- * Partial chunks MUST NOT be placed in an SCTP packet.
- * If the receiver detects a partial chunk, it MUST drop
- * the chunk.
- *
- * Since the end of the chunk is past the end of our buffer
- * (which contains the whole packet, we can freely discard
- * the whole packet.
- */
- sctp_chunk_free(chunk);
- chunk = queue->in_progress = NULL;
-
- return NULL;
+ /* Discard inside state machine. */
+ chunk->pdiscard = 1;
+ chunk->chunk_end = skb_tail_pointer(chunk->skb);
} else {
/* We are at the end of the packet, so mark the chunk
* in case we need to send a SACK.
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 609adfa..0b6a391 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -112,6 +112,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
addr->valid = 1;
spin_lock_bh(&sctp_local_addr_lock);
list_add_tail_rcu(&addr->list, &sctp_local_addr_list);
+ sctp_addr_wq_mgmt(addr, SCTP_ADDR_NEW);
spin_unlock_bh(&sctp_local_addr_lock);
}
break;
@@ -122,6 +123,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
if (addr->a.sa.sa_family == AF_INET6 &&
ipv6_addr_equal(&addr->a.v6.sin6_addr,
&ifa->addr)) {
+ sctp_addr_wq_mgmt(addr, SCTP_ADDR_DEL);
found = 1;
addr->valid = 0;
list_del_rcu(&addr->list);
@@ -213,12 +215,14 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
__func__, skb, skb->len,
&fl6->saddr, &fl6->daddr);
+ IP6_ECN_flow_xmit(sk, fl6->flowlabel);
+
if (!(transport->param_flags & SPP_PMTUD_ENABLE))
skb->local_df = 1;
SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
- return ip6_xmit(sk, skb, fl6, np->opt);
+ return ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
}
/* Returns the dst cache entry for the given source and destination ip
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 32ba8d0..c3b8549 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -384,12 +384,12 @@ int sctp_packet_transmit(struct sctp_packet *packet)
sk = chunk->skb->sk;
/* Allocate the new skb. */
- nskb = alloc_skb(packet->size + LL_MAX_HEADER, GFP_ATOMIC);
+ nskb = alloc_skb(packet->size + MAX_HEADER, GFP_ATOMIC);
if (!nskb)
goto nomem;
/* Make sure the outbound skb has enough header room reserved. */
- skb_reserve(nskb, packet->overhead + LL_MAX_HEADER);
+ skb_reserve(nskb, packet->overhead + MAX_HEADER);
/* Set the owning socket so that we know where to get the
* destination IP address.
@@ -518,7 +518,8 @@ int sctp_packet_transmit(struct sctp_packet *packet)
* by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
*/
if (!sctp_checksum_disable) {
- if (!(dst->dev->features & NETIF_F_SCTP_CSUM)) {
+ if (!(dst->dev->features & NETIF_F_SCTP_CSUM) ||
+ (dst_xfrm(dst) != NULL) || packet->ipfragok) {
__u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);
/* 3) Put the resultant value into the checksum field in the
@@ -586,7 +587,7 @@ out:
return err;
no_route:
kfree_skb(nskb);
- IP_INC_STATS_BH(&init_net, IPSTATS_MIB_OUTNOROUTES);
+ IP_INC_STATS(&init_net, IPSTATS_MIB_OUTNOROUTES);
/* FIXME: Returning the 'err' will effect all the associations
* associated with a socket, although only one of the paths of the
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 35e44e2..3dd7207 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -752,6 +752,16 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
*/
list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
+ /* RFC 5061, 5.3
+ * F1) This means that until such time as the ASCONF
+ * containing the add is acknowledged, the sender MUST
+ * NOT use the new IP address as a source for ANY SCTP
+ * packet except on carrying an ASCONF Chunk.
+ */
+ if (asoc->src_out_of_asoc_ok &&
+ chunk->chunk_hdr->type != SCTP_CID_ASCONF)
+ continue;
+
list_del_init(&chunk->list);
/* Pick the right transport to use. */
@@ -879,6 +889,9 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
}
}
+ if (q->asoc->src_out_of_asoc_ok)
+ goto sctp_flush_out;
+
/* Is it OK to send data chunks? */
switch (asoc->state) {
case SCTP_STATE_COOKIE_ECHOED:
@@ -902,6 +915,8 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
* current cwnd).
*/
if (!list_empty(&q->retransmit)) {
+ if (asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
+ goto sctp_flush_out;
if (transport == asoc->peer.retran_path)
goto retran;
@@ -974,6 +989,8 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
((new_transport->state == SCTP_INACTIVE) ||
(new_transport->state == SCTP_UNCONFIRMED)))
new_transport = asoc->peer.active_path;
+ if (new_transport->state == SCTP_UNCONFIRMED)
+ continue;
/* Change packets if necessary. */
if (new_transport != transport) {
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 05a6ce2..1e2eee8 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -37,6 +37,7 @@
#include <linux/types.h>
#include <linux/seq_file.h>
#include <linux/init.h>
+#include <linux/export.h>
#include <net/sctp/sctp.h>
#include <net/ip.h> /* for snmp_fold_field */
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 946afd6..de35e01 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -503,7 +503,9 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
sctp_v4_dst_saddr(&dst_saddr, fl4, htons(bp->port));
rcu_read_lock();
list_for_each_entry_rcu(laddr, &bp->address_list, list) {
- if (!laddr->valid || (laddr->state != SCTP_ADDR_SRC))
+ if (!laddr->valid || (laddr->state == SCTP_ADDR_DEL) ||
+ (laddr->state != SCTP_ADDR_SRC &&
+ !asoc->src_out_of_asoc_ok))
continue;
if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a))
goto out_unlock;
@@ -526,8 +528,13 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
continue;
if ((laddr->state == SCTP_ADDR_SRC) &&
(AF_INET == laddr->a.sa.sa_family)) {
- fl4->saddr = laddr->a.v4.sin_addr.s_addr;
fl4->fl4_sport = laddr->a.v4.sin_port;
+ flowi4_update_output(fl4,
+ asoc->base.sk->sk_bound_dev_if,
+ RT_CONN_FLAGS(asoc->base.sk),
+ daddr->v4.sin_addr.s_addr,
+ laddr->a.v4.sin_addr.s_addr);
+
rt = ip_route_output_key(&init_net, fl4);
if (!IS_ERR(rt)) {
dst = &rt->dst;
@@ -623,6 +630,143 @@ static void sctp_v4_ecn_capable(struct sock *sk)
INET_ECN_xmit(sk);
}
+void sctp_addr_wq_timeout_handler(unsigned long arg)
+{
+ struct sctp_sockaddr_entry *addrw, *temp;
+ struct sctp_sock *sp;
+
+ spin_lock_bh(&sctp_addr_wq_lock);
+
+ list_for_each_entry_safe(addrw, temp, &sctp_addr_waitq, list) {
+ SCTP_DEBUG_PRINTK_IPADDR("sctp_addrwq_timo_handler: the first ent in wq %p is ",
+ " for cmd %d at entry %p\n", &sctp_addr_waitq, &addrw->a, addrw->state,
+ addrw);
+
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+ /* Now we send an ASCONF for each association */
+ /* Note: we currently don't handle link-local IPv6 addresses */
+ if (addrw->a.sa.sa_family == AF_INET6) {
+ struct in6_addr *in6;
+
+ if (ipv6_addr_type(&addrw->a.v6.sin6_addr) &
+ IPV6_ADDR_LINKLOCAL)
+ goto free_next;
+
+ in6 = (struct in6_addr *)&addrw->a.v6.sin6_addr;
+ if (ipv6_chk_addr(&init_net, in6, NULL, 0) == 0 &&
+ addrw->state == SCTP_ADDR_NEW) {
+ unsigned long timeo_val;
+
+ SCTP_DEBUG_PRINTK("sctp_timo_handler: this is on DAD, trying %d sec later\n",
+ SCTP_ADDRESS_TICK_DELAY);
+ timeo_val = jiffies;
+ timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY);
+ mod_timer(&sctp_addr_wq_timer, timeo_val);
+ break;
+ }
+ }
+#endif
+ list_for_each_entry(sp, &sctp_auto_asconf_splist, auto_asconf_list) {
+ struct sock *sk;
+
+ sk = sctp_opt2sk(sp);
+ /* ignore bound-specific endpoints */
+ if (!sctp_is_ep_boundall(sk))
+ continue;
+ sctp_bh_lock_sock(sk);
+ if (sctp_asconf_mgmt(sp, addrw) < 0)
+ SCTP_DEBUG_PRINTK("sctp_addrwq_timo_handler: sctp_asconf_mgmt failed\n");
+ sctp_bh_unlock_sock(sk);
+ }
+free_next:
+ list_del(&addrw->list);
+ kfree(addrw);
+ }
+ spin_unlock_bh(&sctp_addr_wq_lock);
+}
+
+static void sctp_free_addr_wq(void)
+{
+ struct sctp_sockaddr_entry *addrw;
+ struct sctp_sockaddr_entry *temp;
+
+ spin_lock_bh(&sctp_addr_wq_lock);
+ del_timer(&sctp_addr_wq_timer);
+ list_for_each_entry_safe(addrw, temp, &sctp_addr_waitq, list) {
+ list_del(&addrw->list);
+ kfree(addrw);
+ }
+ spin_unlock_bh(&sctp_addr_wq_lock);
+}
+
+/* lookup the entry for the same address in the addr_waitq
+ * sctp_addr_wq MUST be locked
+ */
+static struct sctp_sockaddr_entry *sctp_addr_wq_lookup(struct sctp_sockaddr_entry *addr)
+{
+ struct sctp_sockaddr_entry *addrw;
+
+ list_for_each_entry(addrw, &sctp_addr_waitq, list) {
+ if (addrw->a.sa.sa_family != addr->a.sa.sa_family)
+ continue;
+ if (addrw->a.sa.sa_family == AF_INET) {
+ if (addrw->a.v4.sin_addr.s_addr ==
+ addr->a.v4.sin_addr.s_addr)
+ return addrw;
+ } else if (addrw->a.sa.sa_family == AF_INET6) {
+ if (ipv6_addr_equal(&addrw->a.v6.sin6_addr,
+ &addr->a.v6.sin6_addr))
+ return addrw;
+ }
+ }
+ return NULL;
+}
+
+void sctp_addr_wq_mgmt(struct sctp_sockaddr_entry *addr, int cmd)
+{
+ struct sctp_sockaddr_entry *addrw;
+ unsigned long timeo_val;
+
+ /* First, check whether an opposite event for the same address is already
+ * queued; if such an entry is found, the two cancel out and it is removed.
+ * This looks wasteful, but DHCP clients commonly attach the new address
+ * only after a few additions and deletions of that same address.
+ */
+
+ spin_lock_bh(&sctp_addr_wq_lock);
+ /* Offsets existing events in addr_wq */
+ addrw = sctp_addr_wq_lookup(addr);
+ if (addrw) {
+ if (addrw->state != cmd) {
+ SCTP_DEBUG_PRINTK_IPADDR("sctp_addr_wq_mgmt offsets existing entry for %d ",
+ " in wq %p\n", addrw->state, &addrw->a,
+ &sctp_addr_waitq);
+ list_del(&addrw->list);
+ kfree(addrw);
+ }
+ spin_unlock_bh(&sctp_addr_wq_lock);
+ return;
+ }
+
+ /* OK, we have to add the new address to the wait queue */
+ addrw = kmemdup(addr, sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
+ if (addrw == NULL) {
+ spin_unlock_bh(&sctp_addr_wq_lock);
+ return;
+ }
+ addrw->state = cmd;
+ list_add_tail(&addrw->list, &sctp_addr_waitq);
+ SCTP_DEBUG_PRINTK_IPADDR("sctp_addr_wq_mgmt add new entry for cmd:%d ",
+ " in wq %p\n", addrw->state, &addrw->a, &sctp_addr_waitq);
+
+ if (!timer_pending(&sctp_addr_wq_timer)) {
+ timeo_val = jiffies;
+ timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY);
+ mod_timer(&sctp_addr_wq_timer, timeo_val);
+ }
+ spin_unlock_bh(&sctp_addr_wq_lock);
+}
+
/* Event handler for inet address addition/deletion events.
* The sctp_local_addr_list needs to be protected by a spin lock since
* multiple notifiers (say IPv4 and IPv6) may be running at the same
@@ -650,6 +794,7 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
addr->valid = 1;
spin_lock_bh(&sctp_local_addr_lock);
list_add_tail_rcu(&addr->list, &sctp_local_addr_list);
+ sctp_addr_wq_mgmt(addr, SCTP_ADDR_NEW);
spin_unlock_bh(&sctp_local_addr_lock);
}
break;
@@ -660,6 +805,7 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
if (addr->a.sa.sa_family == AF_INET &&
addr->a.v4.sin_addr.s_addr ==
ifa->ifa_local) {
+ sctp_addr_wq_mgmt(addr, SCTP_ADDR_DEL);
found = 1;
addr->valid = 0;
list_del_rcu(&addr->list);
@@ -1161,7 +1307,7 @@ SCTP_STATIC __init int sctp_init(void)
max_share = min(4UL*1024*1024, limit);
sysctl_sctp_rmem[0] = SK_MEM_QUANTUM; /* give each asoc 1 page min */
- sysctl_sctp_rmem[1] = (1500 *(sizeof(struct sk_buff) + 1));
+ sysctl_sctp_rmem[1] = 1500 * SKB_TRUESIZE(1);
sysctl_sctp_rmem[2] = max(sysctl_sctp_rmem[1], max_share);
sysctl_sctp_wmem[0] = SK_MEM_QUANTUM;
@@ -1236,6 +1382,7 @@ SCTP_STATIC __init int sctp_init(void)
/* Disable ADDIP by default. */
sctp_addip_enable = 0;
sctp_addip_noauth = 0;
+ sctp_default_auto_asconf = 0;
/* Enable PR-SCTP by default. */
sctp_prsctp_enable = 1;
@@ -1260,6 +1407,13 @@ SCTP_STATIC __init int sctp_init(void)
spin_lock_init(&sctp_local_addr_lock);
sctp_get_local_addr_list();
+ /* Initialize the address event list */
+ INIT_LIST_HEAD(&sctp_addr_waitq);
+ INIT_LIST_HEAD(&sctp_auto_asconf_splist);
+ spin_lock_init(&sctp_addr_wq_lock);
+ sctp_addr_wq_timer.expires = 0;
+ setup_timer(&sctp_addr_wq_timer, sctp_addr_wq_timeout_handler, 0);
+
status = sctp_v4_protosw_init();
if (status)
@@ -1331,6 +1485,7 @@ SCTP_STATIC __exit void sctp_exit(void)
/* Unregister with inet6/inet layers. */
sctp_v6_del_protocol();
sctp_v4_del_protocol();
+ sctp_free_addr_wq();
/* Free the control endpoint. */
inet_ctl_sock_destroy(sctp_ctl_sock);
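
The protocol.c additions introduce an address wait queue: inetaddr/inet6addr notifier events are queued as SCTP_ADDR_NEW/SCTP_ADDR_DEL entries, opposite events for the same address cancel each other, and a timer later turns the surviving entries into ASCONF updates for bound-all sockets. A simplified, runnable userspace sketch of the offsetting-queue behaviour (no timer is modelled, and the names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    enum ev { ADDR_NEW, ADDR_DEL };

    struct waitq_entry {
        char addr[16];
        enum ev state;
        struct waitq_entry *next;
    };

    static struct waitq_entry *waitq;

    static struct waitq_entry *lookup(const char *addr)
    {
        for (struct waitq_entry *e = waitq; e; e = e->next)
            if (strcmp(e->addr, addr) == 0)
                return e;
        return NULL;
    }

    static void addr_wq_mgmt(const char *addr, enum ev cmd)
    {
        struct waitq_entry *e = lookup(addr);

        if (e) {
            if (e->state != cmd) {          /* opposite event: they cancel */
                struct waitq_entry **pp = &waitq;
                while (*pp != e)
                    pp = &(*pp)->next;
                *pp = e->next;
                free(e);
            }
            return;                         /* same event: nothing new to queue */
        }

        e = calloc(1, sizeof(*e));          /* queue the new event */
        if (!e)
            return;
        strncpy(e->addr, addr, sizeof(e->addr) - 1);
        e->state = cmd;
        e->next = waitq;
        waitq = e;
    }

    int main(void)
    {
        addr_wq_mgmt("192.0.2.1", ADDR_NEW);
        addr_wq_mgmt("192.0.2.1", ADDR_DEL);    /* cancels the pending NEW */
        printf("queue %s\n", waitq ? "non-empty" : "empty");   /* empty */
        return 0;
    }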
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 58eb27f..743a644 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1366,8 +1366,8 @@ static void sctp_chunk_destroy(struct sctp_chunk *chunk)
BUG_ON(!list_empty(&chunk->list));
list_del_init(&chunk->transmitted_list);
- /* Free the chunk skb data and the SCTP_chunk stub itself. */
- dev_kfree_skb(chunk->skb);
+ consume_skb(chunk->skb);
+ consume_skb(chunk->auth_chunk);
SCTP_DBG_OBJCNT_DEC(chunk);
kmem_cache_free(sctp_chunk_cachep, chunk);
@@ -2569,7 +2569,10 @@ do_addr_param:
addr_param = param.v + sizeof(sctp_addip_param_t);
- af = sctp_get_af_specific(param_type2af(param.p->type));
+ af = sctp_get_af_specific(param_type2af(addr_param->p.type));
+ if (af == NULL)
+ break;
+
af->from_addr_param(&addr, addr_param,
htons(asoc->peer.port), 0);
@@ -2768,11 +2771,12 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc,
int addr_param_len = 0;
int totallen = 0;
int i;
+ int del_pickup = 0;
/* Get total length of all the address parameters. */
addr_buf = addrs;
for (i = 0; i < addrcnt; i++) {
- addr = (union sctp_addr *)addr_buf;
+ addr = addr_buf;
af = sctp_get_af_specific(addr->v4.sin_family);
addr_param_len = af->to_addr_param(addr, &addr_param);
@@ -2780,6 +2784,13 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc,
totallen += addr_param_len;
addr_buf += af->sockaddr_len;
+ if (asoc->asconf_addr_del_pending && !del_pickup) {
+ /* reuse the parameter length from the same scope one */
+ totallen += paramlen;
+ totallen += addr_param_len;
+ del_pickup = 1;
+ SCTP_DEBUG_PRINTK("mkasconf_update_ip: picked same-scope del_pending addr, totallen for all addresses is %d\n", totallen);
+ }
}
/* Create an asconf chunk with the required length. */
@@ -2790,7 +2801,7 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc,
/* Add the address parameters to the asconf chunk. */
addr_buf = addrs;
for (i = 0; i < addrcnt; i++) {
- addr = (union sctp_addr *)addr_buf;
+ addr = addr_buf;
af = sctp_get_af_specific(addr->v4.sin_family);
addr_param_len = af->to_addr_param(addr, &addr_param);
param.param_hdr.type = flags;
@@ -2802,6 +2813,17 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc,
addr_buf += af->sockaddr_len;
}
+ if (flags == SCTP_PARAM_ADD_IP && del_pickup) {
+ addr = asoc->asconf_addr_del_pending;
+ af = sctp_get_af_specific(addr->v4.sin_family);
+ addr_param_len = af->to_addr_param(addr, &addr_param);
+ param.param_hdr.type = SCTP_PARAM_DEL_IP;
+ param.param_hdr.length = htons(paramlen + addr_param_len);
+ param.crr_id = i;
+
+ sctp_addto_chunk(retval, paramlen, &param);
+ sctp_addto_chunk(retval, addr_param_len, &addr_param);
+ }
return retval;
}
@@ -2939,8 +2961,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
union sctp_addr addr;
union sctp_addr_param *addr_param;
- addr_param = (union sctp_addr_param *)
- ((void *)asconf_param + sizeof(sctp_addip_param_t));
+ addr_param = (void *)asconf_param + sizeof(sctp_addip_param_t);
if (asconf_param->param_hdr.type != SCTP_PARAM_ADD_IP &&
asconf_param->param_hdr.type != SCTP_PARAM_DEL_IP &&
@@ -2997,6 +3018,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
/* Start the heartbeat timer. */
if (!mod_timer(&peer->hb_timer, sctp_transport_timeout(peer)))
sctp_transport_hold(peer);
+ asoc->new_transport = peer;
break;
case SCTP_PARAM_DEL_IP:
/* ADDIP 4.3 D7) If a request is received to delete the
@@ -3014,7 +3036,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
* an Error Cause TLV set to the new error code 'Request to
* Delete Source IP Address'
*/
- if (sctp_cmp_addr_exact(sctp_source(asconf), &addr))
+ if (sctp_cmp_addr_exact(&asconf->source, &addr))
return SCTP_ERROR_DEL_SRC_IP;
/* Section 4.2.2
@@ -3049,50 +3071,63 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
return SCTP_ERROR_NO_ERROR;
}
-/* Verify the ASCONF packet before we process it. */
-int sctp_verify_asconf(const struct sctp_association *asoc,
- struct sctp_paramhdr *param_hdr, void *chunk_end,
- struct sctp_paramhdr **errp) {
- sctp_addip_param_t *asconf_param;
+/* Verify the ASCONF packet before we process it. */
+bool sctp_verify_asconf(const struct sctp_association *asoc,
+ struct sctp_chunk *chunk, bool addr_param_needed,
+ struct sctp_paramhdr **errp)
+{
+ sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) chunk->chunk_hdr;
union sctp_params param;
- int length, plen;
+ bool addr_param_seen = false;
- param.v = (sctp_paramhdr_t *) param_hdr;
- while (param.v <= chunk_end - sizeof(sctp_paramhdr_t)) {
- length = ntohs(param.p->length);
- *errp = param.p;
-
- if (param.v > chunk_end - length ||
- length < sizeof(sctp_paramhdr_t))
- return 0;
+ sctp_walk_params(param, addip, addip_hdr.params) {
+ size_t length = ntohs(param.p->length);
+ *errp = param.p;
switch (param.p->type) {
+ case SCTP_PARAM_ERR_CAUSE:
+ break;
+ case SCTP_PARAM_IPV4_ADDRESS:
+ if (length != sizeof(sctp_ipv4addr_param_t))
+ return false;
+ addr_param_seen = true;
+ break;
+ case SCTP_PARAM_IPV6_ADDRESS:
+ if (length != sizeof(sctp_ipv6addr_param_t))
+ return false;
+ addr_param_seen = true;
+ break;
case SCTP_PARAM_ADD_IP:
case SCTP_PARAM_DEL_IP:
case SCTP_PARAM_SET_PRIMARY:
- asconf_param = (sctp_addip_param_t *)param.v;
- plen = ntohs(asconf_param->param_hdr.length);
- if (plen < sizeof(sctp_addip_param_t) +
- sizeof(sctp_paramhdr_t))
- return 0;
+ /* In ASCONF chunks, these need to be first. */
+ if (addr_param_needed && !addr_param_seen)
+ return false;
+ length = ntohs(param.addip->param_hdr.length);
+ if (length < sizeof(sctp_addip_param_t) +
+ sizeof(sctp_paramhdr_t))
+ return false;
break;
case SCTP_PARAM_SUCCESS_REPORT:
case SCTP_PARAM_ADAPTATION_LAYER_IND:
if (length != sizeof(sctp_addip_param_t))
- return 0;
-
+ return false;
break;
default:
- break;
+ /* This is unknown to us, reject! */
+ return false;
}
-
- param.v += WORD_ROUND(length);
}
- if (param.v != chunk_end)
- return 0;
+ /* Remaining sanity checks. */
+ if (addr_param_needed && !addr_param_seen)
+ return false;
+ if (!addr_param_needed && addr_param_seen)
+ return false;
+ if (param.v != chunk->chunk_end)
+ return false;
- return 1;
+ return true;
}
/* Process an incoming ASCONF chunk with the next expected serial no. and
@@ -3101,16 +3136,17 @@ int sctp_verify_asconf(const struct sctp_association *asoc,
struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
struct sctp_chunk *asconf)
{
+ sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) asconf->chunk_hdr;
+ bool all_param_pass = true;
+ union sctp_params param;
sctp_addiphdr_t *hdr;
union sctp_addr_param *addr_param;
sctp_addip_param_t *asconf_param;
struct sctp_chunk *asconf_ack;
-
__be16 err_code;
int length = 0;
int chunk_len;
__u32 serial;
- int all_param_pass = 1;
chunk_len = ntohs(asconf->chunk_hdr->length) - sizeof(sctp_chunkhdr_t);
hdr = (sctp_addiphdr_t *)asconf->skb->data;
@@ -3125,7 +3161,7 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
* asconf parameter.
*/
length = ntohs(addr_param->p.length);
- asconf_param = (sctp_addip_param_t *)((void *)addr_param + length);
+ asconf_param = (void *)addr_param + length;
chunk_len -= length;
/* create an ASCONF_ACK chunk.
@@ -3138,9 +3174,14 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
goto done;
/* Process the TLVs contained within the ASCONF chunk. */
- while (chunk_len > 0) {
+ sctp_walk_params(param, addip, addip_hdr.params) {
+ /* Skip preceding address parameters. */
+ if (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
+ param.p->type == SCTP_PARAM_IPV6_ADDRESS)
+ continue;
+
err_code = sctp_process_asconf_param(asoc, asconf,
- asconf_param);
+ param.addip);
/* ADDIP 4.1 A7)
* If an error response is received for a TLV parameter,
* all TLVs with no response before the failed TLV are
@@ -3148,29 +3189,20 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
* the failed response are considered unsuccessful unless
* a specific success indication is present for the parameter.
*/
- if (SCTP_ERROR_NO_ERROR != err_code)
- all_param_pass = 0;
-
+ if (err_code != SCTP_ERROR_NO_ERROR)
+ all_param_pass = false;
if (!all_param_pass)
- sctp_add_asconf_response(asconf_ack,
- asconf_param->crr_id, err_code,
- asconf_param);
+ sctp_add_asconf_response(asconf_ack, param.addip->crr_id,
+ err_code, param.addip);
/* ADDIP 4.3 D11) When an endpoint receiving an ASCONF to add
* an IP address sends an 'Out of Resource' in its response, it
* MUST also fail any subsequent add or delete requests bundled
* in the ASCONF.
*/
- if (SCTP_ERROR_RSRC_LOW == err_code)
+ if (err_code == SCTP_ERROR_RSRC_LOW)
goto done;
-
- /* Move to the next ASCONF param. */
- length = ntohs(asconf_param->param_hdr.length);
- asconf_param = (sctp_addip_param_t *)((void *)asconf_param +
- length);
- chunk_len -= length;
}
-
done:
asoc->peer.addip_serial++;
@@ -3197,8 +3229,7 @@ static void sctp_asconf_param_success(struct sctp_association *asoc,
struct sctp_transport *transport;
struct sctp_sockaddr_entry *saddr;
- addr_param = (union sctp_addr_param *)
- ((void *)asconf_param + sizeof(sctp_addip_param_t));
+ addr_param = (void *)asconf_param + sizeof(sctp_addip_param_t);
/* We have checked the packet before, so we do not check again. */
af = sctp_get_af_specific(param_type2af(addr_param->p.type));
@@ -3224,6 +3255,11 @@ static void sctp_asconf_param_success(struct sctp_association *asoc,
case SCTP_PARAM_DEL_IP:
local_bh_disable();
sctp_del_bind_addr(bp, &addr);
+ if (asoc->asconf_addr_del_pending != NULL &&
+ sctp_cmp_addr_exact(asoc->asconf_addr_del_pending, &addr)) {
+ kfree(asoc->asconf_addr_del_pending);
+ asoc->asconf_addr_del_pending = NULL;
+ }
local_bh_enable();
list_for_each_entry(transport, &asoc->peer.transport_addr_list,
transports) {
@@ -3278,8 +3314,7 @@ static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack,
return SCTP_ERROR_NO_ERROR;
case SCTP_PARAM_ERR_CAUSE:
length = sizeof(sctp_addip_param_t);
- err_param = (sctp_errhdr_t *)
- ((void *)asconf_ack_param + length);
+ err_param = (void *)asconf_ack_param + length;
asconf_ack_len -= length;
if (asconf_ack_len > 0)
return err_param->cause;
@@ -3292,8 +3327,7 @@ static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack,
}
length = ntohs(asconf_ack_param->param_hdr.length);
- asconf_ack_param = (sctp_addip_param_t *)
- ((void *)asconf_ack_param + length);
+ asconf_ack_param = (void *)asconf_ack_param + length;
asconf_ack_len -= length;
}
@@ -3325,7 +3359,7 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
* pointer to the first asconf parameter.
*/
length = ntohs(addr_param->p.length);
- asconf_param = (sctp_addip_param_t *)((void *)addr_param + length);
+ asconf_param = (void *)addr_param + length;
asconf_len -= length;
/* ADDIP 4.1
@@ -3376,11 +3410,13 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
* one.
*/
length = ntohs(asconf_param->param_hdr.length);
- asconf_param = (sctp_addip_param_t *)((void *)asconf_param +
- length);
+ asconf_param = (void *)asconf_param + length;
asconf_len -= length;
}
+ if (no_err && asoc->src_out_of_asoc_ok)
+ asoc->src_out_of_asoc_ok = 0;
+
/* Free the cached last sent asconf chunk. */
list_del_init(&asconf->transmitted_list);
sctp_chunk_free(asconf);
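
The rewritten sctp_verify_asconf() above walks every parameter of the chunk, enforces per-type lengths, requires the address parameter to come first when one is expected, rejects unknown parameter types outright, and insists the walk end exactly at chunk_end. A runnable userspace sketch of that stricter TLV validation, with illustrative type codes and sizes:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define P_IPV4   1   /* expects an 8-byte parameter      */
    #define P_ADDIP  2   /* expects at least an 8-byte parameter */

    struct phdr { uint16_t type; uint16_t length; };   /* host byte order here */

    static bool verify(const uint8_t *buf, size_t len, bool addr_needed)
    {
        size_t off = 0;
        bool addr_seen = false;

        while (off + sizeof(struct phdr) <= len) {
            struct phdr p;
            memcpy(&p, buf + off, sizeof(p));
            if (p.length < sizeof(p) || off + p.length > len)
                return false;                 /* truncated or bogus length */

            switch (p.type) {
            case P_IPV4:
                if (p.length != 8)
                    return false;
                addr_seen = true;
                break;
            case P_ADDIP:
                if (addr_needed && !addr_seen)
                    return false;             /* address must come first */
                if (p.length < 8)
                    return false;
                break;
            default:
                return false;                 /* unknown parameter: reject */
            }
            off += (p.length + 3u) & ~3u;     /* 4-byte alignment, like WORD_ROUND */
        }

        if (addr_needed && !addr_seen)
            return false;
        return off == len;                    /* must consume the chunk exactly */
    }

    int main(void)
    {
        uint8_t good[16] = { 0 };
        struct phdr a = { P_IPV4, 8 }, b = { P_ADDIP, 8 };
        memcpy(good, &a, sizeof(a));
        memcpy(good + 8, &b, sizeof(b));
        printf("%d\n", verify(good, sizeof(good), true));   /* prints 1 */
        return 0;
    }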
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 6e0f882..581c06a 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -681,7 +681,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
* outstanding data and rely on the retransmission limit be reached
* to shutdown the association.
*/
- if (t->asoc->state != SCTP_STATE_SHUTDOWN_PENDING)
+ if (t->asoc->state < SCTP_STATE_SHUTDOWN_PENDING)
t->asoc->overall_error_count = 0;
/* Clear the hb_sent flag to signal that we had a good
@@ -1210,7 +1210,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
int local_cork = 0;
if (SCTP_EVENT_T_TIMEOUT != event_type)
- chunk = (struct sctp_chunk *) event_arg;
+ chunk = event_arg;
/* Note: This whole file is a huge candidate for rework.
* For example, each command could either have its own handler, so
@@ -1689,6 +1689,11 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
case SCTP_CMD_PURGE_ASCONF_QUEUE:
sctp_asconf_queue_teardown(asoc);
break;
+
+ case SCTP_CMD_SET_ASOC:
+ asoc = cmd->obj.asoc;
+ break;
+
default:
pr_warn("Impossible command: %u, %p\n",
cmd->verb, cmd->obj.ptr);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 2461171..d02dd3c 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -163,6 +163,9 @@ sctp_chunk_length_valid(struct sctp_chunk *chunk,
{
__u16 chunk_length = ntohs(chunk->chunk_hdr->length);
+ /* Previously already marked? */
+ if (unlikely(chunk->pdiscard))
+ return 0;
if (unlikely(chunk_length < required_length))
return 0;
@@ -747,6 +750,12 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
struct sctp_chunk auth;
sctp_ierror_t ret;
+ /* Make sure that we and the peer are AUTH capable */
+ if (!sctp_auth_enable || !new_asoc->peer.auth_capable) {
+ sctp_association_free(new_asoc);
+ return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+ }
+
/* set-up our fake chunk so that we can process it */
auth.skb = chunk->auth_chunk;
auth.asoc = chunk->asoc;
@@ -757,10 +766,6 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
auth.transport = chunk->transport;
ret = sctp_sf_authenticate(ep, new_asoc, type, &auth);
-
- /* We can now safely free the auth_chunk clone */
- kfree_skb(chunk->auth_chunk);
-
if (ret != SCTP_IERROR_NO_ERROR) {
sctp_association_free(new_asoc);
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
@@ -2044,9 +2049,15 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
}
/* Delete the temporary new association. */
- sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
+ /* Restore association pointer to provide the SCTP command interpreter
+ * with a valid context in case it needs to manipulate
+ * the queues */
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC,
+ SCTP_ASOC((struct sctp_association *)asoc));
+
return retval;
nomem:
@@ -3508,9 +3519,7 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
struct sctp_chunk *asconf_ack = NULL;
struct sctp_paramhdr *err_param = NULL;
sctp_addiphdr_t *hdr;
- union sctp_addr_param *addr_param;
__u32 serial;
- int length;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
@@ -3535,17 +3544,8 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
hdr = (sctp_addiphdr_t *)chunk->skb->data;
serial = ntohl(hdr->serial);
- addr_param = (union sctp_addr_param *)hdr->params;
- length = ntohs(addr_param->p.length);
- if (length < sizeof(sctp_paramhdr_t))
- return sctp_sf_violation_paramlen(ep, asoc, type, arg,
- (void *)addr_param, commands);
-
/* Verify the ASCONF chunk before processing it. */
- if (!sctp_verify_asconf(asoc,
- (sctp_paramhdr_t *)((void *)addr_param + length),
- (void *)chunk->chunk_end,
- &err_param))
+ if (!sctp_verify_asconf(asoc, chunk, true, &err_param))
return sctp_sf_violation_paramlen(ep, asoc, type, arg,
(void *)err_param, commands);
@@ -3612,6 +3612,11 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
*/
asconf_ack->dest = chunk->source;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(asconf_ack));
+ if (asoc->new_transport) {
+ sctp_sf_heartbeat(ep, asoc, type, asoc->new_transport,
+ commands);
+ ((struct sctp_association *)asoc)->new_transport = NULL;
+ }
return SCTP_DISPOSITION_CONSUME;
}
@@ -3657,10 +3662,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
rcvd_serial = ntohl(addip_hdr->serial);
/* Verify the ASCONF-ACK chunk before processing it. */
- if (!sctp_verify_asconf(asoc,
- (sctp_paramhdr_t *)addip_hdr->params,
- (void *)asconf_ack->chunk_end,
- &err_param))
+ if (!sctp_verify_asconf(asoc, asconf_ack, false, &err_param))
return sctp_sf_violation_paramlen(ep, asoc, type, arg,
(void *)err_param, commands);
@@ -4008,31 +4010,32 @@ sctp_disposition_t sctp_sf_eat_auth(const struct sctp_endpoint *ep,
auth_hdr = (struct sctp_authhdr *)chunk->skb->data;
error = sctp_sf_authenticate(ep, asoc, type, chunk);
switch (error) {
- case SCTP_IERROR_AUTH_BAD_HMAC:
- /* Generate the ERROR chunk and discard the rest
- * of the packet
- */
- err_chunk = sctp_make_op_error(asoc, chunk,
- SCTP_ERROR_UNSUP_HMAC,
- &auth_hdr->hmac_id,
- sizeof(__u16), 0);
- if (err_chunk) {
- sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
- SCTP_CHUNK(err_chunk));
- }
- /* Fall Through */
- case SCTP_IERROR_AUTH_BAD_KEYID:
- case SCTP_IERROR_BAD_SIG:
- return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
- break;
- case SCTP_IERROR_PROTO_VIOLATION:
- return sctp_sf_violation_chunklen(ep, asoc, type, arg,
- commands);
- break;
- case SCTP_IERROR_NOMEM:
- return SCTP_DISPOSITION_NOMEM;
- default:
- break;
+ case SCTP_IERROR_AUTH_BAD_HMAC:
+ /* Generate the ERROR chunk and discard the rest
+ * of the packet
+ */
+ err_chunk = sctp_make_op_error(asoc, chunk,
+ SCTP_ERROR_UNSUP_HMAC,
+ &auth_hdr->hmac_id,
+ sizeof(__u16), 0);
+ if (err_chunk) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+ SCTP_CHUNK(err_chunk));
+ }
+ /* Fall Through */
+ case SCTP_IERROR_AUTH_BAD_KEYID:
+ case SCTP_IERROR_BAD_SIG:
+ return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+
+ case SCTP_IERROR_PROTO_VIOLATION:
+ return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+ commands);
+
+ case SCTP_IERROR_NOMEM:
+ return SCTP_DISPOSITION_NOMEM;
+
+ default: /* Prevent gcc warnings */
+ break;
}
if (asoc->active_key_id != ntohs(auth_hdr->shkey_id)) {
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index d0a8a77..24e88af 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -70,6 +70,7 @@
#include <linux/init.h>
#include <linux/crypto.h>
#include <linux/slab.h>
+#include <linux/compat.h>
#include <net/ip.h>
#include <net/icmp.h>
@@ -78,6 +79,7 @@
#include <net/inet_common.h>
#include <linux/socket.h> /* for sa_family_t */
+#include <linux/export.h>
#include <net/sock.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
@@ -476,7 +478,7 @@ static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
/* The list may contain either IPv4 or IPv6 address;
* determine the address length for walking thru the list.
*/
- sa_addr = (struct sockaddr *)addr_buf;
+ sa_addr = addr_buf;
af = sctp_get_af_specific(sa_addr->sa_family);
if (!af) {
retval = -EINVAL;
@@ -555,7 +557,7 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
*/
addr_buf = addrs;
for (i = 0; i < addrcnt; i++) {
- addr = (union sctp_addr *)addr_buf;
+ addr = addr_buf;
af = sctp_get_af_specific(addr->v4.sin_family);
if (!af) {
retval = -EINVAL;
@@ -583,22 +585,35 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
goto out;
}
- retval = sctp_send_asconf(asoc, chunk);
- if (retval)
- goto out;
-
/* Add the new addresses to the bind address list with
* use_as_src set to 0.
*/
addr_buf = addrs;
for (i = 0; i < addrcnt; i++) {
- addr = (union sctp_addr *)addr_buf;
+ addr = addr_buf;
af = sctp_get_af_specific(addr->v4.sin_family);
memcpy(&saveaddr, addr, af->sockaddr_len);
retval = sctp_add_bind_addr(bp, &saveaddr,
SCTP_ADDR_NEW, GFP_ATOMIC);
addr_buf += af->sockaddr_len;
}
+ if (asoc->src_out_of_asoc_ok) {
+ struct sctp_transport *trans;
+
+ list_for_each_entry(trans,
+ &asoc->peer.transport_addr_list, transports) {
+ /* Clear the source and route cache */
+ dst_release(trans->dst);
+ trans->cwnd = min(4*asoc->pathmtu, max_t(__u32,
+ 2*asoc->pathmtu, 4380));
+ trans->ssthresh = asoc->peer.i.a_rwnd;
+ trans->rto = asoc->rto_initial;
+ trans->rtt = trans->srtt = trans->rttvar = 0;
+ sctp_transport_route(trans, NULL,
+ sctp_sk(asoc->base.sk));
+ }
+ }
+ retval = sctp_send_asconf(asoc, chunk);
}
out:
@@ -646,7 +661,7 @@ static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
goto err_bindx_rem;
}
- sa_addr = (union sctp_addr *)addr_buf;
+ sa_addr = addr_buf;
af = sctp_get_af_specific(sa_addr->sa.sa_family);
if (!af) {
retval = -EINVAL;
@@ -715,7 +730,9 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
struct sctp_sockaddr_entry *saddr;
int i;
int retval = 0;
+ int stored = 0;
+ chunk = NULL;
if (!sctp_addip_enable)
return retval;
@@ -743,7 +760,7 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
*/
addr_buf = addrs;
for (i = 0; i < addrcnt; i++) {
- laddr = (union sctp_addr *)addr_buf;
+ laddr = addr_buf;
af = sctp_get_af_specific(laddr->v4.sin_family);
if (!af) {
retval = -EINVAL;
@@ -766,8 +783,40 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
bp = &asoc->base.bind_addr;
laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs,
addrcnt, sp);
- if (!laddr)
- continue;
+ if ((laddr == NULL) && (addrcnt == 1)) {
+ if (asoc->asconf_addr_del_pending)
+ continue;
+ asoc->asconf_addr_del_pending =
+ kzalloc(sizeof(union sctp_addr), GFP_ATOMIC);
+ if (asoc->asconf_addr_del_pending == NULL) {
+ retval = -ENOMEM;
+ goto out;
+ }
+ asoc->asconf_addr_del_pending->sa.sa_family =
+ addrs->sa_family;
+ asoc->asconf_addr_del_pending->v4.sin_port =
+ htons(bp->port);
+ if (addrs->sa_family == AF_INET) {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)addrs;
+ asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr;
+ } else if (addrs->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)addrs;
+ ipv6_addr_copy(&asoc->asconf_addr_del_pending->v6.sin6_addr, &sin6->sin6_addr);
+ }
+ SCTP_DEBUG_PRINTK_IPADDR("send_asconf_del_ip: keep the last address asoc: %p ",
+ " at %p\n", asoc, asoc->asconf_addr_del_pending,
+ asoc->asconf_addr_del_pending);
+ asoc->src_out_of_asoc_ok = 1;
+ stored = 1;
+ goto skip_mkasconf;
+ }
+
+ if (laddr == NULL)
+ return -EINVAL;
/* We do not need RCU protection throughout this loop
* because this is done under a socket lock from the
@@ -780,12 +829,13 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
goto out;
}
+skip_mkasconf:
/* Reset use_as_src flag for the addresses in the bind address
* list that are to be deleted.
*/
addr_buf = addrs;
for (i = 0; i < addrcnt; i++) {
- laddr = (union sctp_addr *)addr_buf;
+ laddr = addr_buf;
af = sctp_get_af_specific(laddr->v4.sin_family);
list_for_each_entry(saddr, &bp->address_list, list) {
if (sctp_cmp_addr_exact(&saddr->a, laddr))
@@ -805,12 +855,37 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
sctp_sk(asoc->base.sk));
}
+ if (stored)
+ /* We don't need to transmit ASCONF */
+ continue;
retval = sctp_send_asconf(asoc, chunk);
}
out:
return retval;
}
+/* set addr events to assocs in the endpoint. ep and addr_wq must be locked */
+int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
+{
+ struct sock *sk = sctp_opt2sk(sp);
+ union sctp_addr *addr;
+ struct sctp_af *af;
+
+ /* It is safe to write port space in caller. */
+ addr = &addrw->a;
+ addr->v4.sin_port = htons(sp->ep->base.bind_addr.port);
+ af = sctp_get_af_specific(addr->sa.sa_family);
+ if (!af)
+ return -EINVAL;
+ if (sctp_verify_addr(sk, addr, af->sockaddr_len))
+ return -EINVAL;
+
+ if (addrw->state == SCTP_ADDR_NEW)
+ return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1);
+ else
+ return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1);
+}
+
/* Helper for tunneling sctp_bindx() requests through sctp_setsockopt()
*
* API 8.1
@@ -927,7 +1002,7 @@ SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk,
return -EINVAL;
}
- sa_addr = (struct sockaddr *)addr_buf;
+ sa_addr = addr_buf;
af = sctp_get_af_specific(sa_addr->sa_family);
/* If the address family is not supported or if this address
@@ -1018,7 +1093,7 @@ static int __sctp_connect(struct sock* sk,
goto out_free;
}
- sa_addr = (union sctp_addr *)addr_buf;
+ sa_addr = addr_buf;
af = sctp_get_af_specific(sa_addr->sa.sa_family);
/* If the address family is not supported or if this address
@@ -1302,11 +1377,19 @@ SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk,
/*
* New (hopefully final) interface for the API.
 * We use the sctp_getaddrs_old structure so that user-space library
- * can avoid any unnecessary allocations. The only defferent part
+ * can avoid any unnecessary allocations. The only different part
* is that we store the actual length of the address buffer into the
- * addrs_num structure member. That way we can re-use the existing
+ * addrs_num structure member. That way we can re-use the existing
* code.
*/
+#ifdef CONFIG_COMPAT
+struct compat_sctp_getaddrs_old {
+ sctp_assoc_t assoc_id;
+ s32 addr_num;
+ compat_uptr_t addrs; /* struct sockaddr * */
+};
+#endif
+
SCTP_STATIC int sctp_getsockopt_connectx3(struct sock* sk, int len,
char __user *optval,
int __user *optlen)
@@ -1315,16 +1398,30 @@ SCTP_STATIC int sctp_getsockopt_connectx3(struct sock* sk, int len,
sctp_assoc_t assoc_id = 0;
int err = 0;
- if (len < sizeof(param))
- return -EINVAL;
+#ifdef CONFIG_COMPAT
+ if (is_compat_task()) {
+ struct compat_sctp_getaddrs_old param32;
- if (copy_from_user(&param, optval, sizeof(param)))
- return -EFAULT;
+ if (len < sizeof(param32))
+ return -EINVAL;
+ if (copy_from_user(&param32, optval, sizeof(param32)))
+ return -EFAULT;
- err = __sctp_setsockopt_connectx(sk,
- (struct sockaddr __user *)param.addrs,
- param.addr_num, &assoc_id);
+ param.assoc_id = param32.assoc_id;
+ param.addr_num = param32.addr_num;
+ param.addrs = compat_ptr(param32.addrs);
+ } else
+#endif
+ {
+ if (len < sizeof(param))
+ return -EINVAL;
+ if (copy_from_user(&param, optval, sizeof(param)))
+ return -EFAULT;
+ }
+ err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
+ param.addrs, param.addr_num,
+ &assoc_id);
if (err == 0 || err == -EINPROGRESS) {
if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
return -EFAULT;
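/*
 * Userspace sketch of the connectx3 tunnelling described above (illustration
 * only): the library packs the peer addresses, passes their total byte length
 * in addr_num, and reads the new association id back from the same buffer.
 * The struct below mirrors the native sctp_getaddrs_old layout implied by the
 * compat version in this hunk; SCTP_SOCKOPT_CONNECTX3 is assumed to come from
 * the installed SCTP headers.
 */
#include <errno.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

struct getaddrs_old_sketch {		/* mirrors struct sctp_getaddrs_old */
	sctp_assoc_t assoc_id;
	int addr_num;			/* total bytes of packed sockaddrs */
	struct sockaddr *addrs;
};

static int connectx3_sketch(int fd, struct sockaddr *addrs, int addrs_size,
			    sctp_assoc_t *id)
{
	struct getaddrs_old_sketch param = {
		.assoc_id = 0, .addr_num = addrs_size, .addrs = addrs,
	};
	socklen_t len = sizeof(param);
	int rc = getsockopt(fd, IPPROTO_SCTP, SCTP_SOCKOPT_CONNECTX3,
			    &param, &len);

	if (rc == 0 || errno == EINPROGRESS)
		*id = param.assoc_id;	/* kernel copies the assoc_id back here */
	return rc;
}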
@@ -1442,8 +1539,10 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
/* Supposedly, no process has access to the socket, but
* the net layers still may.
+ * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
+ * held and that should be grabbed before socket lock.
*/
- sctp_local_bh_disable();
+ spin_lock_bh(&sctp_globals.addr_wq_lock);
sctp_bh_lock_sock(sk);
/* Hold the sock, since sk_common_release() will put sock_put()
@@ -1453,7 +1552,7 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
sk_common_release(sk);
sctp_bh_unlock_sock(sk);
- sctp_local_bh_enable();
+ spin_unlock_bh(&sctp_globals.addr_wq_lock);
sock_put(sk);
@@ -1514,6 +1613,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
sctp_scope_t scope;
long timeo;
__u16 sinfo_flags = 0;
+ bool wait_connect = false;
struct sctp_datamsg *datamsg;
int msg_flags = msg->msg_flags;
@@ -1832,6 +1932,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
err = sctp_primitive_ASSOCIATE(asoc, NULL);
if (err < 0)
goto out_free;
+ wait_connect = true;
SCTP_DEBUG_PRINTK("We associated primitively.\n");
}
@@ -1871,6 +1972,11 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
else
err = msg_len;
+ if (unlikely(wait_connect)) {
+ timeo = sock_sndtimeo(sk, msg_flags & MSG_DONTWAIT);
+ sctp_wait_for_connect(asoc, &timeo);
+ }
+
/* If we are already past ASSOCIATE, the lower
* layers are responsible for association cleanup.
*/
@@ -3219,11 +3325,11 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk,
return -EFAULT;
switch (val.sauth_chunk) {
- case SCTP_CID_INIT:
- case SCTP_CID_INIT_ACK:
- case SCTP_CID_SHUTDOWN_COMPLETE:
- case SCTP_CID_AUTH:
- return -EINVAL;
+ case SCTP_CID_INIT:
+ case SCTP_CID_INIT_ACK:
+ case SCTP_CID_SHUTDOWN_COMPLETE:
+ case SCTP_CID_AUTH:
+ return -EINVAL;
}
/* add this chunk id to the endpoint */
@@ -3366,6 +3472,48 @@ static int sctp_setsockopt_del_key(struct sock *sk,
}
+/*
+ * 8.1.23 SCTP_AUTO_ASCONF
+ *
+ * This option will enable or disable the use of the automatic generation of
+ * ASCONF chunks to add and delete addresses to an existing association. Note
+ * that this option has two caveats namely: a) it only affects sockets that
+ * are bound to all addresses available to the SCTP stack, and b) the system
+ * administrator may have an overriding control that turns the ASCONF feature
+ * off no matter what setting the socket option may have.
+ * This option expects an integer boolean flag, where a non-zero value turns on
+ * the option, and a zero value turns off the option.
+ * Note: in this implementation, the per-socket setting overrides the default
+ * set by sysctl, as does the FreeBSD implementation.
+ */
+static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
+ unsigned int optlen)
+{
+ int val;
+ struct sctp_sock *sp = sctp_sk(sk);
+
+ if (optlen < sizeof(int))
+ return -EINVAL;
+ if (get_user(val, (int __user *)optval))
+ return -EFAULT;
+ if (!sctp_is_ep_boundall(sk) && val)
+ return -EINVAL;
+ if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf))
+ return 0;
+
+ spin_lock_bh(&sctp_globals.addr_wq_lock);
+ if (val == 0 && sp->do_auto_asconf) {
+ list_del(&sp->auto_asconf_list);
+ sp->do_auto_asconf = 0;
+ } else if (val && !sp->do_auto_asconf) {
+ list_add_tail(&sp->auto_asconf_list,
+ &sctp_auto_asconf_splist);
+ sp->do_auto_asconf = 1;
+ }
+ spin_unlock_bh(&sctp_globals.addr_wq_lock);
+ return 0;
+}
+
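/*
 * Minimal userspace sketch of the new option (illustration only): enable
 * automatic ASCONF on a socket bound to all addresses and read the setting
 * back.  Assumes SCTP_AUTO_ASCONF is exported by the installed SCTP headers;
 * the system-wide default comes from the net.sctp.default_auto_asconf sysctl
 * added later in this patch.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

static int enable_auto_asconf(int fd)
{
	int on = 1, now = 0;
	socklen_t len = sizeof(now);

	/* Fails with EINVAL unless the socket is bound to INADDR_ANY/in6addr_any. */
	if (setsockopt(fd, IPPROTO_SCTP, SCTP_AUTO_ASCONF, &on, sizeof(on)) < 0)
		return -1;
	if (getsockopt(fd, IPPROTO_SCTP, SCTP_AUTO_ASCONF, &now, &len) < 0)
		return -1;
	return now;	/* 1 if auto ASCONF is active on this endpoint */
}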
/* API 6.2 setsockopt(), getsockopt()
*
@@ -3513,6 +3661,9 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
case SCTP_AUTH_DELETE_KEY:
retval = sctp_setsockopt_del_key(sk, optval, optlen);
break;
+ case SCTP_AUTO_ASCONF:
+ retval = sctp_setsockopt_auto_asconf(sk, optval, optlen);
+ break;
default:
retval = -ENOPROTOOPT;
break;
@@ -3795,27 +3946,47 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
local_bh_disable();
percpu_counter_inc(&sctp_sockets_allocated);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+
+ /* Nothing can fail after this block, otherwise
+ * sctp_destroy_sock() will be called without addr_wq_lock held
+ */
+ if (sctp_default_auto_asconf) {
+ spin_lock(&sctp_globals.addr_wq_lock);
+ list_add_tail(&sp->auto_asconf_list,
+ &sctp_auto_asconf_splist);
+ sp->do_auto_asconf = 1;
+ spin_unlock(&sctp_globals.addr_wq_lock);
+ } else {
+ sp->do_auto_asconf = 0;
+ }
+
local_bh_enable();
return 0;
}
-/* Cleanup any SCTP per socket resources. */
+/* Cleanup any SCTP per socket resources. Must be called with
+ * sctp_globals.addr_wq_lock held if sp->do_auto_asconf is true
+ */
SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
{
- struct sctp_endpoint *ep;
+ struct sctp_sock *sp;
SCTP_DEBUG_PRINTK("sctp_destroy_sock(sk: %p)\n", sk);
/* Release our hold on the endpoint. */
- ep = sctp_sk(sk)->ep;
+ sp = sctp_sk(sk);
/* This could happen during socket init, thus we bail out
* early, since the rest of the below is not setup either.
*/
- if (ep == NULL)
+ if (sp->ep == NULL)
return;
- sctp_endpoint_free(ep);
+ if (sp->do_auto_asconf) {
+ sp->do_auto_asconf = 0;
+ list_del(&sp->auto_asconf_list);
+ }
+ sctp_endpoint_free(sp->ep);
local_bh_disable();
percpu_counter_dec(&sctp_sockets_allocated);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
@@ -5316,6 +5487,28 @@ static int sctp_getsockopt_assoc_number(struct sock *sk, int len,
}
/*
+ * 8.1.23 SCTP_AUTO_ASCONF
+ * See the corresponding setsockopt entry as description
+ */
+static int sctp_getsockopt_auto_asconf(struct sock *sk, int len,
+ char __user *optval, int __user *optlen)
+{
+ int val = 0;
+
+ if (len < sizeof(int))
+ return -EINVAL;
+
+ len = sizeof(int);
+ if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk))
+ val = 1;
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &val, len))
+ return -EFAULT;
+ return 0;
+}
+
+/*
* 8.2.6. Get the Current Identifiers of Associations
* (SCTP_GET_ASSOC_ID_LIST)
*
@@ -5499,6 +5692,9 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
case SCTP_GET_ASSOC_ID_LIST:
retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen);
break;
+ case SCTP_AUTO_ASCONF:
+ retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen);
+ break;
default:
retval = -ENOPROTOOPT;
break;
@@ -6537,6 +6733,19 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
newinet->mc_list = NULL;
}
+static inline void sctp_copy_descendant(struct sock *sk_to,
+ const struct sock *sk_from)
+{
+ int ancestor_size = sizeof(struct inet_sock) +
+ sizeof(struct sctp_sock) -
+ offsetof(struct sctp_sock, auto_asconf_list);
+
+ if (sk_from->sk_family == PF_INET6)
+ ancestor_size += sizeof(struct ipv6_pinfo);
+
+ __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
+}
+
/* Populate the fields of the newsk from the oldsk and migrate the assoc
* and its messages to the newsk.
*/
@@ -6558,7 +6767,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
newsk->sk_sndbuf = oldsk->sk_sndbuf;
newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
/* Brute force copy old sctp opt. */
- inet_sk_copy_descendant(newsk, oldsk);
+ sctp_copy_descendant(newsk, oldsk);
/* Restore the ep value that was overwritten with the above structure
* copy.
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 6752f48..60ffbd0 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -187,6 +187,13 @@ static ctl_table sctp_table[] = {
.proc_handler = proc_dointvec,
},
{
+ .procname = "default_auto_asconf",
+ .data = &sctp_default_auto_asconf,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
.procname = "prsctp_enable",
.data = &sctp_prsctp_enable,
.maxlen = sizeof(int),
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 8a84017..57da447 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -373,9 +373,10 @@ fail:
* specification [SCTP] and any extensions for a list of possible
* error formats.
*/
-struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
- const struct sctp_association *asoc, struct sctp_chunk *chunk,
- __u16 flags, gfp_t gfp)
+struct sctp_ulpevent *
+sctp_ulpevent_make_remote_error(const struct sctp_association *asoc,
+ struct sctp_chunk *chunk, __u16 flags,
+ gfp_t gfp)
{
struct sctp_ulpevent *event;
struct sctp_remote_error *sre;
@@ -394,8 +395,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
/* Copy the skb to a new skb with room for us to prepend
* notification with.
*/
- skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_remote_error),
- 0, gfp);
+ skb = skb_copy_expand(chunk->skb, sizeof(*sre), 0, gfp);
/* Pull off the rest of the cause TLV from the chunk. */
skb_pull(chunk->skb, elen);
@@ -406,62 +406,21 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
event = sctp_skb2event(skb);
sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
- sre = (struct sctp_remote_error *)
- skb_push(skb, sizeof(struct sctp_remote_error));
+ sre = (struct sctp_remote_error *) skb_push(skb, sizeof(*sre));
/* Trim the buffer to the right length. */
- skb_trim(skb, sizeof(struct sctp_remote_error) + elen);
+ skb_trim(skb, sizeof(*sre) + elen);
- /* Socket Extensions for SCTP
- * 5.3.1.3 SCTP_REMOTE_ERROR
- *
- * sre_type:
- * It should be SCTP_REMOTE_ERROR.
- */
+ /* RFC6458, Section 6.1.3. SCTP_REMOTE_ERROR */
+ memset(sre, 0, sizeof(*sre));
sre->sre_type = SCTP_REMOTE_ERROR;
-
- /*
- * Socket Extensions for SCTP
- * 5.3.1.3 SCTP_REMOTE_ERROR
- *
- * sre_flags: 16 bits (unsigned integer)
- * Currently unused.
- */
sre->sre_flags = 0;
-
- /* Socket Extensions for SCTP
- * 5.3.1.3 SCTP_REMOTE_ERROR
- *
- * sre_length: sizeof (__u32)
- *
- * This field is the total length of the notification data,
- * including the notification header.
- */
sre->sre_length = skb->len;
-
- /* Socket Extensions for SCTP
- * 5.3.1.3 SCTP_REMOTE_ERROR
- *
- * sre_error: 16 bits (unsigned integer)
- * This value represents one of the Operational Error causes defined in
- * the SCTP specification, in network byte order.
- */
sre->sre_error = cause;
-
- /* Socket Extensions for SCTP
- * 5.3.1.3 SCTP_REMOTE_ERROR
- *
- * sre_assoc_id: sizeof (sctp_assoc_t)
- *
- * The association id field, holds the identifier for the association.
- * All notifications for a given association have the same association
- * identifier. For TCP style socket, this field is ignored.
- */
sctp_ulpevent_set_owner(event, asoc);
sre->sre_assoc_id = sctp_assoc2id(asoc);
return event;
-
fail:
return NULL;
}
@@ -904,7 +863,9 @@ __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event)
return notification->sn_header.sn_type;
}
-/* Copy out the sndrcvinfo into a msghdr. */
+/* RFC6458, Section 5.3.2. SCTP Header Information Structure
+ * (SCTP_SNDRCV, DEPRECATED)
+ */
void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
struct msghdr *msghdr)
{
@@ -913,74 +874,21 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
if (sctp_ulpevent_is_notification(event))
return;
- /* Sockets API Extensions for SCTP
- * Section 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
- *
- * sinfo_stream: 16 bits (unsigned integer)
- *
- * For recvmsg() the SCTP stack places the message's stream number in
- * this value.
- */
+ memset(&sinfo, 0, sizeof(sinfo));
sinfo.sinfo_stream = event->stream;
- /* sinfo_ssn: 16 bits (unsigned integer)
- *
- * For recvmsg() this value contains the stream sequence number that
- * the remote endpoint placed in the DATA chunk. For fragmented
- * messages this is the same number for all deliveries of the message
- * (if more than one recvmsg() is needed to read the message).
- */
sinfo.sinfo_ssn = event->ssn;
- /* sinfo_ppid: 32 bits (unsigned integer)
- *
- * In recvmsg() this value is
- * the same information that was passed by the upper layer in the peer
- * application. Please note that byte order issues are NOT accounted
- * for and this information is passed opaquely by the SCTP stack from
- * one end to the other.
- */
sinfo.sinfo_ppid = event->ppid;
- /* sinfo_flags: 16 bits (unsigned integer)
- *
- * This field may contain any of the following flags and is composed of
- * a bitwise OR of these values.
- *
- * recvmsg() flags:
- *
- * SCTP_UNORDERED - This flag is present when the message was sent
- * non-ordered.
- */
sinfo.sinfo_flags = event->flags;
- /* sinfo_tsn: 32 bit (unsigned integer)
- *
- * For the receiving side, this field holds a TSN that was
- * assigned to one of the SCTP Data Chunks.
- */
sinfo.sinfo_tsn = event->tsn;
- /* sinfo_cumtsn: 32 bit (unsigned integer)
- *
- * This field will hold the current cumulative TSN as
- * known by the underlying SCTP layer. Note this field is
- * ignored when sending and only valid for a receive
- * operation when sinfo_flags are set to SCTP_UNORDERED.
- */
sinfo.sinfo_cumtsn = event->cumtsn;
- /* sinfo_assoc_id: sizeof (sctp_assoc_t)
- *
- * The association handle field, sinfo_assoc_id, holds the identifier
- * for the association announced in the COMMUNICATION_UP notification.
- * All notifications for a given association have the same identifier.
- * Ignored for one-to-one style sockets.
- */
sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc);
-
- /* context value that is set via SCTP_CONTEXT socket option. */
+ /* Context value that is set via SCTP_CONTEXT socket option. */
sinfo.sinfo_context = event->asoc->default_rcv_context;
-
/* These fields are not used while receiving. */
sinfo.sinfo_timetolive = 0;
put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV,
- sizeof(struct sctp_sndrcvinfo), (void *)&sinfo);
+ sizeof(sinfo), &sinfo);
}
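/*
 * Receiver-side sketch (illustration only) of consuming the SCTP_SNDRCV
 * ancillary data that the function above attaches to each data message.
 * Assumes the socket has subscribed to sctp_data_io_event via SCTP_EVENTS;
 * otherwise no such cmsg is delivered.
 */
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

static ssize_t recv_with_sinfo(int fd, void *buf, size_t buflen,
			       struct sctp_sndrcvinfo *out)
{
	char cbuf[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
	struct iovec iov = { .iov_base = buf, .iov_len = buflen };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	ssize_t n = recvmsg(fd, &msg, 0);

	if (n < 0)
		return n;
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
		if (cmsg->cmsg_level == IPPROTO_SCTP &&
		    cmsg->cmsg_type == SCTP_SNDRCV)
			memcpy(out, CMSG_DATA(cmsg), sizeof(*out));
	return n;
}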
/* Do accounting for bytes received and hold a reference to the association
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 5867429..07b9973 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -237,21 +237,21 @@ static int x25_device_event(struct notifier_block *this, unsigned long event,
#endif
) {
switch (event) {
- case NETDEV_UP:
- x25_link_device_up(dev);
- break;
- case NETDEV_GOING_DOWN:
- nb = x25_get_neigh(dev);
- if (nb) {
- x25_terminate_link(nb);
- x25_neigh_put(nb);
- }
- break;
- case NETDEV_DOWN:
- x25_kill_by_device(dev);
- x25_route_device_down(dev);
- x25_link_device_down(dev);
- break;
+ case NETDEV_UP:
+ x25_link_device_up(dev);
+ break;
+ case NETDEV_GOING_DOWN:
+ nb = x25_get_neigh(dev);
+ if (nb) {
+ x25_terminate_link(nb);
+ x25_neigh_put(nb);
+ }
+ break;
+ case NETDEV_DOWN:
+ x25_kill_by_device(dev);
+ x25_route_device_down(dev);
+ x25_link_device_down(dev);
+ break;
}
}
@@ -1261,14 +1261,19 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
struct x25_sock *x25 = x25_sk(sk);
struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)msg->msg_name;
size_t copied;
- int qbit, header_len = x25->neighbour->extended ?
- X25_EXT_MIN_LEN : X25_STD_MIN_LEN;
-
+ int qbit, header_len;
struct sk_buff *skb;
unsigned char *asmptr;
int rc = -ENOTCONN;
lock_sock(sk);
+
+ if (x25->neighbour == NULL)
+ goto out;
+
+ header_len = x25->neighbour->extended ?
+ X25_EXT_MIN_LEN : X25_STD_MIN_LEN;
+
/*
* This works for seqpacket too. The receiver has ordered the queue for
* us! We do one quick check first though
@@ -1338,10 +1343,9 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
if (sx25) {
sx25->sx25_family = AF_X25;
sx25->sx25_addr = x25->dest_addr;
+ msg->msg_namelen = sizeof(*sx25);
}
- msg->msg_namelen = sizeof(struct sockaddr_x25);
-
x25_check_rbuf(sk);
rc = copied;
out_free_dgram:
@@ -1360,257 +1364,254 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
int rc;
switch (cmd) {
- case TIOCOUTQ: {
- int amount;
+ case TIOCOUTQ: {
+ int amount;
- amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
- if (amount < 0)
- amount = 0;
- rc = put_user(amount, (unsigned int __user *)argp);
- break;
- }
+ amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
+ if (amount < 0)
+ amount = 0;
+ rc = put_user(amount, (unsigned int __user *)argp);
+ break;
+ }
- case TIOCINQ: {
- struct sk_buff *skb;
- int amount = 0;
- /*
- * These two are safe on a single CPU system as
- * only user tasks fiddle here
- */
- lock_sock(sk);
- if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
- amount = skb->len;
- release_sock(sk);
- rc = put_user(amount, (unsigned int __user *)argp);
- break;
- }
+ case TIOCINQ: {
+ struct sk_buff *skb;
+ int amount = 0;
+ /*
+ * These two are safe on a single CPU system as
+ * only user tasks fiddle here
+ */
+ lock_sock(sk);
+ if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
+ amount = skb->len;
+ release_sock(sk);
+ rc = put_user(amount, (unsigned int __user *)argp);
+ break;
+ }
- case SIOCGSTAMP:
- rc = -EINVAL;
- if (sk)
- rc = sock_get_timestamp(sk,
+ case SIOCGSTAMP:
+ rc = -EINVAL;
+ if (sk)
+ rc = sock_get_timestamp(sk,
(struct timeval __user *)argp);
+ break;
+ case SIOCGSTAMPNS:
+ rc = -EINVAL;
+ if (sk)
+ rc = sock_get_timestampns(sk,
+ (struct timespec __user *)argp);
+ break;
+ case SIOCGIFADDR:
+ case SIOCSIFADDR:
+ case SIOCGIFDSTADDR:
+ case SIOCSIFDSTADDR:
+ case SIOCGIFBRDADDR:
+ case SIOCSIFBRDADDR:
+ case SIOCGIFNETMASK:
+ case SIOCSIFNETMASK:
+ case SIOCGIFMETRIC:
+ case SIOCSIFMETRIC:
+ rc = -EINVAL;
+ break;
+ case SIOCADDRT:
+ case SIOCDELRT:
+ rc = -EPERM;
+ if (!capable(CAP_NET_ADMIN))
break;
- case SIOCGSTAMPNS:
- rc = -EINVAL;
- if (sk)
- rc = sock_get_timestampns(sk,
- (struct timespec __user *)argp);
- break;
- case SIOCGIFADDR:
- case SIOCSIFADDR:
- case SIOCGIFDSTADDR:
- case SIOCSIFDSTADDR:
- case SIOCGIFBRDADDR:
- case SIOCSIFBRDADDR:
- case SIOCGIFNETMASK:
- case SIOCSIFNETMASK:
- case SIOCGIFMETRIC:
- case SIOCSIFMETRIC:
- rc = -EINVAL;
- break;
- case SIOCADDRT:
- case SIOCDELRT:
- rc = -EPERM;
- if (!capable(CAP_NET_ADMIN))
- break;
- rc = x25_route_ioctl(cmd, argp);
- break;
- case SIOCX25GSUBSCRIP:
- rc = x25_subscr_ioctl(cmd, argp);
- break;
- case SIOCX25SSUBSCRIP:
- rc = -EPERM;
- if (!capable(CAP_NET_ADMIN))
- break;
- rc = x25_subscr_ioctl(cmd, argp);
- break;
- case SIOCX25GFACILITIES: {
- lock_sock(sk);
- rc = copy_to_user(argp, &x25->facilities,
- sizeof(x25->facilities))
- ? -EFAULT : 0;
- release_sock(sk);
+ rc = x25_route_ioctl(cmd, argp);
+ break;
+ case SIOCX25GSUBSCRIP:
+ rc = x25_subscr_ioctl(cmd, argp);
+ break;
+ case SIOCX25SSUBSCRIP:
+ rc = -EPERM;
+ if (!capable(CAP_NET_ADMIN))
break;
- }
+ rc = x25_subscr_ioctl(cmd, argp);
+ break;
+ case SIOCX25GFACILITIES: {
+ lock_sock(sk);
+ rc = copy_to_user(argp, &x25->facilities,
+ sizeof(x25->facilities))
+ ? -EFAULT : 0;
+ release_sock(sk);
+ break;
+ }
- case SIOCX25SFACILITIES: {
- struct x25_facilities facilities;
- rc = -EFAULT;
- if (copy_from_user(&facilities, argp,
- sizeof(facilities)))
- break;
- rc = -EINVAL;
- lock_sock(sk);
- if (sk->sk_state != TCP_LISTEN &&
- sk->sk_state != TCP_CLOSE)
- goto out_fac_release;
- if (facilities.pacsize_in < X25_PS16 ||
- facilities.pacsize_in > X25_PS4096)
- goto out_fac_release;
- if (facilities.pacsize_out < X25_PS16 ||
- facilities.pacsize_out > X25_PS4096)
- goto out_fac_release;
- if (facilities.winsize_in < 1 ||
- facilities.winsize_in > 127)
+ case SIOCX25SFACILITIES: {
+ struct x25_facilities facilities;
+ rc = -EFAULT;
+ if (copy_from_user(&facilities, argp, sizeof(facilities)))
+ break;
+ rc = -EINVAL;
+ lock_sock(sk);
+ if (sk->sk_state != TCP_LISTEN &&
+ sk->sk_state != TCP_CLOSE)
+ goto out_fac_release;
+ if (facilities.pacsize_in < X25_PS16 ||
+ facilities.pacsize_in > X25_PS4096)
+ goto out_fac_release;
+ if (facilities.pacsize_out < X25_PS16 ||
+ facilities.pacsize_out > X25_PS4096)
+ goto out_fac_release;
+ if (facilities.winsize_in < 1 ||
+ facilities.winsize_in > 127)
+ goto out_fac_release;
+ if (facilities.throughput) {
+ int out = facilities.throughput & 0xf0;
+ int in = facilities.throughput & 0x0f;
+ if (!out)
+ facilities.throughput |=
+ X25_DEFAULT_THROUGHPUT << 4;
+ else if (out < 0x30 || out > 0xD0)
goto out_fac_release;
- if (facilities.throughput) {
- int out = facilities.throughput & 0xf0;
- int in = facilities.throughput & 0x0f;
- if (!out)
- facilities.throughput |=
- X25_DEFAULT_THROUGHPUT << 4;
- else if (out < 0x30 || out > 0xD0)
- goto out_fac_release;
- if (!in)
- facilities.throughput |=
- X25_DEFAULT_THROUGHPUT;
- else if (in < 0x03 || in > 0x0D)
- goto out_fac_release;
- }
- if (facilities.reverse &&
- (facilities.reverse & 0x81) != 0x81)
+ if (!in)
+ facilities.throughput |=
+ X25_DEFAULT_THROUGHPUT;
+ else if (in < 0x03 || in > 0x0D)
goto out_fac_release;
- x25->facilities = facilities;
- rc = 0;
-out_fac_release:
- release_sock(sk);
- break;
- }
-
- case SIOCX25GDTEFACILITIES: {
- lock_sock(sk);
- rc = copy_to_user(argp, &x25->dte_facilities,
- sizeof(x25->dte_facilities));
- release_sock(sk);
- if (rc)
- rc = -EFAULT;
- break;
}
+ if (facilities.reverse &&
+ (facilities.reverse & 0x81) != 0x81)
+ goto out_fac_release;
+ x25->facilities = facilities;
+ rc = 0;
+out_fac_release:
+ release_sock(sk);
+ break;
+ }
- case SIOCX25SDTEFACILITIES: {
- struct x25_dte_facilities dtefacs;
+ case SIOCX25GDTEFACILITIES: {
+ lock_sock(sk);
+ rc = copy_to_user(argp, &x25->dte_facilities,
+ sizeof(x25->dte_facilities));
+ release_sock(sk);
+ if (rc)
rc = -EFAULT;
- if (copy_from_user(&dtefacs, argp, sizeof(dtefacs)))
- break;
- rc = -EINVAL;
- lock_sock(sk);
- if (sk->sk_state != TCP_LISTEN &&
- sk->sk_state != TCP_CLOSE)
- goto out_dtefac_release;
- if (dtefacs.calling_len > X25_MAX_AE_LEN)
- goto out_dtefac_release;
- if (dtefacs.calling_ae == NULL)
- goto out_dtefac_release;
- if (dtefacs.called_len > X25_MAX_AE_LEN)
- goto out_dtefac_release;
- if (dtefacs.called_ae == NULL)
- goto out_dtefac_release;
- x25->dte_facilities = dtefacs;
- rc = 0;
-out_dtefac_release:
- release_sock(sk);
- break;
- }
+ break;
+ }
- case SIOCX25GCALLUSERDATA: {
- lock_sock(sk);
- rc = copy_to_user(argp, &x25->calluserdata,
- sizeof(x25->calluserdata))
- ? -EFAULT : 0;
- release_sock(sk);
+ case SIOCX25SDTEFACILITIES: {
+ struct x25_dte_facilities dtefacs;
+ rc = -EFAULT;
+ if (copy_from_user(&dtefacs, argp, sizeof(dtefacs)))
break;
- }
+ rc = -EINVAL;
+ lock_sock(sk);
+ if (sk->sk_state != TCP_LISTEN &&
+ sk->sk_state != TCP_CLOSE)
+ goto out_dtefac_release;
+ if (dtefacs.calling_len > X25_MAX_AE_LEN)
+ goto out_dtefac_release;
+ if (dtefacs.calling_ae == NULL)
+ goto out_dtefac_release;
+ if (dtefacs.called_len > X25_MAX_AE_LEN)
+ goto out_dtefac_release;
+ if (dtefacs.called_ae == NULL)
+ goto out_dtefac_release;
+ x25->dte_facilities = dtefacs;
+ rc = 0;
+out_dtefac_release:
+ release_sock(sk);
+ break;
+ }
- case SIOCX25SCALLUSERDATA: {
- struct x25_calluserdata calluserdata;
+ case SIOCX25GCALLUSERDATA: {
+ lock_sock(sk);
+ rc = copy_to_user(argp, &x25->calluserdata,
+ sizeof(x25->calluserdata))
+ ? -EFAULT : 0;
+ release_sock(sk);
+ break;
+ }
- rc = -EFAULT;
- if (copy_from_user(&calluserdata, argp,
- sizeof(calluserdata)))
- break;
- rc = -EINVAL;
- if (calluserdata.cudlength > X25_MAX_CUD_LEN)
- break;
- lock_sock(sk);
- x25->calluserdata = calluserdata;
- release_sock(sk);
- rc = 0;
- break;
- }
+ case SIOCX25SCALLUSERDATA: {
+ struct x25_calluserdata calluserdata;
- case SIOCX25GCAUSEDIAG: {
- lock_sock(sk);
- rc = copy_to_user(argp, &x25->causediag,
- sizeof(x25->causediag))
- ? -EFAULT : 0;
- release_sock(sk);
+ rc = -EFAULT;
+ if (copy_from_user(&calluserdata, argp, sizeof(calluserdata)))
break;
- }
+ rc = -EINVAL;
+ if (calluserdata.cudlength > X25_MAX_CUD_LEN)
+ break;
+ lock_sock(sk);
+ x25->calluserdata = calluserdata;
+ release_sock(sk);
+ rc = 0;
+ break;
+ }
- case SIOCX25SCAUSEDIAG: {
- struct x25_causediag causediag;
- rc = -EFAULT;
- if (copy_from_user(&causediag, argp, sizeof(causediag)))
- break;
- lock_sock(sk);
- x25->causediag = causediag;
- release_sock(sk);
- rc = 0;
+ case SIOCX25GCAUSEDIAG: {
+ lock_sock(sk);
+ rc = copy_to_user(argp, &x25->causediag, sizeof(x25->causediag))
+ ? -EFAULT : 0;
+ release_sock(sk);
+ break;
+ }
+
+ case SIOCX25SCAUSEDIAG: {
+ struct x25_causediag causediag;
+ rc = -EFAULT;
+ if (copy_from_user(&causediag, argp, sizeof(causediag)))
break;
+ lock_sock(sk);
+ x25->causediag = causediag;
+ release_sock(sk);
+ rc = 0;
+ break;
- }
+ }
- case SIOCX25SCUDMATCHLEN: {
- struct x25_subaddr sub_addr;
- rc = -EINVAL;
- lock_sock(sk);
- if(sk->sk_state != TCP_CLOSE)
- goto out_cud_release;
- rc = -EFAULT;
- if (copy_from_user(&sub_addr, argp,
- sizeof(sub_addr)))
- goto out_cud_release;
- rc = -EINVAL;
- if(sub_addr.cudmatchlength > X25_MAX_CUD_LEN)
- goto out_cud_release;
- x25->cudmatchlength = sub_addr.cudmatchlength;
- rc = 0;
+ case SIOCX25SCUDMATCHLEN: {
+ struct x25_subaddr sub_addr;
+ rc = -EINVAL;
+ lock_sock(sk);
+ if(sk->sk_state != TCP_CLOSE)
+ goto out_cud_release;
+ rc = -EFAULT;
+ if (copy_from_user(&sub_addr, argp,
+ sizeof(sub_addr)))
+ goto out_cud_release;
+ rc = -EINVAL;
+ if (sub_addr.cudmatchlength > X25_MAX_CUD_LEN)
+ goto out_cud_release;
+ x25->cudmatchlength = sub_addr.cudmatchlength;
+ rc = 0;
out_cud_release:
- release_sock(sk);
- break;
- }
+ release_sock(sk);
+ break;
+ }
- case SIOCX25CALLACCPTAPPRV: {
- rc = -EINVAL;
- lock_sock(sk);
- if (sk->sk_state == TCP_CLOSE) {
- clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags);
- rc = 0;
- }
- release_sock(sk);
- break;
+ case SIOCX25CALLACCPTAPPRV: {
+ rc = -EINVAL;
+ lock_sock(sk);
+ if (sk->sk_state == TCP_CLOSE) {
+ clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags);
+ rc = 0;
}
+ release_sock(sk);
+ break;
+ }
- case SIOCX25SENDCALLACCPT: {
- rc = -EINVAL;
- lock_sock(sk);
- if (sk->sk_state != TCP_ESTABLISHED)
- goto out_sendcallaccpt_release;
- /* must call accptapprv above */
- if (test_bit(X25_ACCPT_APPRV_FLAG, &x25->flags))
- goto out_sendcallaccpt_release;
- x25_write_internal(sk, X25_CALL_ACCEPTED);
- x25->state = X25_STATE_3;
- rc = 0;
+ case SIOCX25SENDCALLACCPT: {
+ rc = -EINVAL;
+ lock_sock(sk);
+ if (sk->sk_state != TCP_ESTABLISHED)
+ goto out_sendcallaccpt_release;
+ /* must call accptapprv above */
+ if (test_bit(X25_ACCPT_APPRV_FLAG, &x25->flags))
+ goto out_sendcallaccpt_release;
+ x25_write_internal(sk, X25_CALL_ACCEPTED);
+ x25->state = X25_STATE_3;
+ rc = 0;
out_sendcallaccpt_release:
- release_sock(sk);
- break;
- }
+ release_sock(sk);
+ break;
+ }
- default:
- rc = -ENOIOCTLCMD;
- break;
+ default:
+ rc = -ENOIOCTLCMD;
+ break;
}
return rc;
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index 60749c5..fa2b418 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -152,21 +152,21 @@ void x25_establish_link(struct x25_neigh *nb)
unsigned char *ptr;
switch (nb->dev->type) {
- case ARPHRD_X25:
- if ((skb = alloc_skb(1, GFP_ATOMIC)) == NULL) {
- printk(KERN_ERR "x25_dev: out of memory\n");
- return;
- }
- ptr = skb_put(skb, 1);
- *ptr = X25_IFACE_CONNECT;
- break;
+ case ARPHRD_X25:
+ if ((skb = alloc_skb(1, GFP_ATOMIC)) == NULL) {
+ printk(KERN_ERR "x25_dev: out of memory\n");
+ return;
+ }
+ ptr = skb_put(skb, 1);
+ *ptr = X25_IFACE_CONNECT;
+ break;
#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
- case ARPHRD_ETHER:
- return;
+ case ARPHRD_ETHER:
+ return;
#endif
- default:
- return;
+ default:
+ return;
}
skb->protocol = htons(ETH_P_X25);
@@ -208,19 +208,19 @@ void x25_send_frame(struct sk_buff *skb, struct x25_neigh *nb)
skb_reset_network_header(skb);
switch (nb->dev->type) {
- case ARPHRD_X25:
- dptr = skb_push(skb, 1);
- *dptr = X25_IFACE_DATA;
- break;
+ case ARPHRD_X25:
+ dptr = skb_push(skb, 1);
+ *dptr = X25_IFACE_DATA;
+ break;
#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
- case ARPHRD_ETHER:
- kfree_skb(skb);
- return;
+ case ARPHRD_ETHER:
+ kfree_skb(skb);
+ return;
#endif
- default:
- kfree_skb(skb);
- return;
+ default:
+ kfree_skb(skb);
+ return;
}
skb->protocol = htons(ETH_P_X25);
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index 36ab913..a49cd4e 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -94,62 +94,62 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
struct x25_sock *x25 = x25_sk(sk);
switch (frametype) {
- case X25_CALL_ACCEPTED: {
-
- x25_stop_timer(sk);
- x25->condition = 0x00;
- x25->vs = 0;
- x25->va = 0;
- x25->vr = 0;
- x25->vl = 0;
- x25->state = X25_STATE_3;
- sk->sk_state = TCP_ESTABLISHED;
- /*
- * Parse the data in the frame.
- */
- if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
- goto out_clear;
- skb_pull(skb, X25_STD_MIN_LEN);
-
- len = x25_parse_address_block(skb, &source_addr,
- &dest_addr);
- if (len > 0)
- skb_pull(skb, len);
- else if (len < 0)
+ case X25_CALL_ACCEPTED: {
+
+ x25_stop_timer(sk);
+ x25->condition = 0x00;
+ x25->vs = 0;
+ x25->va = 0;
+ x25->vr = 0;
+ x25->vl = 0;
+ x25->state = X25_STATE_3;
+ sk->sk_state = TCP_ESTABLISHED;
+ /*
+ * Parse the data in the frame.
+ */
+ if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
+ goto out_clear;
+ skb_pull(skb, X25_STD_MIN_LEN);
+
+ len = x25_parse_address_block(skb, &source_addr,
+ &dest_addr);
+ if (len > 0)
+ skb_pull(skb, len);
+ else if (len < 0)
+ goto out_clear;
+
+ len = x25_parse_facilities(skb, &x25->facilities,
+ &x25->dte_facilities,
+ &x25->vc_facil_mask);
+ if (len > 0)
+ skb_pull(skb, len);
+ else if (len < 0)
+ goto out_clear;
+ /*
+ * Copy any Call User Data.
+ */
+ if (skb->len > 0) {
+ if (skb->len > X25_MAX_CUD_LEN)
goto out_clear;
- len = x25_parse_facilities(skb, &x25->facilities,
- &x25->dte_facilities,
- &x25->vc_facil_mask);
- if (len > 0)
- skb_pull(skb, len);
- else if (len < 0)
- goto out_clear;
- /*
- * Copy any Call User Data.
- */
- if (skb->len > 0) {
- if (skb->len > X25_MAX_CUD_LEN)
- goto out_clear;
-
- skb_copy_bits(skb, 0, x25->calluserdata.cuddata,
- skb->len);
- x25->calluserdata.cudlength = skb->len;
- }
- if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_state_change(sk);
- break;
+ skb_copy_bits(skb, 0, x25->calluserdata.cuddata,
+ skb->len);
+ x25->calluserdata.cudlength = skb->len;
}
- case X25_CLEAR_REQUEST:
- if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
- goto out_clear;
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_state_change(sk);
+ break;
+ }
+ case X25_CLEAR_REQUEST:
+ if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
+ goto out_clear;
- x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
- x25_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]);
- break;
+ x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
+ x25_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]);
+ break;
- default:
- break;
+ default:
+ break;
}
return 0;
@@ -387,18 +387,18 @@ int x25_process_rx_frame(struct sock *sk, struct sk_buff *skb)
frametype = x25_decode(sk, skb, &ns, &nr, &q, &d, &m);
switch (x25->state) {
- case X25_STATE_1:
- queued = x25_state1_machine(sk, skb, frametype);
- break;
- case X25_STATE_2:
- queued = x25_state2_machine(sk, skb, frametype);
- break;
- case X25_STATE_3:
- queued = x25_state3_machine(sk, skb, frametype, ns, nr, q, d, m);
- break;
- case X25_STATE_4:
- queued = x25_state4_machine(sk, skb, frametype);
- break;
+ case X25_STATE_1:
+ queued = x25_state1_machine(sk, skb, frametype);
+ break;
+ case X25_STATE_2:
+ queued = x25_state2_machine(sk, skb, frametype);
+ break;
+ case X25_STATE_3:
+ queued = x25_state3_machine(sk, skb, frametype, ns, nr, q, d, m);
+ break;
+ case X25_STATE_4:
+ queued = x25_state4_machine(sk, skb, frametype);
+ break;
}
x25_kick(sk);
diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c
index 0a9e074..4acacf3 100644
--- a/net/x25/x25_link.c
+++ b/net/x25/x25_link.c
@@ -76,33 +76,32 @@ void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb,
int confirm;
switch (frametype) {
- case X25_RESTART_REQUEST:
- confirm = !x25_t20timer_pending(nb);
- x25_stop_t20timer(nb);
- nb->state = X25_LINK_STATE_3;
- if (confirm)
- x25_transmit_restart_confirmation(nb);
+ case X25_RESTART_REQUEST:
+ confirm = !x25_t20timer_pending(nb);
+ x25_stop_t20timer(nb);
+ nb->state = X25_LINK_STATE_3;
+ if (confirm)
+ x25_transmit_restart_confirmation(nb);
+ break;
+
+ case X25_RESTART_CONFIRMATION:
+ x25_stop_t20timer(nb);
+ nb->state = X25_LINK_STATE_3;
+ break;
+
+ case X25_DIAGNOSTIC:
+ if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 4))
break;
- case X25_RESTART_CONFIRMATION:
- x25_stop_t20timer(nb);
- nb->state = X25_LINK_STATE_3;
- break;
-
- case X25_DIAGNOSTIC:
- if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 4))
- break;
+ printk(KERN_WARNING "x25: diagnostic #%d - %02X %02X %02X\n",
+ skb->data[3], skb->data[4],
+ skb->data[5], skb->data[6]);
+ break;
- printk(KERN_WARNING "x25: diagnostic #%d - "
- "%02X %02X %02X\n",
- skb->data[3], skb->data[4],
- skb->data[5], skb->data[6]);
- break;
-
- default:
- printk(KERN_WARNING "x25: received unknown %02X "
- "with LCI 000\n", frametype);
- break;
+ default:
+ printk(KERN_WARNING "x25: received unknown %02X with LCI 000\n",
+ frametype);
+ break;
}
if (nb->state == X25_LINK_STATE_3)
@@ -196,18 +195,18 @@ void x25_transmit_clear_request(struct x25_neigh *nb, unsigned int lci,
void x25_transmit_link(struct sk_buff *skb, struct x25_neigh *nb)
{
switch (nb->state) {
- case X25_LINK_STATE_0:
- skb_queue_tail(&nb->queue, skb);
- nb->state = X25_LINK_STATE_1;
- x25_establish_link(nb);
- break;
- case X25_LINK_STATE_1:
- case X25_LINK_STATE_2:
- skb_queue_tail(&nb->queue, skb);
- break;
- case X25_LINK_STATE_3:
- x25_send_frame(skb, nb);
- break;
+ case X25_LINK_STATE_0:
+ skb_queue_tail(&nb->queue, skb);
+ nb->state = X25_LINK_STATE_1;
+ x25_establish_link(nb);
+ break;
+ case X25_LINK_STATE_1:
+ case X25_LINK_STATE_2:
+ skb_queue_tail(&nb->queue, skb);
+ break;
+ case X25_LINK_STATE_3:
+ x25_send_frame(skb, nb);
+ break;
}
}
@@ -217,14 +216,14 @@ void x25_transmit_link(struct sk_buff *skb, struct x25_neigh *nb)
void x25_link_established(struct x25_neigh *nb)
{
switch (nb->state) {
- case X25_LINK_STATE_0:
- nb->state = X25_LINK_STATE_2;
- break;
- case X25_LINK_STATE_1:
- x25_transmit_restart_request(nb);
- nb->state = X25_LINK_STATE_2;
- x25_start_t20timer(nb);
- break;
+ case X25_LINK_STATE_0:
+ nb->state = X25_LINK_STATE_2;
+ break;
+ case X25_LINK_STATE_1:
+ x25_transmit_restart_request(nb);
+ nb->state = X25_LINK_STATE_2;
+ x25_start_t20timer(nb);
+ break;
}
}
diff --git a/net/x25/x25_proc.c b/net/x25/x25_proc.c
index 7ff3737..2ffde46 100644
--- a/net/x25/x25_proc.c
+++ b/net/x25/x25_proc.c
@@ -20,6 +20,7 @@
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/x25.h>
diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c
index faf98d8..5170d52 100644
--- a/net/x25/x25_subr.c
+++ b/net/x25/x25_subr.c
@@ -126,32 +126,30 @@ void x25_write_internal(struct sock *sk, int frametype)
* Adjust frame size.
*/
switch (frametype) {
- case X25_CALL_REQUEST:
- len += 1 + X25_ADDR_LEN + X25_MAX_FAC_LEN +
- X25_MAX_CUD_LEN;
- break;
- case X25_CALL_ACCEPTED: /* fast sel with no restr on resp */
- if(x25->facilities.reverse & 0x80) {
- len += 1 + X25_MAX_FAC_LEN + X25_MAX_CUD_LEN;
- } else {
- len += 1 + X25_MAX_FAC_LEN;
- }
- break;
- case X25_CLEAR_REQUEST:
- case X25_RESET_REQUEST:
- len += 2;
- break;
- case X25_RR:
- case X25_RNR:
- case X25_REJ:
- case X25_CLEAR_CONFIRMATION:
- case X25_INTERRUPT_CONFIRMATION:
- case X25_RESET_CONFIRMATION:
- break;
- default:
- printk(KERN_ERR "X.25: invalid frame type %02X\n",
- frametype);
- return;
+ case X25_CALL_REQUEST:
+ len += 1 + X25_ADDR_LEN + X25_MAX_FAC_LEN + X25_MAX_CUD_LEN;
+ break;
+ case X25_CALL_ACCEPTED: /* fast sel with no restr on resp */
+ if (x25->facilities.reverse & 0x80) {
+ len += 1 + X25_MAX_FAC_LEN + X25_MAX_CUD_LEN;
+ } else {
+ len += 1 + X25_MAX_FAC_LEN;
+ }
+ break;
+ case X25_CLEAR_REQUEST:
+ case X25_RESET_REQUEST:
+ len += 2;
+ break;
+ case X25_RR:
+ case X25_RNR:
+ case X25_REJ:
+ case X25_CLEAR_CONFIRMATION:
+ case X25_INTERRUPT_CONFIRMATION:
+ case X25_RESET_CONFIRMATION:
+ break;
+ default:
+ printk(KERN_ERR "X.25: invalid frame type %02X\n", frametype);
+ return;
}
if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
@@ -280,20 +278,20 @@ int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
*ns = *nr = *q = *d = *m = 0;
switch (frame[2]) {
- case X25_CALL_REQUEST:
- case X25_CALL_ACCEPTED:
- case X25_CLEAR_REQUEST:
- case X25_CLEAR_CONFIRMATION:
- case X25_INTERRUPT:
- case X25_INTERRUPT_CONFIRMATION:
- case X25_RESET_REQUEST:
- case X25_RESET_CONFIRMATION:
- case X25_RESTART_REQUEST:
- case X25_RESTART_CONFIRMATION:
- case X25_REGISTRATION_REQUEST:
- case X25_REGISTRATION_CONFIRMATION:
- case X25_DIAGNOSTIC:
- return frame[2];
+ case X25_CALL_REQUEST:
+ case X25_CALL_ACCEPTED:
+ case X25_CLEAR_REQUEST:
+ case X25_CLEAR_CONFIRMATION:
+ case X25_INTERRUPT:
+ case X25_INTERRUPT_CONFIRMATION:
+ case X25_RESET_REQUEST:
+ case X25_RESET_CONFIRMATION:
+ case X25_RESTART_REQUEST:
+ case X25_RESTART_CONFIRMATION:
+ case X25_REGISTRATION_REQUEST:
+ case X25_REGISTRATION_CONFIRMATION:
+ case X25_DIAGNOSTIC:
+ return frame[2];
}
if (x25->neighbour->extended) {