Diffstat (limited to 'net/sched/em_cmp.c')
-rw-r--r--	net/sched/em_cmp.c	9
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/net/sched/em_cmp.c b/net/sched/em_cmp.c
index cc49c93..bc45039 100644
--- a/net/sched/em_cmp.c
+++ b/net/sched/em_cmp.c
@@ -14,6 +14,7 @@
 #include <linux/kernel.h>
 #include <linux/skbuff.h>
 #include <linux/tc_ematch/tc_em_cmp.h>
+#include <asm/unaligned.h>
 #include <net/pkt_cls.h>
 
 static inline int cmp_needs_transformation(struct tcf_em_cmp *cmp)
@@ -37,8 +38,7 @@ static int em_cmp_match(struct sk_buff *skb, struct tcf_ematch *em,
 		break;
 
 	case TCF_EM_ALIGN_U16:
-		val = *ptr << 8;
-		val |= *(ptr+1);
+		val = get_unaligned_be16(ptr);
 
 		if (cmp_needs_transformation(cmp))
 			val = be16_to_cpu(val);
@@ -47,10 +47,7 @@ static int em_cmp_match(struct sk_buff *skb, struct tcf_ematch *em,
 	case TCF_EM_ALIGN_U32:
 		/* Worth checking boundries? The branching seems
 		 * to get worse. Visit again. */
-		val = *ptr << 24;
-		val |= *(ptr+1) << 16;
-		val |= *(ptr+2) << 8;
-		val |= *(ptr+3);
+		val = get_unaligned_be32(ptr);
 
 		if (cmp_needs_transformation(cmp))
 			val = be32_to_cpu(val);
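
For reference, here is a minimal userspace sketch of what this change relies on: get_unaligned_be16()/get_unaligned_be32() from <asm/unaligned.h> read a big-endian value from a possibly unaligned pointer, which is the same byte assembly the removed open-coded shifts performed. The example_get_unaligned_*() helpers and the test buffer below are local to this illustration, not the kernel implementation.

/* Userspace sketch only; in the kernel these helpers come from <asm/unaligned.h>. */
#include <stdint.h>
#include <stdio.h>

static uint16_t example_get_unaligned_be16(const void *p)
{
	const uint8_t *b = p;

	/* Same byte assembly the old TCF_EM_ALIGN_U16 branch open-coded. */
	return (uint16_t)(b[0] << 8 | b[1]);
}

static uint32_t example_get_unaligned_be32(const void *p)
{
	const uint8_t *b = p;

	/* Same byte assembly the old TCF_EM_ALIGN_U32 branch open-coded. */
	return (uint32_t)b[0] << 24 | (uint32_t)b[1] << 16 |
	       (uint32_t)b[2] << 8  | (uint32_t)b[3];
}

int main(void)
{
	/* Packet-like buffer; the 16- and 32-bit fields start at odd offsets. */
	uint8_t buf[] = { 0x00, 0x12, 0x34, 0xde, 0xad, 0xbe, 0xef };

	printf("be16 at +1: 0x%04x\n", (unsigned int)example_get_unaligned_be16(buf + 1));
	printf("be32 at +3: 0x%08x\n", (unsigned int)example_get_unaligned_be32(buf + 3));
	return 0;
}

The point of switching to the common helpers is that the byte-wise access is centralized: architectures that cannot dereference unaligned pointers directly keep the safe per-byte reads, while those that can may implement the helpers as a single load plus byte swap. The open-coded shifts in em_cmp_match() were already portable; the patch just replaces them with the shared implementation.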