author     David S. Miller <davem@davemloft.net>    2011-05-18 18:23:21 -0400
committer  David S. Miller <davem@davemloft.net>    2011-05-18 18:23:21 -0400
commit     6882f933ccee5c3a86443ffc7621ce888b93ab6b (patch)
tree       07998f54bd459c5345491fbaeae03bd60540c6e8 /net/core
parent     12f4d0a8770ab26639091d0b2509b19681daad69 (diff)
ipv4: Kill RT_CACHE_DEBUG
It's way past its usefulness. And this gets rid of a bunch of stray
->rt_{dst,src} references.

Even the comment documenting the macro was inaccurate (it stated the
default was 1 when it's 0).

If reintroduced, it should be done properly, with dynamic debug
facilities.

Signed-off-by: David S. Miller <davem@davemloft.net>
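For reference, a minimal sketch of what "doing it properly, with dynamic
debug facilities" could look like for the removed GC report. This is not
part of the patch; the helper name and its placement are hypothetical.
pr_debug() compiles out unless DEBUG or CONFIG_DYNAMIC_DEBUG is set, and
with dynamic debug the message can be toggled at runtime instead of
requiring a rebuild with RT_CACHE_DEBUG >= 2:

    #include <linux/ktime.h>
    #include <linux/printk.h>

    /*
     * Hypothetical helper, not from this patch: report dst_gc_task()
     * timing via pr_debug() instead of a compile-time RT_CACHE_DEBUG
     * level.  With CONFIG_DYNAMIC_DEBUG it can be enabled at runtime:
     *   echo 'file net/core/dst.c +p' > /sys/kernel/debug/dynamic_debug/control
     */
    static void dst_gc_report(ktime_t time_start, int delayed,
                              int work_performed, unsigned long expires)
    {
            s64 elapsed_us = ktime_to_us(ktime_sub(ktime_get(), time_start));

            pr_debug("delayed: %d work_perf: %d expires: %lu elapsed: %lld us\n",
                     delayed, work_performed, expires,
                     (long long)elapsed_us);
    }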
Diffstat (limited to 'net/core')
 -rw-r--r--  net/core/dst.c | 22 ----------------------
 1 file changed, 0 insertions(+), 22 deletions(-)
diff --git a/net/core/dst.c b/net/core/dst.c
index 30f0093..da47a29 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -33,9 +33,6 @@
* 3) This list is guarded by a mutex,
* so that the gc_task and dst_dev_event() can be synchronized.
*/
-#if RT_CACHE_DEBUG >= 2
-static atomic_t dst_total = ATOMIC_INIT(0);
-#endif
/*
* We want to keep lock & list close together
@@ -69,10 +66,6 @@ static void dst_gc_task(struct work_struct *work)
unsigned long expires = ~0L;
struct dst_entry *dst, *next, head;
struct dst_entry *last = &head;
-#if RT_CACHE_DEBUG >= 2
- ktime_t time_start = ktime_get();
- struct timespec elapsed;
-#endif
mutex_lock(&dst_gc_mutex);
next = dst_busy_list;
@@ -146,15 +139,6 @@ loop:
spin_unlock_bh(&dst_garbage.lock);
mutex_unlock(&dst_gc_mutex);
-#if RT_CACHE_DEBUG >= 2
- elapsed = ktime_to_timespec(ktime_sub(ktime_get(), time_start));
- printk(KERN_DEBUG "dst_total: %d delayed: %d work_perf: %d"
- " expires: %lu elapsed: %lu us\n",
- atomic_read(&dst_total), delayed, work_performed,
- expires,
- elapsed.tv_sec * USEC_PER_SEC +
- elapsed.tv_nsec / NSEC_PER_USEC);
-#endif
}
int dst_discard(struct sk_buff *skb)
@@ -205,9 +189,6 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
dst->lastuse = jiffies;
dst->flags = flags;
dst->next = NULL;
-#if RT_CACHE_DEBUG >= 2
- atomic_inc(&dst_total);
-#endif
dst_entries_add(ops, 1);
return dst;
}
@@ -267,9 +248,6 @@ again:
dst->ops->destroy(dst);
if (dst->dev)
dev_put(dst->dev);
-#if RT_CACHE_DEBUG >= 2
- atomic_dec(&dst_total);
-#endif
kmem_cache_free(dst->ops->kmem_cachep, dst);
dst = child;
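Note that dst_alloc() and dst_destroy() still call dst_entries_add(), so
the removed dst_total atomic carried little unique information: the
per-dst_ops percpu counter already accounts for live entries (per ops
rather than globally). A hedged sketch, not part of the patch, of reading
that counter for debugging; the helper name is made up, while
dst_entries_get_slow() is the existing accessor from
include/net/dst_ops.h:

    #include <linux/printk.h>
    #include <net/dst_ops.h>

    /*
     * Hypothetical debug helper, not from this patch: the percpu counter
     * updated by dst_entries_add() already counts live entries for a
     * given dst_ops, so a debug report can sum it instead of maintaining
     * a second global atomic like dst_total.
     */
    static void dst_debug_report_entries(struct dst_ops *ops)
    {
            pr_debug("dst entries: %d\n", dst_entries_get_slow(ops));
    }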